// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them. Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets; SOCK_DGRAM sockets will always remain in
 * that list. The bound table is used solely for lookup of sockets when packets
 * are received, and that's not necessary for SOCK_DGRAM sockets since we
 * create a datagram handle for each and need not perform a lookup. Keeping
 * SOCK_DGRAM sockets out of the bound hash buckets will reduce the chance of
 * collisions when looking for SOCK_STREAM sockets and prevents us from having
 * to check the socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we do
 * not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state. When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket. These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is one that has an
 * existing pending connection. If it is, we process the packet for the
 * pending socket. When that socket reaches the connected state, it is removed
 * from the listener socket's pending list and enqueued in the listener
 * socket's accept queue. Callers of accept(2) will accept connected sockets
 * from the listener socket's accept queue. If the socket cannot be accepted
 * for some reason then it is marked rejected. Once the connection is
 * accepted, it is owned by the user process and the responsibility for cleanup
 * falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request. Because of this, we must schedule a cleanup function to run in the
 * future, after some amount of time passes where a connection should have been
 * established. This function ensures that the socket is off all lists so it
 * cannot be retrieved, then drops all references to the socket so it is
 * cleaned up (sock_put() -> sk_free() -> our sk_destruct implementation). Note
 * this function will also clean up rejected sockets, those that reach the
 * connected state but leave it before they have been accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *     lock_sock(listener);
 *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called. Our release
 * implementation will perform some cleanup then drop the last reference so our
 * sk_destruct implementation is invoked. Our sk_destruct implementation will
 * perform additional cleanup that's common for both types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed. Each entry in a list (such as the "global" bound and connected tables
 * and the listener socket's pending list and connected queue) ensures a
 * reference. When we defer work until process context and pass a socket as our
 * argument, we must ensure the reference count is increased to ensure the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 * other address families and exposed to userspace tools like ss(8):
 *
 *   TCP_CLOSE - unconnected
 *   TCP_SYN_SENT - connecting
 *   TCP_ESTABLISHED - connected
 *   TCP_CLOSING - disconnecting
 *   TCP_LISTEN - listening
 */
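
/* To make the state transitions above concrete, here is a minimal userspace
 * client sketch (illustrative only, not part of this driver; the port number
 * is hypothetical and error handling is omitted). connect() drives the socket
 * from TCP_CLOSE through TCP_SYN_SENT to TCP_ESTABLISHED on success:
 *
 *	#include <sys/socket.h>
 *	#include <linux/vm_sockets.h>
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_HOST,	// CID 2: the host
 *		.svm_port = 1234,		// hypothetical service port
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */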

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/errqueue.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>
#include <uapi/linux/vm_sockets.h>
#include <uapi/asm-generic/ioctls.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
static void vsock_close(struct sock *sk, long timeout);

/* Protocol family. */
struct proto vsock_proto = {
	.name = "AF_VSOCK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
	.close = vsock_close,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot = vsock_bpf_update_proto,
#endif
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define VSOCK_DEFAULT_BUFFER_SIZE     (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128

/* Transport used for host->guest communication */
static const struct vsock_transport *transport_h2g;
/* Transport used for guest->host communication */
static const struct vsock_transport *transport_g2h;
/* Transport used for DGRAM communication */
static const struct vsock_transport *transport_dgram;
/* Transport used for local communication */
static const struct vsock_transport *transport_local;
static DEFINE_MUTEX(vsock_register_mutex);

/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the hash
 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
 * represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
 * mods with VSOCK_HASH_SIZE to ensure this.
 */
#define MAX_PORT_RETRIES	24

#define VSOCK_HASH(addr)	((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets	  (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst) \
	(((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst) \
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk) \
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);
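
/* For illustration (assuming the usual VSOCK_HASH_SIZE of 251 from
 * <net/af_vsock.h>): a socket bound to port 1024 hashes to bucket
 * 1024 % 251 == 20, so it lives on &vsock_bind_table[20], while every
 * unbound socket sits on &vsock_bind_table[251] regardless of its address.
 */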

/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	struct sockaddr_vm local_addr;

	if (vsock_addr_bound(&vsk->local_addr))
		return 0;
	vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __vsock_bind(sk, &local_addr);
}

static void vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
}

static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
	sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
	sock_put(&vsk->sk);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
		if (vsock_addr_equals_addr(addr, &vsk->local_addr))
			return sk_vsock(vsk);

		if (addr->svm_port == vsk->local_addr.svm_port &&
		    (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
		     addr->svm_cid == VMADDR_CID_ANY))
			return sk_vsock(vsk);
	}

	return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_bound_table(vsk))
		__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_connected_table(vsk))
		__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
					 struct sockaddr_vm *dst)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_connected_socket(src, dst);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

void vsock_remove_sock(struct vsock_sock *vsk)
{
	/* Transport reassignment must not remove the binding. */
	if (sock_flag(sk_vsock(vsk), SOCK_DEAD))
		vsock_remove_bound(vsk);

	vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);

void vsock_for_each_connected_socket(struct vsock_transport *transport,
				     void (*fn)(struct sock *sk))
{
	int i;

	spin_lock_bh(&vsock_table_lock);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;

		list_for_each_entry(vsk, &vsock_connected_table[i],
				    connected_table) {
			if (vsk->transport != transport)
				continue;

			fn(sk_vsock(vsk));
		}
	}

	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);

void vsock_add_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;

	vlistener = vsock_sk(listener);
	vpending = vsock_sk(pending);

	sock_hold(pending);
	sock_hold(listener);
	list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vpending = vsock_sk(pending);

	list_del_init(&vpending->pending_links);
	sock_put(listener);
	sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);
	vconnected = vsock_sk(connected);

	sock_hold(connected);
	sock_hold(listener);
	list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);

static bool vsock_use_local_transport(unsigned int remote_cid)
{
	lockdep_assert_held(&vsock_register_mutex);

	if (!transport_local)
		return false;

	if (remote_cid == VMADDR_CID_LOCAL)
		return true;

	if (transport_g2h)
		return remote_cid == transport_g2h->get_local_cid();
	else
		return remote_cid == VMADDR_CID_HOST;
}

static void vsock_deassign_transport(struct vsock_sock *vsk)
{
	if (!vsk->transport)
		return;

	vsk->transport->destruct(vsk);
	module_put(vsk->transport->module);
	vsk->transport = NULL;
}

/* Assign a transport to a socket and call the .init transport callback.
 *
 * Note: for connection-oriented sockets this must be called when
 * vsk->remote_addr is set (e.g. during connect() or when a connection request
 * on a listener socket is received).
 * The vsk->remote_addr is used to decide which transport to use:
 *  - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if
 *    g2h is not loaded, will use local transport;
 *  - remote CID <= VMADDR_CID_HOST or h2g is not loaded or remote flags field
 *    includes VMADDR_FLAG_TO_HOST flag value, will use guest->host transport;
 *  - remote CID > VMADDR_CID_HOST will use host->guest transport;
 */
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
{
	const struct vsock_transport *new_transport;
	struct sock *sk = sk_vsock(vsk);
	unsigned int remote_cid = vsk->remote_addr.svm_cid;
	__u8 remote_flags;
	int ret;

	/* If the packet is coming with the source and destination CIDs higher
	 * than VMADDR_CID_HOST, then a vsock channel where all the packets are
	 * forwarded to the host should be established. Then the host will
	 * need to forward the packets to the guest.
	 *
	 * The flag is set on the (listen) receive path (psk is not NULL). On
	 * the connect path the flag can be set by the user space application.
	 */
	if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST &&
	    vsk->remote_addr.svm_cid > VMADDR_CID_HOST)
		vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST;

	remote_flags = vsk->remote_addr.svm_flags;

	mutex_lock(&vsock_register_mutex);

	switch (sk->sk_type) {
	case SOCK_DGRAM:
		new_transport = transport_dgram;
		break;
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		if (vsock_use_local_transport(remote_cid))
			new_transport = transport_local;
		else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g ||
			 (remote_flags & VMADDR_FLAG_TO_HOST))
			new_transport = transport_g2h;
		else
			new_transport = transport_h2g;
		break;
	default:
		ret = -ESOCKTNOSUPPORT;
		goto err;
	}

	if (vsk->transport) {
		if (vsk->transport == new_transport) {
			ret = 0;
			goto err;
		}

		/* transport->release() must be called with sock lock acquired.
		 * This path can only be taken during vsock_connect(), where we
		 * have already held the sock lock. In the other cases, this
		 * function is called on a new socket which is not assigned to
		 * any transport.
		 */
		vsk->transport->release(vsk);
		vsock_deassign_transport(vsk);

		/* transport's release() and destruct() can touch some socket
		 * state, since we are reassigning the socket to a new transport
		 * during vsock_connect(), let's reset these fields to have a
		 * clean state.
		 */
		sock_reset_flag(sk, SOCK_DONE);
		sk->sk_state = TCP_CLOSE;
		vsk->peer_shutdown = 0;
	}

	/* We increase the module refcnt to prevent the transport unloading
	 * while there are open sockets assigned to it.
	 */
	if (!new_transport || !try_module_get(new_transport->module)) {
		ret = -ENODEV;
		goto err;
	}

	/* It's safe to release the mutex after a successful try_module_get().
	 * Whichever transport `new_transport` points at, it won't go away until
	 * the last module_put() below or in vsock_deassign_transport().
	 */
	mutex_unlock(&vsock_register_mutex);

	if (sk->sk_type == SOCK_SEQPACKET) {
		if (!new_transport->seqpacket_allow ||
		    !new_transport->seqpacket_allow(remote_cid)) {
			module_put(new_transport->module);
			return -ESOCKTNOSUPPORT;
		}
	}

	ret = new_transport->init(vsk, psk);
	if (ret) {
		module_put(new_transport->module);
		return ret;
	}

	vsk->transport = new_transport;

	return 0;
err:
	mutex_unlock(&vsock_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(vsock_assign_transport);
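
/* Concrete examples of the selection rules above (illustrative only): a guest
 * connecting to remote CID 2 (VMADDR_CID_HOST) picks transport_g2h; a host
 * connecting out to a guest at, say, remote CID 42 picks transport_h2g; and
 * remote CID 1 (VMADDR_CID_LOCAL) picks transport_local when it is registered.
 */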

/*
 * Provide safe access to static transport_{h2g,g2h,dgram,local} callbacks.
 * Otherwise we may race with module removal. Do not use on `vsk->transport`.
 */
static u32 vsock_registered_transport_cid(const struct vsock_transport **transport)
{
	u32 cid = VMADDR_CID_ANY;

	mutex_lock(&vsock_register_mutex);
	if (*transport)
		cid = (*transport)->get_local_cid();
	mutex_unlock(&vsock_register_mutex);

	return cid;
}

bool vsock_find_cid(unsigned int cid)
{
	if (cid == vsock_registered_transport_cid(&transport_g2h))
		return true;

	if (transport_h2g && cid == VMADDR_CID_HOST)
		return true;

	if (transport_local && cid == VMADDR_CID_LOCAL)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(vsock_find_cid);

static struct sock *vsock_dequeue_accept(struct sock *listener)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);

	if (list_empty(&vlistener->accept_queue))
		return NULL;

	vconnected = list_entry(vlistener->accept_queue.next,
				struct vsock_sock, accept_queue);

	list_del_init(&vconnected->accept_queue);
	sock_put(listener);
	/* The caller will need a reference on the connected socket so we let
	 * it call sock_put().
	 */

	return sk_vsock(vconnected);
}

static bool vsock_is_accept_queue_empty(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (!vsk->transport)
		return -ENODEV;

	return vsk->transport->shutdown(vsk, mode);
}

static void vsock_pending_work(struct work_struct *work)
{
	struct sock *sk;
	struct sock *listener;
	struct vsock_sock *vsk;
	bool cleanup;

	vsk = container_of(work, struct vsock_sock, pending_work.work);
	sk = sk_vsock(vsk);
	listener = vsk->listener;
	cleanup = true;

	lock_sock(listener);
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (vsock_is_pending(sk)) {
		vsock_remove_pending(listener, sk);

		sk_acceptq_removed(listener);
	} else if (!vsk->rejected) {
		/* We are not on the pending list and accept() did not reject
		 * us, so we must have been accepted by our user process. We
		 * just need to drop our references to the sockets and be on
		 * our way.
		 */
		cleanup = false;
		goto out;
	}

	/* We need to remove ourself from the global connected sockets list so
	 * incoming packets can't find this socket, and to reduce the reference
	 * count.
	 */
	vsock_remove_connected(vsk);

	sk->sk_state = TCP_CLOSE;

out:
	release_sock(sk);
	release_sock(listener);
	if (cleanup)
		sock_put(sk);

	sock_put(sk);
	sock_put(listener);
}

/**** SOCKET OPERATIONS ****/

static int __vsock_bind_connectible(struct vsock_sock *vsk,
				    struct sockaddr_vm *addr)
{
	static u32 port;
	struct sockaddr_vm new_addr;

	if (!port)
		port = get_random_u32_above(LAST_RESERVED_PORT);

	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

	if (addr->svm_port == VMADDR_PORT_ANY) {
		bool found = false;
		unsigned int i;

		for (i = 0; i < MAX_PORT_RETRIES; i++) {
			if (port <= LAST_RESERVED_PORT)
				port = LAST_RESERVED_PORT + 1;

			new_addr.svm_port = port++;

			if (!__vsock_find_bound_socket(&new_addr)) {
				found = true;
				break;
			}
		}

		if (!found)
			return -EADDRNOTAVAIL;
	} else {
		/* If port is in reserved range, ensure caller
		 * has necessary privileges.
		 */
		if (addr->svm_port <= LAST_RESERVED_PORT &&
		    !capable(CAP_NET_BIND_SERVICE)) {
			return -EACCES;
		}

		if (__vsock_find_bound_socket(&new_addr))
			return -EADDRINUSE;
	}

	vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

	/* Remove connection-oriented sockets from the unbound list and add
	 * them to the hash table for easy lookup by their address. The unbound
	 * list is simply an extra entry at the end of the hash table, a trick
	 * used by AF_UNIX.
	 */
	__vsock_remove_bound(vsk);
	__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

	return 0;
}

static int __vsock_bind_dgram(struct vsock_sock *vsk,
			      struct sockaddr_vm *addr)
{
	return vsk->transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	int retval;

	/* First ensure this socket isn't already bound. */
	if (vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	/* Now bind to the provided address or select appropriate values if
	 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
	 * like AF_INET prevents binding to a non-local IP address (in most
	 * cases), we only allow binding to a local CID.
	 */
	if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid))
		return -EADDRNOTAVAIL;

	switch (sk->sk_socket->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		spin_lock_bh(&vsock_table_lock);
		retval = __vsock_bind_connectible(vsk, addr);
		spin_unlock_bh(&vsock_table_lock);
		break;

	case SOCK_DGRAM:
		retval = __vsock_bind_dgram(vsk, addr);
		break;

	default:
		retval = -EINVAL;
		break;
	}

	return retval;
}

static void vsock_connect_timeout(struct work_struct *work);

static struct sock *__vsock_create(struct net *net,
				   struct socket *sock,
				   struct sock *parent,
				   gfp_t priority,
				   unsigned short type,
				   int kern)
{
	struct sock *sk;
	struct vsock_sock *psk;
	struct vsock_sock *vsk;

	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set in sock_init_data, but only if sock is
	 * non-NULL. We make sure that our sockets always have a type by
	 * setting it here if needed.
	 */
	if (!sock)
		sk->sk_type = type;

	vsk = vsock_sk(sk);
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = vsock_sk_destruct;
	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
	sock_reset_flag(sk, SOCK_DONE);

	INIT_LIST_HEAD(&vsk->bound_table);
	INIT_LIST_HEAD(&vsk->connected_table);
	vsk->listener = NULL;
	INIT_LIST_HEAD(&vsk->pending_links);
	INIT_LIST_HEAD(&vsk->accept_queue);
	vsk->rejected = false;
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;
	vsk->peer_shutdown = 0;
	INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
	INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);

	psk = parent ? vsock_sk(parent) : NULL;
	if (parent) {
		vsk->trusted = psk->trusted;
		vsk->owner = get_cred(psk->owner);
		vsk->connect_timeout = psk->connect_timeout;
		vsk->buffer_size = psk->buffer_size;
		vsk->buffer_min_size = psk->buffer_min_size;
		vsk->buffer_max_size = psk->buffer_max_size;
		security_sk_clone(parent, sk);
	} else {
		vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
		vsk->owner = get_current_cred();
		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
		vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;
		vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE;
		vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE;
	}

	return sk;
}

static bool sock_type_connectible(u16 type)
{
	return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET);
}

static void __vsock_release(struct sock *sk, int level)
{
	struct vsock_sock *vsk;
	struct sock *pending;

	vsk = vsock_sk(sk);
	pending = NULL;	/* Compiler warning. */

	/* When "level" is SINGLE_DEPTH_NESTING, use the nested
	 * version to avoid the warning "possible recursive locking
	 * detected". When "level" is 0, lock_sock_nested(sk, level)
	 * is the same as lock_sock(sk).
	 */
	lock_sock_nested(sk, level);

	/* Indicate to vsock_remove_sock() that the socket is being released
	 * and can be removed from the bound_table. This is unlike the
	 * transport reassignment case, where the socket must remain bound
	 * despite vsock_remove_sock() being called from the transport
	 * release() callback.
	 */
	sock_set_flag(sk, SOCK_DEAD);

	if (vsk->transport)
		vsk->transport->release(vsk);
	else if (sock_type_connectible(sk->sk_type))
		vsock_remove_sock(vsk);

	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(&sk->sk_receive_queue);

	/* Clean up any sockets that never were accepted. */
	while ((pending = vsock_dequeue_accept(sk)) != NULL) {
		__vsock_release(pending, SINGLE_DEPTH_NESTING);
		sock_put(pending);
	}

	release_sock(sk);
	sock_put(sk);
}

static void vsock_sk_destruct(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* Flush MSG_ZEROCOPY leftovers. */
	__skb_queue_purge(&sk->sk_error_queue);

	vsock_deassign_transport(vsk);

	/* When clearing these addresses, there's no need to set the family and
	 * possibly register the address family with the kernel.
	 */
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(vsk->owner);
}

static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}

struct sock *vsock_create_connected(struct sock *parent)
{
	return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL,
			      parent->sk_type, 0);
}
EXPORT_SYMBOL_GPL(vsock_create_connected);

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
	if (WARN_ON(!vsk->transport))
		return 0;

	return vsk->transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

s64 vsock_connectible_has_data(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);

	if (WARN_ON(!vsk->transport))
		return 0;

	if (sk->sk_type == SOCK_SEQPACKET)
		return vsk->transport->seqpacket_has_data(vsk);
	else
		return vsock_stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_connectible_has_data);

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
	if (WARN_ON(!vsk->transport))
		return 0;

	return vsk->transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);

void vsock_data_ready(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (vsock_stream_has_data(vsk) >= sk->sk_rcvlowat ||
	    sock_flag(sk, SOCK_DONE))
		sk->sk_data_ready(sk);
}
EXPORT_SYMBOL_GPL(vsock_data_ready);

/* Dummy callback required by sockmap.
 * See unconditional call of saved_close() in sock_map_close().
 */
static void vsock_close(struct sock *sk, long timeout)
{
}

static int vsock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sk->sk_prot->close(sk, 0);
	__vsock_release(sk, 0);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	int err;
	struct sock *sk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;

	if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	err = __vsock_bind(sk, vm_addr);
	release_sock(sk);

	return err;
}

static int vsock_getname(struct socket *sock,
			 struct sockaddr *addr, int peer)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (peer) {
		if (sock->state != SS_CONNECTED) {
			err = -ENOTCONN;
			goto out;
		}
		vm_addr = &vsk->remote_addr;
	} else {
		vm_addr = &vsk->local_addr;
	}

	/* sys_getsockname() and sys_getpeername() pass us a
	 * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
	 * that macro is defined in socket.c instead of .h, so we hardcode its
	 * value here.
	 */
	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	err = sizeof(*vm_addr);

out:
	release_sock(sk);
	return err;
}

void vsock_linger(struct sock *sk)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	ssize_t (*unsent)(struct vsock_sock *vsk);
	struct vsock_sock *vsk = vsock_sk(sk);
	long timeout;

	if (!sock_flag(sk, SOCK_LINGER))
		return;

	timeout = sk->sk_lingertime;
	if (!timeout)
		return;

	/* Transports must implement `unsent_bytes` if they want to support
	 * SOCK_LINGER through `vsock_linger()` since we use it to check when
	 * the socket can be closed.
	 */
	unsent = vsk->transport->unsent_bytes;
	if (!unsent)
		return;

	add_wait_queue(sk_sleep(sk), &wait);

	do {
		if (sk_wait_event(sk, &timeout, unsent(vsk) == 0, &wait))
			break;
	} while (!signal_pending(current) && timeout);

	remove_wait_queue(sk_sleep(sk), &wait);
}
EXPORT_SYMBOL_GPL(vsock_linger);
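
/* Userspace opts into the lingering close above via the standard socket
 * option (illustrative sketch only; error handling omitted):
 *
 *	struct linger lin = {
 *		.l_onoff = 1,	// enable SOCK_LINGER
 *		.l_linger = 5,	// wait up to 5 seconds for unsent bytes
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 */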

static int vsock_shutdown(struct socket *sock, int mode)
{
	int err;
	struct sock *sk;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do. Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;

	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a connection-oriented socket and it is not connected then
	 * bail out immediately. If it is a DGRAM socket then we must first
	 * kick the socket so that it wakes up from any sleeping calls, for
	 * example recv(), and then afterwards return the error.
	 */

	sk = sock->sk;

	lock_sock(sk);
	if (sock->state == SS_UNCONNECTED) {
		err = -ENOTCONN;
		if (sock_type_connectible(sk->sk_type))
			goto out;
	} else {
		sock->state = SS_DISCONNECTING;
		err = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);

		if (sock_type_connectible(sk->sk_type)) {
			sock_reset_flag(sk, SOCK_DONE);
			vsock_send_shutdown(sk, mode);
		}
	}

out:
	release_sock(sk);
	return err;
}

static __poll_t vsock_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk;
	__poll_t mask;
	struct vsock_sock *vsk;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		/* Signify that there has been an error on this socket. */
		mask |= EPOLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as a
	 * case of EPOLLHUP set.
	 */
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (vsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= EPOLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    vsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= EPOLLRDHUP;
	}

	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown for
		 * sending.
		 */
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	} else if (sock_type_connectible(sk->sk_type)) {
		const struct vsock_transport *transport;

		lock_sock(sk);

		transport = vsk->transport;

		/* Listening sockets that have connections in their accept
		 * queue can be read.
		 */
		if (sk->sk_state == TCP_LISTEN &&
		    !vsock_is_accept_queue_empty(sk))
			mask |= EPOLLIN | EPOLLRDNORM;

		/* If there is something in the queue then we can read. */
		if (transport && transport->stream_is_active(vsk) &&
		    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
			bool data_ready_now = false;
			int target = sock_rcvlowat(sk, 0, INT_MAX);
			int ret = transport->notify_poll_in(
					vsk, target, &data_ready_now);
			if (ret < 0) {
				mask |= EPOLLERR;
			} else {
				if (data_ready_now)
					mask |= EPOLLIN | EPOLLRDNORM;
			}
		}

		/* Sockets whose connections have been closed, reset, or
		 * terminated should also be considered read, and we check the
		 * shutdown flag for that.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN ||
		    vsk->peer_shutdown & SEND_SHUTDOWN) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		/* Connected sockets that can produce data can be written. */
		if (transport && sk->sk_state == TCP_ESTABLISHED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
				bool space_avail_now = false;
				int ret = transport->notify_poll_out(
						vsk, 1, &space_avail_now);
				if (ret < 0) {
					mask |= EPOLLERR;
				} else {
					if (space_avail_now)
						/* Remove EPOLLWRBAND since INET
						 * sockets are not setting it.
						 */
						mask |= EPOLLOUT | EPOLLWRNORM;
				}
			}
		}

		/* Simulate INET socket poll behaviors, which sets
		 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
		 * but local send is not shutdown.
		 */
		if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				mask |= EPOLLOUT | EPOLLWRNORM;
		}

		release_sock(sk);
	}

	return mask;
}

static int vsock_read_skb(struct sock *sk, skb_read_actor_t read_actor)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (WARN_ON_ONCE(!vsk->transport))
		return -ENODEV;

	return vsk->transport->read_skb(vsk, read_actor);
}

static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;
	const struct vsock_transport *transport;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* For now, MSG_DONTWAIT is always assumed... */
	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	transport = vsk->transport;

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	/* If the provided message contains an address, use that. Otherwise
	 * fall back on the socket's remote handle (if it has been connected).
	 */
	if (msg->msg_name &&
	    vsock_addr_cast(msg->msg_name, msg->msg_namelen,
			    &remote_addr) == 0) {
		/* Ensure this address is of the right type and is a valid
		 * destination.
		 */

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		if (!vsock_addr_bound(remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &vsk->remote_addr;

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		/* XXX Should connect() or this function ensure remote_addr is
		 * bound?
		 */
		if (!vsock_addr_bound(&vsk->remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else {
		err = -EINVAL;
		goto out;
	}

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	err = transport->dgram_enqueue(vsk, remote_addr, msg, len);

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_connect(struct socket *sock,
			       struct sockaddr *addr, int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	err = vsock_addr_cast(addr, addr_len, &remote_addr);
	if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
		lock_sock(sk);
		vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
				VMADDR_PORT_ANY);
		sock->state = SS_UNCONNECTED;
		release_sock(sk);
		return 0;
	} else if (err != 0)
		return -EINVAL;

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	if (!vsk->transport->dgram_allow(remote_addr->svm_cid,
					 remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
	sock->state = SS_CONNECTED;

	/* sock map disallows redirection of non-TCP sockets with sk_state !=
	 * TCP_ESTABLISHED (see sock_map_redirect_allowed()), so we set
	 * TCP_ESTABLISHED here to allow redirection of connected vsock dgrams.
	 *
	 * This doesn't seem to be an abnormal state for datagram sockets, as
	 * the same approach can be seen in other datagram socket types as well
	 * (such as unix sockets).
	 */
	sk->sk_state = TCP_ESTABLISHED;

out:
	release_sock(sk);
	return err;
}

int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			  size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct vsock_sock *vsk = vsock_sk(sk);

	return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
}

int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t len, int flags)
{
#ifdef CONFIG_BPF_SYSCALL
	struct sock *sk = sock->sk;
	const struct proto *prot;

	prot = READ_ONCE(sk->sk_prot);
	if (prot != &vsock_proto)
		return prot->recvmsg(sk, msg, len, flags, NULL);
#endif

	return __vsock_dgram_recvmsg(sock, msg, len, flags);
}
EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg);

static int vsock_do_ioctl(struct socket *sock, unsigned int cmd,
			  int __user *arg)
{
	struct sock *sk = sock->sk;
	struct vsock_sock *vsk;
	int ret;

	vsk = vsock_sk(sk);

	switch (cmd) {
	case SIOCINQ: {
		ssize_t n_bytes;

		if (!vsk->transport) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (sock_type_connectible(sk->sk_type) &&
		    sk->sk_state == TCP_LISTEN) {
			ret = -EINVAL;
			break;
		}

		n_bytes = vsock_stream_has_data(vsk);
		if (n_bytes < 0) {
			ret = n_bytes;
			break;
		}
		ret = put_user(n_bytes, arg);
		break;
	}
	case SIOCOUTQ: {
		ssize_t n_bytes;

		if (!vsk->transport || !vsk->transport->unsent_bytes) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (sock_type_connectible(sk->sk_type) &&
		    sk->sk_state == TCP_LISTEN) {
			ret = -EINVAL;
			break;
		}

		n_bytes = vsk->transport->unsent_bytes(vsk);
		if (n_bytes < 0) {
			ret = n_bytes;
			break;
		}

		ret = put_user(n_bytes, arg);
		break;
	}
	default:
		ret = -ENOIOCTLCMD;
	}

	return ret;
}

static int vsock_ioctl(struct socket *sock, unsigned int cmd,
		       unsigned long arg)
{
	int ret;

	lock_sock(sock->sk);
	ret = vsock_do_ioctl(sock, cmd, (int __user *)arg);
	release_sock(sock->sk);

	return ret;
}
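
/* The two ioctls above mirror their TCP counterparts; from userspace they
 * read back into an int (illustrative sketch only, assuming a connected
 * stream socket fd):
 *
 *	int unread, unsent;
 *
 *	ioctl(fd, SIOCINQ, &unread);	// bytes waiting in the receive queue
 *	ioctl(fd, SIOCOUTQ, &unsent);	// bytes not yet sent by the transport
 */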
1483
1484 static const struct proto_ops vsock_dgram_ops = {
1485 .family = PF_VSOCK,
1486 .owner = THIS_MODULE,
1487 .release = vsock_release,
1488 .bind = vsock_bind,
1489 .connect = vsock_dgram_connect,
1490 .socketpair = sock_no_socketpair,
1491 .accept = sock_no_accept,
1492 .getname = vsock_getname,
1493 .poll = vsock_poll,
1494 .ioctl = vsock_ioctl,
1495 .listen = sock_no_listen,
1496 .shutdown = vsock_shutdown,
1497 .sendmsg = vsock_dgram_sendmsg,
1498 .recvmsg = vsock_dgram_recvmsg,
1499 .mmap = sock_no_mmap,
1500 .read_skb = vsock_read_skb,
1501 };
1502
vsock_transport_cancel_pkt(struct vsock_sock * vsk)1503 static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
1504 {
1505 const struct vsock_transport *transport = vsk->transport;
1506
1507 if (!transport || !transport->cancel_pkt)
1508 return -EOPNOTSUPP;
1509
1510 return transport->cancel_pkt(vsk);
1511 }
1512
vsock_connect_timeout(struct work_struct * work)1513 static void vsock_connect_timeout(struct work_struct *work)
1514 {
1515 struct sock *sk;
1516 struct vsock_sock *vsk;
1517
1518 vsk = container_of(work, struct vsock_sock, connect_work.work);
1519 sk = sk_vsock(vsk);
1520
1521 lock_sock(sk);
1522 if (sk->sk_state == TCP_SYN_SENT &&
1523 (sk->sk_shutdown != SHUTDOWN_MASK)) {
1524 sk->sk_state = TCP_CLOSE;
1525 sk->sk_socket->state = SS_UNCONNECTED;
1526 sk->sk_err = ETIMEDOUT;
1527 sk_error_report(sk);
1528 vsock_transport_cancel_pkt(vsk);
1529 }
1530 release_sock(sk);
1531
1532 sock_put(sk);
1533 }
1534
vsock_connect(struct socket * sock,struct sockaddr * addr,int addr_len,int flags)1535 static int vsock_connect(struct socket *sock, struct sockaddr *addr,
1536 int addr_len, int flags)
1537 {
1538 int err;
1539 struct sock *sk;
1540 struct vsock_sock *vsk;
1541 const struct vsock_transport *transport;
1542 struct sockaddr_vm *remote_addr;
1543 long timeout;
1544 DEFINE_WAIT(wait);
1545
1546 err = 0;
1547 sk = sock->sk;
1548 vsk = vsock_sk(sk);
1549
1550 lock_sock(sk);
1551
1552 /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
1553 switch (sock->state) {
1554 case SS_CONNECTED:
1555 err = -EISCONN;
1556 goto out;
1557 case SS_DISCONNECTING:
1558 err = -EINVAL;
1559 goto out;
1560 case SS_CONNECTING:
1561 /* This continues on so we can move sock into the SS_CONNECTED
1562 * state once the connection has completed (at which point err
1563 * will be set to zero also). Otherwise, we will either wait
1564 * for the connection or return -EALREADY should this be a
1565 * non-blocking call.
1566 */
1567 err = -EALREADY;
1568 if (flags & O_NONBLOCK)
1569 goto out;
1570 break;
1571 default:
1572 if ((sk->sk_state == TCP_LISTEN) ||
1573 vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
1574 err = -EINVAL;
1575 goto out;
1576 }
1577
1578 /* Set the remote address that we are connecting to. */
1579 memcpy(&vsk->remote_addr, remote_addr,
1580 sizeof(vsk->remote_addr));
1581
1582 err = vsock_assign_transport(vsk, NULL);
1583 if (err)
1584 goto out;
1585
1586 transport = vsk->transport;
1587
1588 /* The hypervisor and well-known contexts do not have socket
1589 * endpoints.
1590 */
1591 if (!transport ||
1592 !transport->stream_allow(remote_addr->svm_cid,
1593 remote_addr->svm_port)) {
1594 err = -ENETUNREACH;
1595 goto out;
1596 }
1597
1598 if (vsock_msgzerocopy_allow(transport)) {
1599 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
1600 } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
1601 /* If this option was set before 'connect()',
1602 * when transport was unknown, check that this
1603 * feature is supported here.
1604 */
1605 err = -EOPNOTSUPP;
1606 goto out;
1607 }
1608
1609 err = vsock_auto_bind(vsk);
1610 if (err)
1611 goto out;
1612
1613 sk->sk_state = TCP_SYN_SENT;
1614
1615 err = transport->connect(vsk);
1616 if (err < 0)
1617 goto out;
1618
1619 /* sk_err might have been set as a result of an earlier
1620 * (failed) connect attempt.
1621 */
1622 sk->sk_err = 0;
1623
1624 /* Mark sock as connecting and set the error code to in
1625 * progress in case this is a non-blocking connect.
1626 */
1627 sock->state = SS_CONNECTING;
1628 err = -EINPROGRESS;
1629 }
1630
1631 /* The receive path will handle all communication until we are able to
1632 * enter the connected state. Here we wait for the connection to be
1633 * completed or a notification of an error.
1634 */
1635 timeout = vsk->connect_timeout;
1636 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1637
1638 /* If the socket is already closing or it is in an error state, there
1639 * is no point in waiting.
1640 */
1641 while (sk->sk_state != TCP_ESTABLISHED &&
1642 sk->sk_state != TCP_CLOSING && sk->sk_err == 0) {
1643 if (flags & O_NONBLOCK) {
1644 /* If we're not going to block, we schedule a timeout
1645 * function to generate a timeout on the connection
1646 * attempt, in case the peer doesn't respond in a
1647 * timely manner. We hold on to the socket until the
1648 * timeout fires.
1649 */
1650 sock_hold(sk);
1651
1652 /* If the timeout function is already scheduled,
1653 * reschedule it, then ungrab the socket refcount to
1654 * keep it balanced.
1655 */
1656 if (mod_delayed_work(system_wq, &vsk->connect_work,
1657 timeout))
1658 sock_put(sk);
1659
1660 /* Skip ahead to preserve error code set above. */
1661 goto out_wait;
1662 }
1663
1664 release_sock(sk);
1665 timeout = schedule_timeout(timeout);
1666 lock_sock(sk);
1667
1668 if (signal_pending(current)) {
1669 err = sock_intr_errno(timeout);
1670 sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
1671 sock->state = SS_UNCONNECTED;
1672 vsock_transport_cancel_pkt(vsk);
1673 vsock_remove_connected(vsk);
1674 goto out_wait;
1675 } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
1676 err = -ETIMEDOUT;
1677 sk->sk_state = TCP_CLOSE;
1678 sock->state = SS_UNCONNECTED;
1679 vsock_transport_cancel_pkt(vsk);
1680 goto out_wait;
1681 }
1682
1683 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1684 }
1685
1686 if (sk->sk_err) {
1687 err = -sk->sk_err;
1688 sk->sk_state = TCP_CLOSE;
1689 sock->state = SS_UNCONNECTED;
1690 } else {
1691 err = 0;
1692 }
1693
1694 out_wait:
1695 finish_wait(sk_sleep(sk), &wait);
1696 out:
1697 release_sock(sk);
1698 return err;
1699 }
1700
vsock_accept(struct socket * sock,struct socket * newsock,struct proto_accept_arg * arg)1701 static int vsock_accept(struct socket *sock, struct socket *newsock,
1702 struct proto_accept_arg *arg)
1703 {
1704 struct sock *listener;
1705 int err;
1706 struct sock *connected;
1707 struct vsock_sock *vconnected;
1708 long timeout;
1709 DEFINE_WAIT(wait);
1710
1711 err = 0;
1712 listener = sock->sk;
1713
1714 lock_sock(listener);
1715
1716 if (!sock_type_connectible(sock->type)) {
1717 err = -EOPNOTSUPP;
1718 goto out;
1719 }
1720
1721 if (listener->sk_state != TCP_LISTEN) {
1722 err = -EINVAL;
1723 goto out;
1724 }
1725
1726 /* Wait for children sockets to appear; these are the new sockets
1727 * created upon connection establishment.
1728 */
1729 timeout = sock_rcvtimeo(listener, arg->flags & O_NONBLOCK);
1730 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1731
1732 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
1733 listener->sk_err == 0) {
1734 release_sock(listener);
1735 timeout = schedule_timeout(timeout);
1736 finish_wait(sk_sleep(listener), &wait);
1737 lock_sock(listener);
1738
1739 if (signal_pending(current)) {
1740 err = sock_intr_errno(timeout);
1741 goto out;
1742 } else if (timeout == 0) {
1743 err = -EAGAIN;
1744 goto out;
1745 }
1746
1747 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1748 }
1749 finish_wait(sk_sleep(listener), &wait);
1750
1751 if (listener->sk_err)
1752 err = -listener->sk_err;
1753
1754 if (connected) {
1755 sk_acceptq_removed(listener);
1756
1757 lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
1758 vconnected = vsock_sk(connected);
1759
1760 /* If the listener socket has received an error, then we should
1761 * reject this socket and return. Note that we simply mark the
1762 * socket rejected, drop our reference, and let the cleanup
1763 * function handle the cleanup; the fact that we found it in
1764 * the listener's accept queue guarantees that the cleanup
1765 * function hasn't run yet.
1766 */
1767 if (err) {
1768 vconnected->rejected = true;
1769 } else {
1770 newsock->state = SS_CONNECTED;
1771 sock_graft(connected, newsock);
1772 if (vsock_msgzerocopy_allow(vconnected->transport))
1773 set_bit(SOCK_SUPPORT_ZC,
1774 &connected->sk_socket->flags);
1775 }
1776
1777 release_sock(connected);
1778 sock_put(connected);
1779 }
1780
1781 out:
1782 release_sock(listener);
1783 return err;
1784 }
1785
vsock_listen(struct socket * sock,int backlog)1786 static int vsock_listen(struct socket *sock, int backlog)
1787 {
1788 int err;
1789 struct sock *sk;
1790 struct vsock_sock *vsk;
1791
1792 sk = sock->sk;
1793
1794 lock_sock(sk);
1795
1796 if (!sock_type_connectible(sk->sk_type)) {
1797 err = -EOPNOTSUPP;
1798 goto out;
1799 }
1800
1801 if (sock->state != SS_UNCONNECTED) {
1802 err = -EINVAL;
1803 goto out;
1804 }
1805
1806 vsk = vsock_sk(sk);
1807
1808 if (!vsock_addr_bound(&vsk->local_addr)) {
1809 err = -EINVAL;
1810 goto out;
1811 }
1812
1813 sk->sk_max_ack_backlog = backlog;
1814 sk->sk_state = TCP_LISTEN;
1815
1816 err = 0;
1817
1818 out:
1819 release_sock(sk);
1820 return err;
1821 }
1822
static void vsock_update_buffer_size(struct vsock_sock *vsk,
				     const struct vsock_transport *transport,
				     u64 val)
{
	if (val > vsk->buffer_max_size)
		val = vsk->buffer_max_size;

	if (val < vsk->buffer_min_size)
		val = vsk->buffer_min_size;

	if (val != vsk->buffer_size &&
	    transport && transport->notify_buffer_size)
		transport->notify_buffer_size(vsk, &val);

	vsk->buffer_size = val;
}

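/* Requested buffer sizes are clamped to [buffer_min_size, buffer_max_size]
 * by vsock_update_buffer_size() above before the transport is notified.
 * SOL_SOCKET options other than SO_ZEROCOPY are forwarded to the generic
 * sock_setsockopt() handler.
 */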
static int vsock_connectible_setsockopt(struct socket *sock,
					int level,
					int optname,
					sockptr_t optval,
					unsigned int optlen)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	const struct vsock_transport *transport;
	u64 val;

	if (level != AF_VSOCK && level != SOL_SOCKET)
		return -ENOPROTOOPT;

#define COPY_IN(_v)						\
	do {							\
		if (optlen < sizeof(_v)) {			\
			err = -EINVAL;				\
			goto exit;				\
		}						\
		if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) {	\
			err = -EFAULT;					\
			goto exit;					\
		}							\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	transport = vsk->transport;

	if (level == SOL_SOCKET) {
		int zerocopy;

		if (optname != SO_ZEROCOPY) {
			release_sock(sk);
			return sock_setsockopt(sock, level, optname, optval, optlen);
		}

		/* Use 'int' here: the variable used to set this option
		 * usually has this type.
		 */
		COPY_IN(zerocopy);

		if (zerocopy < 0 || zerocopy > 1) {
			err = -EINVAL;
			goto exit;
		}

		if (transport && !vsock_msgzerocopy_allow(transport)) {
			err = -EOPNOTSUPP;
			goto exit;
		}

		sock_valbool_flag(sk, SOCK_ZEROCOPY, zerocopy);
		goto exit;
	}

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		COPY_IN(val);
		vsock_update_buffer_size(vsk, transport, val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		COPY_IN(val);
		vsk->buffer_max_size = val;
		vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		COPY_IN(val);
		vsk->buffer_min_size = val;
		vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
	case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: {
		struct __kernel_sock_timeval tv;

		err = sock_copy_user_timeval(&tv, optval, optlen,
					     optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
		if (err)
			break;
		if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
		    tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
			vsk->connect_timeout = tv.tv_sec * HZ +
				DIV_ROUND_UP((unsigned long)tv.tv_usec, (USEC_PER_SEC / HZ));
			if (vsk->connect_timeout == 0)
				vsk->connect_timeout =
					VSOCK_DEFAULT_CONNECT_TIMEOUT;

		} else {
			err = -ERANGE;
		}
		break;
	}

	default:
		err = -ENOPROTOOPT;
		break;
	}

#undef COPY_IN

exit:
	release_sock(sk);
	return err;
}

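/* Illustrative userspace sketch (not part of this file): setting and
 * reading back the per-socket buffer size.  The option takes a 64-bit
 * value at level AF_VSOCK; after the round trip, sz holds the value as
 * clamped by vsock_update_buffer_size().
 *
 *	uint64_t sz = 256 * 1024;
 *	socklen_t len = sizeof(sz);
 *
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, &sz, sizeof(sz));
 *	getsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, &sz, &len);
 */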
static int vsock_connectible_getsockopt(struct socket *sock,
					int level, int optname,
					char __user *optval,
					int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct vsock_sock *vsk = vsock_sk(sk);

	union {
		u64 val64;
		struct old_timeval32 tm32;
		struct __kernel_old_timeval tm;
		struct __kernel_sock_timeval stm;
	} v;

	int lv = sizeof(v.val64);
	int len;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		v.val64 = vsk->buffer_size;
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		v.val64 = vsk->buffer_max_size;
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		v.val64 = vsk->buffer_min_size;
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
	case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD:
		lv = sock_get_timeout(vsk->connect_timeout, &v,
				      optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len < lv)
		return -EINVAL;
	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}

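/* Return-value contract (derived from the code below): on success,
 * SOCK_STREAM returns however many bytes were enqueued, which may be
 * fewer than requested; SOCK_SEQPACKET only reports the byte count when
 * the whole message was enqueued, otherwise the pending error is
 * returned even if some bytes were written.
 */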
static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg,
				     size_t len)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	const struct vsock_transport *transport;
	ssize_t total_written;
	long timeout;
	int err;
	struct vsock_transport_send_notify_data send_data;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	total_written = 0;
	err = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	transport = vsk->transport;

	/* Callers should not provide a destination with connection oriented
	 * sockets.
	 */
	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out;
	}

	/* Send data only if neither side has shut down this direction. */
	if (sk->sk_shutdown & SEND_SHUTDOWN ||
	    vsk->peer_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (!transport || sk->sk_state != TCP_ESTABLISHED ||
	    !vsock_addr_bound(&vsk->local_addr)) {
		err = -ENOTCONN;
		goto out;
	}

	if (!vsock_addr_bound(&vsk->remote_addr)) {
		err = -EDESTADDRREQ;
		goto out;
	}

	if (msg->msg_flags & MSG_ZEROCOPY &&
	    !vsock_msgzerocopy_allow(transport)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Wait for room in the produce queue to enqueue our user's data. */
	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	err = transport->notify_send_init(vsk, &send_data);
	if (err < 0)
		goto out;

	while (total_written < len) {
		ssize_t written;

		add_wait_queue(sk_sleep(sk), &wait);
		while (vsock_stream_has_space(vsk) == 0 &&
		       sk->sk_err == 0 &&
		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
		       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {

			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			err = transport->notify_send_pre_block(vsk, &send_data);
			if (err < 0) {
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}

			release_sock(sk);
			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
			lock_sock(sk);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			} else if (timeout == 0) {
				err = -EAGAIN;
				remove_wait_queue(sk_sleep(sk), &wait);
				goto out_err;
			}
		}
		remove_wait_queue(sk_sleep(sk), &wait);

		/* These checks occur both as part of and after the loop
		 * conditional since we need to check before and after
		 * sleeping.
		 */
		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
			err = -EPIPE;
			goto out_err;
		}

		err = transport->notify_send_pre_enqueue(vsk, &send_data);
		if (err < 0)
			goto out_err;

		/* Note that enqueue will only write as many bytes as are free
		 * in the produce queue, so we don't need to ensure len is
		 * smaller than the queue size.  It is the caller's
		 * responsibility to check how many bytes we were able to send.
		 */

		if (sk->sk_type == SOCK_SEQPACKET) {
			written = transport->seqpacket_enqueue(vsk,
						msg, len - total_written);
		} else {
			written = transport->stream_enqueue(vsk,
						msg, len - total_written);
		}

		if (written < 0) {
			err = written;
			goto out_err;
		}

		total_written += written;

		err = transport->notify_send_post_enqueue(
				vsk, written, &send_data);
		if (err < 0)
			goto out_err;
	}

out_err:
	if (total_written > 0) {
		/* Return number of written bytes only if:
		 * 1) SOCK_STREAM socket.
		 * 2) SOCK_SEQPACKET socket when whole buffer is sent.
		 */
		if (sk->sk_type == SOCK_STREAM || total_written == len)
			err = total_written;
	}
out:
	if (sk->sk_type == SOCK_STREAM)
		err = sk_stream_error(sk, msg->msg_flags, err);

	release_sock(sk);
	return err;
}
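
/* Illustrative userspace sketch (not part of this file): a
 * short-write-aware send loop over a non-blocking SOCK_STREAM vsock
 * socket, matching the partial-write semantics described above.
 *
 *	size_t off = 0;
 *	while (off < buf_len) {
 *		ssize_t n = send(fd, buf + off, buf_len - off, MSG_DONTWAIT);
 *		if (n < 0 && errno == EAGAIN)
 *			continue;	// or poll() for POLLOUT before retrying
 *		if (n < 0)
 *			break;		// real error
 *		off += n;
 *	}
 */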
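/* Wait until the socket has data to consume or can make no further
 * progress.  Returns the number of bytes available (> 0), 0 if the
 * socket saw an error or shutdown with nothing queued, or a negative
 * errno: -EAGAIN on timeout, sock_intr_errno() when a signal arrived,
 * or -ENOMEM for an internal transport error.
 */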
static int vsock_connectible_wait_data(struct sock *sk,
				       struct wait_queue_entry *wait,
				       long timeout,
				       struct vsock_transport_recv_notify_data *recv_data,
				       size_t target)
{
	const struct vsock_transport *transport;
	struct vsock_sock *vsk;
	s64 data;
	int err;

	vsk = vsock_sk(sk);
	err = 0;
	transport = vsk->transport;

	while (1) {
		prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE);
		data = vsock_connectible_has_data(vsk);
		if (data != 0)
			break;

		if (sk->sk_err != 0 ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    (vsk->peer_shutdown & SEND_SHUTDOWN)) {
			break;
		}

		/* Don't wait for non-blocking sockets. */
		if (timeout == 0) {
			err = -EAGAIN;
			break;
		}

		if (recv_data) {
			err = transport->notify_recv_pre_block(vsk, target, recv_data);
			if (err < 0)
				break;
		}

		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			break;
		} else if (timeout == 0) {
			err = -EAGAIN;
			break;
		}
	}

	finish_wait(sk_sleep(sk), wait);

	if (err)
		return err;

	/* Internal transport error when checking for available
	 * data.  XXX This should be changed to a connection
	 * reset in a later change.
	 */
	if (data < 0)
		return -ENOMEM;

	return data;
}

static int __vsock_stream_recvmsg(struct sock *sk, struct msghdr *msg,
				  size_t len, int flags)
{
	struct vsock_transport_recv_notify_data recv_data;
	const struct vsock_transport *transport;
	struct vsock_sock *vsk;
	ssize_t copied;
	size_t target;
	long timeout;
	int err;

	DEFINE_WAIT(wait);

	vsk = vsock_sk(sk);
	transport = vsk->transport;

	/* We must not copy less than target bytes into the user's buffer
	 * before returning successfully, so we wait for the consume queue to
	 * have that much data to consume before dequeueing.  Note that this
	 * makes it impossible to handle cases where target is greater than the
	 * queue size.
	 */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	if (target >= transport->stream_rcvhiwat(vsk)) {
		err = -ENOMEM;
		goto out;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	copied = 0;

	err = transport->notify_recv_init(vsk, target, &recv_data);
	if (err < 0)
		goto out;

	while (1) {
		ssize_t read;

		err = vsock_connectible_wait_data(sk, &wait, timeout,
						  &recv_data, target);
		if (err <= 0)
			break;

		err = transport->notify_recv_pre_dequeue(vsk, target,
							 &recv_data);
		if (err < 0)
			break;

		read = transport->stream_dequeue(vsk, msg, len - copied, flags);
		if (read < 0) {
			err = read;
			break;
		}

		copied += read;

		err = transport->notify_recv_post_dequeue(vsk, target, read,
						!(flags & MSG_PEEK), &recv_data);
		if (err < 0)
			goto out;

		if (read >= target || flags & MSG_PEEK)
			break;

		target -= read;
	}

	if (sk->sk_err)
		err = -sk->sk_err;
	else if (sk->sk_shutdown & RCV_SHUTDOWN)
		err = 0;

	if (copied > 0)
		err = copied;

out:
	return err;
}

static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
				     size_t len, int flags)
{
	const struct vsock_transport *transport;
	struct vsock_sock *vsk;
	ssize_t msg_len;
	long timeout;
	int err = 0;
	DEFINE_WAIT(wait);

	vsk = vsock_sk(sk);
	transport = vsk->transport;

	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	err = vsock_connectible_wait_data(sk, &wait, timeout, NULL, 0);
	if (err <= 0)
		goto out;

	msg_len = transport->seqpacket_dequeue(vsk, msg, flags);

	if (msg_len < 0) {
		err = msg_len;
		goto out;
	}

	if (sk->sk_err) {
		err = -sk->sk_err;
	} else if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
	} else {
		/* The caller set MSG_TRUNC, so return the real length of
		 * the packet.
		 */
		if (flags & MSG_TRUNC)
			err = msg_len;
		else
			err = len - msg_data_left(msg);

		/* Always set MSG_TRUNC if the real length of the packet is
		 * bigger than the user's buffer.
		 */
		if (msg_len > len)
			msg->msg_flags |= MSG_TRUNC;
	}

out:
	return err;
}

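/* Illustrative userspace sketch (not part of this file): using
 * MSG_TRUNC on a SOCK_SEQPACKET vsock socket to learn the full record
 * length even when the buffer is too small.
 *
 *	char buf[64];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *	// n is the real packet length; if n > sizeof(buf), the tail was
 *	// discarded, and MSG_TRUNC is also set in msg_flags when
 *	// recvmsg() is used instead of recv().
 */
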
int
__vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	const struct vsock_transport *transport;
	int err;

	sk = sock->sk;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR);

	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	transport = vsk->transport;

	if (!transport || sk->sk_state != TCP_ESTABLISHED) {
		/* Recvmsg is supposed to return 0 if a peer performs an
		 * orderly shutdown.  Differentiate between that case and when a
		 * peer has not connected or a local shutdown occurred with the
		 * SOCK_DONE flag.
		 */
		if (sock_flag(sk, SOCK_DONE))
			err = 0;
		else
			err = -ENOTCONN;

		goto out;
	}

	if (flags & MSG_OOB) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* We don't check the peer_shutdown flag here since the peer may
	 * actually have shut down while data that a local socket can still
	 * receive remains in the queue.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
		goto out;
	}

	/* It is valid on Linux to pass in a zero-length receive buffer.  This
	 * is not an error.  We may as well bail out now.
	 */
	if (!len) {
		err = 0;
		goto out;
	}

	if (sk->sk_type == SOCK_STREAM)
		err = __vsock_stream_recvmsg(sk, msg, len, flags);
	else
		err = __vsock_seqpacket_recvmsg(sk, msg, len, flags);

out:
	release_sock(sk);
	return err;
}

int
vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			  int flags)
{
#ifdef CONFIG_BPF_SYSCALL
	struct sock *sk = sock->sk;
	const struct proto *prot;

	prot = READ_ONCE(sk->sk_prot);
	if (prot != &vsock_proto)
		return prot->recvmsg(sk, msg, len, flags, NULL);
#endif

	return __vsock_connectible_recvmsg(sock, msg, len, flags);
}
EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg);

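/* SO_RCVLOWAT handler.  Values larger than the socket's buffer size could
 * never be satisfied, so they are rejected; the transport may veto or
 * account for the new threshold via notify_set_rcvlowat().  As in the
 * core socket code, 0 is mapped to 1.
 */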
static int vsock_set_rcvlowat(struct sock *sk, int val)
{
	const struct vsock_transport *transport;
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);

	if (val > vsk->buffer_size)
		return -EINVAL;

	transport = vsk->transport;

	if (transport && transport->notify_set_rcvlowat) {
		int err;

		err = transport->notify_set_rcvlowat(vsk, val);
		if (err)
			return err;
	}

	WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
	return 0;
}

static const struct proto_ops vsock_stream_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_connect,
	.socketpair = sock_no_socketpair,
	.accept = vsock_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = vsock_ioctl,
	.listen = vsock_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = vsock_connectible_setsockopt,
	.getsockopt = vsock_connectible_getsockopt,
	.sendmsg = vsock_connectible_sendmsg,
	.recvmsg = vsock_connectible_recvmsg,
	.mmap = sock_no_mmap,
	.set_rcvlowat = vsock_set_rcvlowat,
	.read_skb = vsock_read_skb,
};

static const struct proto_ops vsock_seqpacket_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_connect,
	.socketpair = sock_no_socketpair,
	.accept = vsock_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = vsock_ioctl,
	.listen = vsock_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = vsock_connectible_setsockopt,
	.getsockopt = vsock_connectible_getsockopt,
	.sendmsg = vsock_connectible_sendmsg,
	.recvmsg = vsock_connectible_recvmsg,
	.mmap = sock_no_mmap,
	.read_skb = vsock_read_skb,
};

static int vsock_create(struct net *net, struct socket *sock,
			int protocol, int kern)
{
	struct vsock_sock *vsk;
	struct sock *sk;
	int ret;

	if (!sock)
		return -EINVAL;

	if (protocol && protocol != PF_VSOCK)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &vsock_dgram_ops;
		break;
	case SOCK_STREAM:
		sock->ops = &vsock_stream_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &vsock_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sock->state = SS_UNCONNECTED;

	sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern);
	if (!sk)
		return -ENOMEM;

	vsk = vsock_sk(sk);

	if (sock->type == SOCK_DGRAM) {
		ret = vsock_assign_transport(vsk, NULL);
		if (ret < 0) {
			sock->sk = NULL;
			sock_put(sk);
			return ret;
		}
	}

	/* SOCK_DGRAM doesn't have 'setsockopt' callback set in its
	 * proto_ops, so there is no handler for custom logic.
	 */
	if (sock_type_connectible(sock->type))
		set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);

	vsock_insert_unbound(vsk);

	return 0;
}

static const struct net_proto_family vsock_family_ops = {
	.family = AF_VSOCK,
	.create = vsock_create,
	.owner = THIS_MODULE,
};

static long vsock_dev_do_ioctl(struct file *filp,
			       unsigned int cmd, void __user *ptr)
{
	u32 __user *p = ptr;
	int retval = 0;
	u32 cid;

	switch (cmd) {
	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
		/* To be compatible with the VMCI behavior, we prioritize the
		 * guest CID instead of the well-known host CID
		 * (VMADDR_CID_HOST).
		 */
		cid = vsock_registered_transport_cid(&transport_g2h);
		if (cid == VMADDR_CID_ANY)
			cid = vsock_registered_transport_cid(&transport_h2g);
		if (cid == VMADDR_CID_ANY)
			cid = vsock_registered_transport_cid(&transport_local);

		if (put_user(cid, p) != 0)
			retval = -EFAULT;
		break;

	default:
		retval = -ENOIOCTLCMD;
	}

	return retval;
}

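/* Illustrative userspace sketch (not part of this file): querying the
 * local CID through the misc device, as documented in vsock(7).
 *
 *	unsigned int cid;
 *	int dev = open("/dev/vsock", O_RDONLY);
 *
 *	ioctl(dev, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 */
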
static long vsock_dev_ioctl(struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long vsock_dev_compat_ioctl(struct file *filp,
				   unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
}
#endif

static const struct file_operations vsock_device_ops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vsock_dev_compat_ioctl,
#endif
	.open		= nonseekable_open,
};

static struct miscdevice vsock_device = {
	.name		= "vsock",
	.fops		= &vsock_device_ops,
};

static int __init vsock_init(void)
{
	int err = 0;

	vsock_init_tables();

	vsock_proto.owner = THIS_MODULE;
	vsock_device.minor = MISC_DYNAMIC_MINOR;
	err = misc_register(&vsock_device);
	if (err) {
		pr_err("Failed to register misc device\n");
		goto err_reset_transport;
	}

	err = proto_register(&vsock_proto, 1);	/* we want our slab */
	if (err) {
		pr_err("Cannot register vsock protocol\n");
		goto err_deregister_misc;
	}

	err = sock_register(&vsock_family_ops);
	if (err) {
		pr_err("could not register af_vsock (%d) address family: %d\n",
		       AF_VSOCK, err);
		goto err_unregister_proto;
	}

	vsock_bpf_build_proto();

	return 0;

err_unregister_proto:
	proto_unregister(&vsock_proto);
err_deregister_misc:
	misc_deregister(&vsock_device);
err_reset_transport:
	return err;
}

static void __exit vsock_exit(void)
{
	misc_deregister(&vsock_device);
	sock_unregister(AF_VSOCK);
	proto_unregister(&vsock_proto);
}

const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk)
{
	return vsk->transport;
}
EXPORT_SYMBOL_GPL(vsock_core_get_transport);

int vsock_core_register(const struct vsock_transport *t, int features)
{
	const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local;
	int err = mutex_lock_interruptible(&vsock_register_mutex);

	if (err)
		return err;

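	/* Work on local copies of the four transport slots so the update
	 * is all-or-nothing: if any requested slot is already taken we
	 * bail out below without having modified the globals.
	 */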
	t_h2g = transport_h2g;
	t_g2h = transport_g2h;
	t_dgram = transport_dgram;
	t_local = transport_local;

	if (features & VSOCK_TRANSPORT_F_H2G) {
		if (t_h2g) {
			err = -EBUSY;
			goto err_busy;
		}
		t_h2g = t;
	}

	if (features & VSOCK_TRANSPORT_F_G2H) {
		if (t_g2h) {
			err = -EBUSY;
			goto err_busy;
		}
		t_g2h = t;
	}

	if (features & VSOCK_TRANSPORT_F_DGRAM) {
		if (t_dgram) {
			err = -EBUSY;
			goto err_busy;
		}
		t_dgram = t;
	}

	if (features & VSOCK_TRANSPORT_F_LOCAL) {
		if (t_local) {
			err = -EBUSY;
			goto err_busy;
		}
		t_local = t;
	}

	transport_h2g = t_h2g;
	transport_g2h = t_g2h;
	transport_dgram = t_dgram;
	transport_local = t_local;

err_busy:
	mutex_unlock(&vsock_register_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(vsock_core_register);

void vsock_core_unregister(const struct vsock_transport *t)
{
	mutex_lock(&vsock_register_mutex);

	if (transport_h2g == t)
		transport_h2g = NULL;

	if (transport_g2h == t)
		transport_g2h = NULL;

	if (transport_dgram == t)
		transport_dgram = NULL;

	if (transport_local == t)
		transport_local = NULL;

	mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_unregister);

module_init(vsock_init);
module_exit(vsock_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");