1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * VMware vSockets Driver
4 *
5 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
6 */
7
8 /* Implementation notes:
9 *
10 * - There are two kinds of sockets: those created by user action (such as
11 * calling socket(2)) and those created by incoming connection request packets.
12 *
13 * - There are two "global" tables, one for bound sockets (sockets that have
14 * specified an address that they are responsible for) and one for connected
15 * sockets (sockets that have established a connection with another socket).
16 * These tables are "global" in that all sockets on the system are placed
17 * within them. - Note, though, that the bound table contains an extra entry
18 * for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
19 * that list. The bound table is used solely for lookup of sockets when packets
20 * are received and that's not necessary for SOCK_DGRAM sockets since we create
21 * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
22 * sockets out of the bound hash buckets will reduce the chance of collisions
23 * when looking for SOCK_STREAM sockets and prevents us from having to check the
24 * socket type in the hash table lookups.
25 *
26 * - Sockets created by user action will either be "client" sockets that
27 * initiate a connection or "server" sockets that listen for connections; we do
28 * not support simultaneous connects (two "client" sockets connecting).
29 *
30 * - "Server" sockets are referred to as listener sockets throughout this
31 * implementation because they are in the TCP_LISTEN state. When a
32 * connection request is received (the second kind of socket mentioned above),
33 * we create a new socket and refer to it as a pending socket. These pending
34 * sockets are placed on the pending connection list of the listener socket.
35 * When future packets are received for the address the listener socket is
36 * bound to, we check if the source of the packet is from one that has an
37 * existing pending connection. If it does, we process the packet for the
38 * pending socket. When that socket reaches the connected state, it is removed
39 * from the listener socket's pending list and enqueued in the listener
40 * socket's accept queue. Callers of accept(2) will accept connected sockets
41 * from the listener socket's accept queue. If the socket cannot be accepted
42 * for some reason then it is marked rejected. Once the connection is
43 * accepted, it is owned by the user process and the responsibility for cleanup
44 * falls with that user process.
45 *
46 * - It is possible that these pending sockets will never reach the connected
47 * state; in fact, we may never receive another packet after the connection
48 * request. Because of this, we must schedule a cleanup function to run in the
49 * future, after some amount of time passes where a connection should have been
50 * established. This function ensures that the socket is off all lists so it
51 * cannot be retrieved, then drops all references to the socket so it is cleaned
52 * up (sock_put() -> sk_free() -> our sk_destruct implementation). Note this
53 * function will also clean up rejected sockets, those that reach the connected
54 * state but leave it before they have been accepted.
55 *
56 * - Lock ordering for pending or accept queue sockets is:
57 *
58 * lock_sock(listener);
59 * lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
60 *
61 * Using explicit nested locking keeps lockdep happy since normally only one
62 * lock of a given class may be taken at a time.
63 *
64 * - Sockets created by user action will be cleaned up when the user process
65 * calls close(2), causing our release implementation to be called. Our release
66 * implementation will perform some cleanup then drop the last reference so our
67 * sk_destruct implementation is invoked. Our sk_destruct implementation will
68 * perform additional cleanup that's common for both types of sockets.
69 *
70 * - A socket's reference count is what ensures that the structure won't be
71 * freed. Each entry in a list (such as the "global" bound and connected tables
72 * and the listener socket's pending list and connected queue) ensures a
73 * reference. When we defer work until process context and pass a socket as our
74 * argument, we must ensure the reference count is increased to ensure the
75 * socket isn't freed before the function is run; the deferred function will
76 * then drop the reference.
77 *
78 * - sk->sk_state uses the TCP state constants because they are widely used by
79 * other address families and exposed to userspace tools like ss(8):
80 *
81 * TCP_CLOSE - unconnected
82 * TCP_SYN_SENT - connecting
83 * TCP_ESTABLISHED - connected
84 * TCP_CLOSING - disconnecting
85 * TCP_LISTEN - listening
86 */
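
/* For illustration only (not part of this file's build): a minimal userspace
 * sketch of the client connect path described above, assuming the standard
 * <sys/socket.h> and <linux/vm_sockets.h> UAPI definitions; the CID and port
 * values are arbitrary examples:
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_HOST,
 *		.svm_port = 1234,
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * While the connect is in flight the socket sits in TCP_SYN_SENT; on success
 * it moves to TCP_ESTABLISHED, matching the state list above.
 */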
87
88 #include <linux/compat.h>
89 #include <linux/types.h>
90 #include <linux/bitops.h>
91 #include <linux/cred.h>
92 #include <linux/errqueue.h>
93 #include <linux/init.h>
94 #include <linux/io.h>
95 #include <linux/kernel.h>
96 #include <linux/sched/signal.h>
97 #include <linux/kmod.h>
98 #include <linux/list.h>
99 #include <linux/miscdevice.h>
100 #include <linux/module.h>
101 #include <linux/mutex.h>
102 #include <linux/net.h>
103 #include <linux/poll.h>
104 #include <linux/random.h>
105 #include <linux/skbuff.h>
106 #include <linux/smp.h>
107 #include <linux/socket.h>
108 #include <linux/stddef.h>
109 #include <linux/unistd.h>
110 #include <linux/wait.h>
111 #include <linux/workqueue.h>
112 #include <net/sock.h>
113 #include <net/af_vsock.h>
114 #include <uapi/linux/vm_sockets.h>
115 #include <uapi/asm-generic/ioctls.h>
116
117 static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
118 static void vsock_sk_destruct(struct sock *sk);
119 static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
120 static void vsock_close(struct sock *sk, long timeout);
121
122 /* Protocol family. */
123 struct proto vsock_proto = {
124 .name = "AF_VSOCK",
125 .owner = THIS_MODULE,
126 .obj_size = sizeof(struct vsock_sock),
127 .close = vsock_close,
128 #ifdef CONFIG_BPF_SYSCALL
129 .psock_update_sk_prot = vsock_bpf_update_proto,
130 #endif
131 };
132
133 /* The default peer timeout indicates how long we will wait for a peer response
134 * to a control message.
135 */
136 #define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
137
138 #define VSOCK_DEFAULT_BUFFER_SIZE (1024 * 256)
139 #define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
140 #define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128
141
142 /* Transport used for host->guest communication */
143 static const struct vsock_transport *transport_h2g;
144 /* Transport used for guest->host communication */
145 static const struct vsock_transport *transport_g2h;
146 /* Transport used for DGRAM communication */
147 static const struct vsock_transport *transport_dgram;
148 /* Transport used for local communication */
149 static const struct vsock_transport *transport_local;
150 static DEFINE_MUTEX(vsock_register_mutex);
151
152 /**** UTILS ****/
153
154 /* Each bound VSocket is stored in the bind hash table and each connected
155 * VSocket is stored in the connected hash table.
156 *
157 * Unbound sockets are all put on the same list attached to the end of the hash
158 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
159 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
160 * represents the list that addr hashes to).
161 *
162 * Specifically, we initialize the vsock_bind_table array to a size of
163 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
164 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
165 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
166 * mods with VSOCK_HASH_SIZE to ensure this.
167 */
168 #define MAX_PORT_RETRIES 24
169
170 #define VSOCK_HASH(addr) ((addr)->svm_port % VSOCK_HASH_SIZE)
171 #define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
172 #define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])
173
174 /* XXX This can probably be implemented in a better way. */
175 #define VSOCK_CONN_HASH(src, dst) \
176 (((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
177 #define vsock_connected_sockets(src, dst) \
178 (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
179 #define vsock_connected_sockets_vsk(vsk) \
180 vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)
181
182 struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
183 EXPORT_SYMBOL_GPL(vsock_bind_table);
184 struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
185 EXPORT_SYMBOL_GPL(vsock_connected_table);
186 DEFINE_SPINLOCK(vsock_table_lock);
187 EXPORT_SYMBOL_GPL(vsock_table_lock);
188
189 /* Autobind this socket to the local address if necessary. */
190 static int vsock_auto_bind(struct vsock_sock *vsk)
191 {
192 struct sock *sk = sk_vsock(vsk);
193 struct sockaddr_vm local_addr;
194
195 if (vsock_addr_bound(&vsk->local_addr))
196 return 0;
197 vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
198 return __vsock_bind(sk, &local_addr);
199 }
200
201 static void vsock_init_tables(void)
202 {
203 int i;
204
205 for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
206 INIT_LIST_HEAD(&vsock_bind_table[i]);
207
208 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
209 INIT_LIST_HEAD(&vsock_connected_table[i]);
210 }
211
212 static void __vsock_insert_bound(struct list_head *list,
213 struct vsock_sock *vsk)
214 {
215 sock_hold(&vsk->sk);
216 list_add(&vsk->bound_table, list);
217 }
218
219 static void __vsock_insert_connected(struct list_head *list,
220 struct vsock_sock *vsk)
221 {
222 sock_hold(&vsk->sk);
223 list_add(&vsk->connected_table, list);
224 }
225
226 static void __vsock_remove_bound(struct vsock_sock *vsk)
227 {
228 list_del_init(&vsk->bound_table);
229 sock_put(&vsk->sk);
230 }
231
232 static void __vsock_remove_connected(struct vsock_sock *vsk)
233 {
234 list_del_init(&vsk->connected_table);
235 sock_put(&vsk->sk);
236 }
237
238 static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
239 {
240 struct vsock_sock *vsk;
241
242 list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
243 if (vsock_addr_equals_addr(addr, &vsk->local_addr))
244 return sk_vsock(vsk);
245
246 if (addr->svm_port == vsk->local_addr.svm_port &&
247 (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
248 addr->svm_cid == VMADDR_CID_ANY))
249 return sk_vsock(vsk);
250 }
251
252 return NULL;
253 }
254
255 static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
256 struct sockaddr_vm *dst)
257 {
258 struct vsock_sock *vsk;
259
260 list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
261 connected_table) {
262 if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
263 dst->svm_port == vsk->local_addr.svm_port) {
264 return sk_vsock(vsk);
265 }
266 }
267
268 return NULL;
269 }
270
271 static void vsock_insert_unbound(struct vsock_sock *vsk)
272 {
273 spin_lock_bh(&vsock_table_lock);
274 __vsock_insert_bound(vsock_unbound_sockets, vsk);
275 spin_unlock_bh(&vsock_table_lock);
276 }
277
278 void vsock_insert_connected(struct vsock_sock *vsk)
279 {
280 struct list_head *list = vsock_connected_sockets(
281 &vsk->remote_addr, &vsk->local_addr);
282
283 spin_lock_bh(&vsock_table_lock);
284 __vsock_insert_connected(list, vsk);
285 spin_unlock_bh(&vsock_table_lock);
286 }
287 EXPORT_SYMBOL_GPL(vsock_insert_connected);
288
289 void vsock_remove_bound(struct vsock_sock *vsk)
290 {
291 spin_lock_bh(&vsock_table_lock);
292 if (__vsock_in_bound_table(vsk))
293 __vsock_remove_bound(vsk);
294 spin_unlock_bh(&vsock_table_lock);
295 }
296 EXPORT_SYMBOL_GPL(vsock_remove_bound);
297
298 void vsock_remove_connected(struct vsock_sock *vsk)
299 {
300 spin_lock_bh(&vsock_table_lock);
301 if (__vsock_in_connected_table(vsk))
302 __vsock_remove_connected(vsk);
303 spin_unlock_bh(&vsock_table_lock);
304 }
305 EXPORT_SYMBOL_GPL(vsock_remove_connected);
306
307 struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
308 {
309 struct sock *sk;
310
311 spin_lock_bh(&vsock_table_lock);
312 sk = __vsock_find_bound_socket(addr);
313 if (sk)
314 sock_hold(sk);
315
316 spin_unlock_bh(&vsock_table_lock);
317
318 return sk;
319 }
320 EXPORT_SYMBOL_GPL(vsock_find_bound_socket);
321
322 struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
323 struct sockaddr_vm *dst)
324 {
325 struct sock *sk;
326
327 spin_lock_bh(&vsock_table_lock);
328 sk = __vsock_find_connected_socket(src, dst);
329 if (sk)
330 sock_hold(sk);
331
332 spin_unlock_bh(&vsock_table_lock);
333
334 return sk;
335 }
336 EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
337
338 void vsock_remove_sock(struct vsock_sock *vsk)
339 {
340 /* Transport reassignment must not remove the binding. */
341 if (sock_flag(sk_vsock(vsk), SOCK_DEAD))
342 vsock_remove_bound(vsk);
343
344 vsock_remove_connected(vsk);
345 }
346 EXPORT_SYMBOL_GPL(vsock_remove_sock);
347
348 void vsock_for_each_connected_socket(struct vsock_transport *transport,
349 void (*fn)(struct sock *sk))
350 {
351 int i;
352
353 spin_lock_bh(&vsock_table_lock);
354
355 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
356 struct vsock_sock *vsk;
357 list_for_each_entry(vsk, &vsock_connected_table[i],
358 connected_table) {
359 if (vsk->transport != transport)
360 continue;
361
362 fn(sk_vsock(vsk));
363 }
364 }
365
366 spin_unlock_bh(&vsock_table_lock);
367 }
368 EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);
369
370 void vsock_add_pending(struct sock *listener, struct sock *pending)
371 {
372 struct vsock_sock *vlistener;
373 struct vsock_sock *vpending;
374
375 vlistener = vsock_sk(listener);
376 vpending = vsock_sk(pending);
377
378 sock_hold(pending);
379 sock_hold(listener);
380 list_add_tail(&vpending->pending_links, &vlistener->pending_links);
381 }
382 EXPORT_SYMBOL_GPL(vsock_add_pending);
383
384 void vsock_remove_pending(struct sock *listener, struct sock *pending)
385 {
386 struct vsock_sock *vpending = vsock_sk(pending);
387
388 list_del_init(&vpending->pending_links);
389 sock_put(listener);
390 sock_put(pending);
391 }
392 EXPORT_SYMBOL_GPL(vsock_remove_pending);
393
394 void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
395 {
396 struct vsock_sock *vlistener;
397 struct vsock_sock *vconnected;
398
399 vlistener = vsock_sk(listener);
400 vconnected = vsock_sk(connected);
401
402 sock_hold(connected);
403 sock_hold(listener);
404 list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
405 }
406 EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
407
408 static bool vsock_use_local_transport(unsigned int remote_cid)
409 {
410 lockdep_assert_held(&vsock_register_mutex);
411
412 if (!transport_local)
413 return false;
414
415 if (remote_cid == VMADDR_CID_LOCAL)
416 return true;
417
418 if (transport_g2h) {
419 return remote_cid == transport_g2h->get_local_cid();
420 } else {
421 return remote_cid == VMADDR_CID_HOST;
422 }
423 }
424
425 static void vsock_deassign_transport(struct vsock_sock *vsk)
426 {
427 if (!vsk->transport)
428 return;
429
430 vsk->transport->destruct(vsk);
431 module_put(vsk->transport->module);
432 vsk->transport = NULL;
433 }
434
435 /* Assign a transport to a socket and call the .init transport callback.
436 *
437 * Note: for connection oriented sockets this must be called when vsk->remote_addr
438 * is set (e.g. during the connect() or when a connection request on a listener
439 * socket is received).
440 * The vsk->remote_addr is used to decide which transport to use:
441 * - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if
442 * g2h is not loaded, will use local transport;
443 * - remote CID <= VMADDR_CID_HOST or h2g is not loaded or remote flags field
444 * includes VMADDR_FLAG_TO_HOST flag value, will use guest->host transport;
445 * - remote CID > VMADDR_CID_HOST will use host->guest transport;
446 */
447 int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
448 {
449 const struct vsock_transport *new_transport;
450 struct sock *sk = sk_vsock(vsk);
451 unsigned int remote_cid = vsk->remote_addr.svm_cid;
452 __u8 remote_flags;
453 int ret;
454
455 /* If the packet is coming with the source and destination CIDs higher
456 * than VMADDR_CID_HOST, then a vsock channel where all the packets are
457 * forwarded to the host should be established. Then the host will
458 * need to forward the packets to the guest.
459 *
460 * The flag is set on the (listen) receive path (psk is not NULL). On
461 * the connect path the flag can be set by the user space application.
462 */
463 if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST &&
464 vsk->remote_addr.svm_cid > VMADDR_CID_HOST)
465 vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST;
466
467 remote_flags = vsk->remote_addr.svm_flags;
468
469 mutex_lock(&vsock_register_mutex);
470
471 switch (sk->sk_type) {
472 case SOCK_DGRAM:
473 new_transport = transport_dgram;
474 break;
475 case SOCK_STREAM:
476 case SOCK_SEQPACKET:
477 if (vsock_use_local_transport(remote_cid))
478 new_transport = transport_local;
479 else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g ||
480 (remote_flags & VMADDR_FLAG_TO_HOST))
481 new_transport = transport_g2h;
482 else
483 new_transport = transport_h2g;
484 break;
485 default:
486 ret = -ESOCKTNOSUPPORT;
487 goto err;
488 }
489
490 if (vsk->transport) {
491 if (vsk->transport == new_transport) {
492 ret = 0;
493 goto err;
494 }
495
496 /* transport->release() must be called with sock lock acquired.
497 * This path can only be taken during vsock_connect(), where we
498 * have already held the sock lock. In the other cases, this
499 * function is called on a new socket which is not assigned to
500 * any transport.
501 */
502 vsk->transport->release(vsk);
503 vsock_deassign_transport(vsk);
504
505 /* transport's release() and destruct() can touch some socket
506 * state, since we are reassigning the socket to a new transport
507 * during vsock_connect(), let's reset these fields to have a
508 * clean state.
509 */
510 sock_reset_flag(sk, SOCK_DONE);
511 sk->sk_state = TCP_CLOSE;
512 vsk->peer_shutdown = 0;
513 }
514
515 /* We increase the module refcnt to prevent the transport unloading
516 * while there are open sockets assigned to it.
517 */
518 if (!new_transport || !try_module_get(new_transport->module)) {
519 ret = -ENODEV;
520 goto err;
521 }
522
523 /* It's safe to release the mutex after a successful try_module_get().
524 * Whichever transport `new_transport` points at, it won't go away until
525 * the last module_put() below or in vsock_deassign_transport().
526 */
527 mutex_unlock(&vsock_register_mutex);
528
529 if (sk->sk_type == SOCK_SEQPACKET) {
530 if (!new_transport->seqpacket_allow ||
531 !new_transport->seqpacket_allow(remote_cid)) {
532 module_put(new_transport->module);
533 return -ESOCKTNOSUPPORT;
534 }
535 }
536
537 ret = new_transport->init(vsk, psk);
538 if (ret) {
539 module_put(new_transport->module);
540 return ret;
541 }
542
543 vsk->transport = new_transport;
544
545 return 0;
546 err:
547 mutex_unlock(&vsock_register_mutex);
548 return ret;
549 }
550 EXPORT_SYMBOL_GPL(vsock_assign_transport);
551
552 /*
553 * Provide safe access to static transport_{h2g,g2h,dgram,local} callbacks.
554 * Otherwise we may race with module removal. Do not use on `vsk->transport`.
555 */
556 static u32 vsock_registered_transport_cid(const struct vsock_transport **transport)
557 {
558 u32 cid = VMADDR_CID_ANY;
559
560 mutex_lock(&vsock_register_mutex);
561 if (*transport)
562 cid = (*transport)->get_local_cid();
563 mutex_unlock(&vsock_register_mutex);
564
565 return cid;
566 }
567
568 bool vsock_find_cid(unsigned int cid)
569 {
570 if (cid == vsock_registered_transport_cid(&transport_g2h))
571 return true;
572
573 if (transport_h2g && cid == VMADDR_CID_HOST)
574 return true;
575
576 if (transport_local && cid == VMADDR_CID_LOCAL)
577 return true;
578
579 return false;
580 }
581 EXPORT_SYMBOL_GPL(vsock_find_cid);
582
583 static struct sock *vsock_dequeue_accept(struct sock *listener)
584 {
585 struct vsock_sock *vlistener;
586 struct vsock_sock *vconnected;
587
588 vlistener = vsock_sk(listener);
589
590 if (list_empty(&vlistener->accept_queue))
591 return NULL;
592
593 vconnected = list_entry(vlistener->accept_queue.next,
594 struct vsock_sock, accept_queue);
595
596 list_del_init(&vconnected->accept_queue);
597 sock_put(listener);
598 /* The caller will need a reference on the connected socket so we let
599 * it call sock_put().
600 */
601
602 return sk_vsock(vconnected);
603 }
604
605 static bool vsock_is_accept_queue_empty(struct sock *sk)
606 {
607 struct vsock_sock *vsk = vsock_sk(sk);
608 return list_empty(&vsk->accept_queue);
609 }
610
611 static bool vsock_is_pending(struct sock *sk)
612 {
613 struct vsock_sock *vsk = vsock_sk(sk);
614 return !list_empty(&vsk->pending_links);
615 }
616
617 static int vsock_send_shutdown(struct sock *sk, int mode)
618 {
619 struct vsock_sock *vsk = vsock_sk(sk);
620
621 if (!vsk->transport)
622 return -ENODEV;
623
624 return vsk->transport->shutdown(vsk, mode);
625 }
626
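/* Delayed work scheduled for pending sockets (see "Implementation notes"
 * above): if the connection was never completed, or it was rejected, take the
 * socket off the listener's pending list and the connected table and drop the
 * references so it can be freed.
 */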
627 static void vsock_pending_work(struct work_struct *work)
628 {
629 struct sock *sk;
630 struct sock *listener;
631 struct vsock_sock *vsk;
632 bool cleanup;
633
634 vsk = container_of(work, struct vsock_sock, pending_work.work);
635 sk = sk_vsock(vsk);
636 listener = vsk->listener;
637 cleanup = true;
638
639 lock_sock(listener);
640 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
641
642 if (vsock_is_pending(sk)) {
643 vsock_remove_pending(listener, sk);
644
645 sk_acceptq_removed(listener);
646 } else if (!vsk->rejected) {
647 /* We are not on the pending list and accept() did not reject
648 * us, so we must have been accepted by our user process. We
649 * just need to drop our references to the sockets and be on
650 * our way.
651 */
652 cleanup = false;
653 goto out;
654 }
655
656 /* We need to remove ourself from the global connected sockets list so
657 * incoming packets can't find this socket, and to reduce the reference
658 * count.
659 */
660 vsock_remove_connected(vsk);
661
662 sk->sk_state = TCP_CLOSE;
663
664 out:
665 release_sock(sk);
666 release_sock(listener);
667 if (cleanup)
668 sock_put(sk);
669
670 sock_put(sk);
671 sock_put(listener);
672 }
673
674 /**** SOCKET OPERATIONS ****/
675
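/* Bind a connection oriented socket. If VMADDR_PORT_ANY was requested, pick an
 * unused port above LAST_RESERVED_PORT; binding to a port in the reserved
 * range requires CAP_NET_BIND_SERVICE. Called with vsock_table_lock held.
 */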
676 static int __vsock_bind_connectible(struct vsock_sock *vsk,
677 struct sockaddr_vm *addr)
678 {
679 static u32 port;
680 struct sockaddr_vm new_addr;
681
682 if (!port)
683 port = get_random_u32_above(LAST_RESERVED_PORT);
684
685 vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
686
687 if (addr->svm_port == VMADDR_PORT_ANY) {
688 bool found = false;
689 unsigned int i;
690
691 for (i = 0; i < MAX_PORT_RETRIES; i++) {
692 if (port <= LAST_RESERVED_PORT)
693 port = LAST_RESERVED_PORT + 1;
694
695 new_addr.svm_port = port++;
696
697 if (!__vsock_find_bound_socket(&new_addr)) {
698 found = true;
699 break;
700 }
701 }
702
703 if (!found)
704 return -EADDRNOTAVAIL;
705 } else {
706 /* If port is in reserved range, ensure caller
707 * has necessary privileges.
708 */
709 if (addr->svm_port <= LAST_RESERVED_PORT &&
710 !capable(CAP_NET_BIND_SERVICE)) {
711 return -EACCES;
712 }
713
714 if (__vsock_find_bound_socket(&new_addr))
715 return -EADDRINUSE;
716 }
717
718 vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);
719
720 /* Remove connection oriented sockets from the unbound list and add them
721 * to the hash table for easy lookup by their address. The unbound list
722 * is simply an extra entry at the end of the hash table, a trick used
723 * by AF_UNIX.
724 */
725 __vsock_remove_bound(vsk);
726 __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);
727
728 return 0;
729 }
730
731 static int __vsock_bind_dgram(struct vsock_sock *vsk,
732 struct sockaddr_vm *addr)
733 {
734 return vsk->transport->dgram_bind(vsk, addr);
735 }
736
737 static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
738 {
739 struct vsock_sock *vsk = vsock_sk(sk);
740 int retval;
741
742 /* First ensure this socket isn't already bound. */
743 if (vsock_addr_bound(&vsk->local_addr))
744 return -EINVAL;
745
746 /* Now bind to the provided address or select appropriate values if
747 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
748 * like AF_INET prevents binding to a non-local IP address (in most
749 * cases), we only allow binding to a local CID.
750 */
751 if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid))
752 return -EADDRNOTAVAIL;
753
754 switch (sk->sk_socket->type) {
755 case SOCK_STREAM:
756 case SOCK_SEQPACKET:
757 spin_lock_bh(&vsock_table_lock);
758 retval = __vsock_bind_connectible(vsk, addr);
759 spin_unlock_bh(&vsock_table_lock);
760 break;
761
762 case SOCK_DGRAM:
763 retval = __vsock_bind_dgram(vsk, addr);
764 break;
765
766 default:
767 retval = -EINVAL;
768 break;
769 }
770
771 return retval;
772 }
773
774 static void vsock_connect_timeout(struct work_struct *work);
775
776 static struct sock *__vsock_create(struct net *net,
777 struct socket *sock,
778 struct sock *parent,
779 gfp_t priority,
780 unsigned short type,
781 int kern)
782 {
783 struct sock *sk;
784 struct vsock_sock *psk;
785 struct vsock_sock *vsk;
786
787 sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
788 if (!sk)
789 return NULL;
790
791 sock_init_data(sock, sk);
792
793 /* sk->sk_type is normally set in sock_init_data, but only if sock is
794 * non-NULL. We make sure that our sockets always have a type by
795 * setting it here if needed.
796 */
797 if (!sock)
798 sk->sk_type = type;
799
800 vsk = vsock_sk(sk);
801 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
802 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
803
804 sk->sk_destruct = vsock_sk_destruct;
805 sk->sk_backlog_rcv = vsock_queue_rcv_skb;
806 sock_reset_flag(sk, SOCK_DONE);
807
808 INIT_LIST_HEAD(&vsk->bound_table);
809 INIT_LIST_HEAD(&vsk->connected_table);
810 vsk->listener = NULL;
811 INIT_LIST_HEAD(&vsk->pending_links);
812 INIT_LIST_HEAD(&vsk->accept_queue);
813 vsk->rejected = false;
814 vsk->sent_request = false;
815 vsk->ignore_connecting_rst = false;
816 vsk->peer_shutdown = 0;
817 INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
818 INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
819
820 psk = parent ? vsock_sk(parent) : NULL;
821 if (parent) {
822 vsk->trusted = psk->trusted;
823 vsk->owner = get_cred(psk->owner);
824 vsk->connect_timeout = psk->connect_timeout;
825 vsk->buffer_size = psk->buffer_size;
826 vsk->buffer_min_size = psk->buffer_min_size;
827 vsk->buffer_max_size = psk->buffer_max_size;
828 security_sk_clone(parent, sk);
829 } else {
830 vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
831 vsk->owner = get_current_cred();
832 vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
833 vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;
834 vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE;
835 vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE;
836 }
837
838 return sk;
839 }
840
841 static bool sock_type_connectible(u16 type)
842 {
843 return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET);
844 }
845
846 static void __vsock_release(struct sock *sk, int level)
847 {
848 struct vsock_sock *vsk;
849 struct sock *pending;
850
851 vsk = vsock_sk(sk);
852 pending = NULL; /* Compiler warning. */
853
854 /* When "level" is SINGLE_DEPTH_NESTING, use the nested
855 * version to avoid the warning "possible recursive locking
856 * detected". When "level" is 0, lock_sock_nested(sk, level)
857 * is the same as lock_sock(sk).
858 */
859 lock_sock_nested(sk, level);
860
861 /* Indicate to vsock_remove_sock() that the socket is being released and
862 * can be removed from the bound_table, unlike the transport reassignment
863 * case, where the socket must remain bound despite vsock_remove_sock()
864 * being called from the transport release() callback.
865 */
866 sock_set_flag(sk, SOCK_DEAD);
867
868 if (vsk->transport)
869 vsk->transport->release(vsk);
870 else if (sock_type_connectible(sk->sk_type))
871 vsock_remove_sock(vsk);
872
873 sock_orphan(sk);
874 sk->sk_shutdown = SHUTDOWN_MASK;
875
876 skb_queue_purge(&sk->sk_receive_queue);
877
878 /* Clean up any sockets that never were accepted. */
879 while ((pending = vsock_dequeue_accept(sk)) != NULL) {
880 __vsock_release(pending, SINGLE_DEPTH_NESTING);
881 sock_put(pending);
882 }
883
884 release_sock(sk);
885 sock_put(sk);
886 }
887
888 static void vsock_sk_destruct(struct sock *sk)
889 {
890 struct vsock_sock *vsk = vsock_sk(sk);
891
892 /* Flush MSG_ZEROCOPY leftovers. */
893 __skb_queue_purge(&sk->sk_error_queue);
894
895 vsock_deassign_transport(vsk);
896
897 /* When clearing these addresses, there's no need to set the family and
898 * possibly register the address family with the kernel.
899 */
900 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
901 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
902
903 put_cred(vsk->owner);
904 }
905
906 static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
907 {
908 int err;
909
910 err = sock_queue_rcv_skb(sk, skb);
911 if (err)
912 kfree_skb(skb);
913
914 return err;
915 }
916
917 struct sock *vsock_create_connected(struct sock *parent)
918 {
919 return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL,
920 parent->sk_type, 0);
921 }
922 EXPORT_SYMBOL_GPL(vsock_create_connected);
923
924 s64 vsock_stream_has_data(struct vsock_sock *vsk)
925 {
926 if (WARN_ON(!vsk->transport))
927 return 0;
928
929 return vsk->transport->stream_has_data(vsk);
930 }
931 EXPORT_SYMBOL_GPL(vsock_stream_has_data);
932
933 s64 vsock_connectible_has_data(struct vsock_sock *vsk)
934 {
935 struct sock *sk = sk_vsock(vsk);
936
937 if (WARN_ON(!vsk->transport))
938 return 0;
939
940 if (sk->sk_type == SOCK_SEQPACKET)
941 return vsk->transport->seqpacket_has_data(vsk);
942 else
943 return vsock_stream_has_data(vsk);
944 }
945 EXPORT_SYMBOL_GPL(vsock_connectible_has_data);
946
947 s64 vsock_stream_has_space(struct vsock_sock *vsk)
948 {
949 if (WARN_ON(!vsk->transport))
950 return 0;
951
952 return vsk->transport->stream_has_space(vsk);
953 }
954 EXPORT_SYMBOL_GPL(vsock_stream_has_space);
955
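/* Wake up readers only once enough data is queued to satisfy SO_RCVLOWAT, or
 * unconditionally once SOCK_DONE is set.
 */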
956 void vsock_data_ready(struct sock *sk)
957 {
958 struct vsock_sock *vsk = vsock_sk(sk);
959
960 if (vsock_stream_has_data(vsk) >= sk->sk_rcvlowat ||
961 sock_flag(sk, SOCK_DONE))
962 sk->sk_data_ready(sk);
963 }
964 EXPORT_SYMBOL_GPL(vsock_data_ready);
965
966 /* Dummy callback required by sockmap.
967 * See unconditional call of saved_close() in sock_map_close().
968 */
969 static void vsock_close(struct sock *sk, long timeout)
970 {
971 }
972
973 static int vsock_release(struct socket *sock)
974 {
975 struct sock *sk = sock->sk;
976
977 if (!sk)
978 return 0;
979
980 sk->sk_prot->close(sk, 0);
981 __vsock_release(sk, 0);
982 sock->sk = NULL;
983 sock->state = SS_FREE;
984
985 return 0;
986 }
987
988 static int
989 vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
990 {
991 int err;
992 struct sock *sk;
993 struct sockaddr_vm *vm_addr;
994
995 sk = sock->sk;
996
997 if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
998 return -EINVAL;
999
1000 lock_sock(sk);
1001 err = __vsock_bind(sk, vm_addr);
1002 release_sock(sk);
1003
1004 return err;
1005 }
1006
1007 static int vsock_getname(struct socket *sock,
1008 struct sockaddr *addr, int peer)
1009 {
1010 int err;
1011 struct sock *sk;
1012 struct vsock_sock *vsk;
1013 struct sockaddr_vm *vm_addr;
1014
1015 sk = sock->sk;
1016 vsk = vsock_sk(sk);
1017 err = 0;
1018
1019 lock_sock(sk);
1020
1021 if (peer) {
1022 if (sock->state != SS_CONNECTED) {
1023 err = -ENOTCONN;
1024 goto out;
1025 }
1026 vm_addr = &vsk->remote_addr;
1027 } else {
1028 vm_addr = &vsk->local_addr;
1029 }
1030
1031 if (!vm_addr) {
1032 err = -EINVAL;
1033 goto out;
1034 }
1035
1036 /* sys_getsockname() and sys_getpeername() pass us a
1037 * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
1038 * that macro is defined in socket.c instead of .h, so we hardcode its
1039 * value here.
1040 */
1041 BUILD_BUG_ON(sizeof(*vm_addr) > 128);
1042 memcpy(addr, vm_addr, sizeof(*vm_addr));
1043 err = sizeof(*vm_addr);
1044
1045 out:
1046 release_sock(sk);
1047 return err;
1048 }
1049
1050 void vsock_linger(struct sock *sk)
1051 {
1052 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1053 ssize_t (*unsent)(struct vsock_sock *vsk);
1054 struct vsock_sock *vsk = vsock_sk(sk);
1055 long timeout;
1056
1057 if (!sock_flag(sk, SOCK_LINGER))
1058 return;
1059
1060 timeout = sk->sk_lingertime;
1061 if (!timeout)
1062 return;
1063
1064 /* Transports must implement `unsent_bytes` if they want to support
1065 * SOCK_LINGER through `vsock_linger()` since we use it to check when
1066 * the socket can be closed.
1067 */
1068 unsent = vsk->transport->unsent_bytes;
1069 if (!unsent)
1070 return;
1071
1072 add_wait_queue(sk_sleep(sk), &wait);
1073
1074 do {
1075 if (sk_wait_event(sk, &timeout, unsent(vsk) == 0, &wait))
1076 break;
1077 } while (!signal_pending(current) && timeout);
1078
1079 remove_wait_queue(sk_sleep(sk), &wait);
1080 }
1081 EXPORT_SYMBOL_GPL(vsock_linger);
1082
1083 static int vsock_shutdown(struct socket *sock, int mode)
1084 {
1085 int err;
1086 struct sock *sk;
1087
1088 /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
1089 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
1090 * here like the other address families do. Note also that the
1091 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
1092 * which is what we want.
1093 */
1094 mode++;
1095
1096 if ((mode & ~SHUTDOWN_MASK) || !mode)
1097 return -EINVAL;
1098
1099 /* If this is a connection oriented socket and it is not connected then
1100 * bail out immediately. If it is a DGRAM socket then we must first
1101 * kick the socket so that it wakes up from any sleeping calls, for
1102 * example recv(), and then afterwards return the error.
1103 */
1104
1105 sk = sock->sk;
1106
1107 lock_sock(sk);
1108 if (sock->state == SS_UNCONNECTED) {
1109 err = -ENOTCONN;
1110 if (sock_type_connectible(sk->sk_type))
1111 goto out;
1112 } else {
1113 sock->state = SS_DISCONNECTING;
1114 err = 0;
1115 }
1116
1117 /* Receive and send shutdowns are treated alike. */
1118 mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
1119 if (mode) {
1120 sk->sk_shutdown |= mode;
1121 sk->sk_state_change(sk);
1122
1123 if (sock_type_connectible(sk->sk_type)) {
1124 sock_reset_flag(sk, SOCK_DONE);
1125 vsock_send_shutdown(sk, mode);
1126 }
1127 }
1128
1129 out:
1130 release_sock(sk);
1131 return err;
1132 }
1133
1134 static __poll_t vsock_poll(struct file *file, struct socket *sock,
1135 poll_table *wait)
1136 {
1137 struct sock *sk;
1138 __poll_t mask;
1139 struct vsock_sock *vsk;
1140
1141 sk = sock->sk;
1142 vsk = vsock_sk(sk);
1143
1144 poll_wait(file, sk_sleep(sk), wait);
1145 mask = 0;
1146
1147 if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
1148 /* Signify that there has been an error on this socket. */
1149 mask |= EPOLLERR;
1150
1151 /* INET sockets treat local write shutdown and peer write shutdown as a
1152 * case of EPOLLHUP set.
1153 */
1154 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
1155 ((sk->sk_shutdown & SEND_SHUTDOWN) &&
1156 (vsk->peer_shutdown & SEND_SHUTDOWN))) {
1157 mask |= EPOLLHUP;
1158 }
1159
1160 if (sk->sk_shutdown & RCV_SHUTDOWN ||
1161 vsk->peer_shutdown & SEND_SHUTDOWN) {
1162 mask |= EPOLLRDHUP;
1163 }
1164
1165 if (sk_is_readable(sk))
1166 mask |= EPOLLIN | EPOLLRDNORM;
1167
1168 if (sock->type == SOCK_DGRAM) {
1169 /* For datagram sockets we can read if there is something in
1170 * the queue and write as long as the socket isn't shutdown for
1171 * sending.
1172 */
1173 if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
1174 (sk->sk_shutdown & RCV_SHUTDOWN)) {
1175 mask |= EPOLLIN | EPOLLRDNORM;
1176 }
1177
1178 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
1179 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
1180
1181 } else if (sock_type_connectible(sk->sk_type)) {
1182 const struct vsock_transport *transport;
1183
1184 lock_sock(sk);
1185
1186 transport = vsk->transport;
1187
1188 /* Listening sockets that have connections in their accept
1189 * queue can be read.
1190 */
1191 if (sk->sk_state == TCP_LISTEN
1192 && !vsock_is_accept_queue_empty(sk))
1193 mask |= EPOLLIN | EPOLLRDNORM;
1194
1195 /* If there is something in the queue then we can read. */
1196 if (transport && transport->stream_is_active(vsk) &&
1197 !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1198 bool data_ready_now = false;
1199 int target = sock_rcvlowat(sk, 0, INT_MAX);
1200 int ret = transport->notify_poll_in(
1201 vsk, target, &data_ready_now);
1202 if (ret < 0) {
1203 mask |= EPOLLERR;
1204 } else {
1205 if (data_ready_now)
1206 mask |= EPOLLIN | EPOLLRDNORM;
1207
1208 }
1209 }
1210
1211 /* Sockets whose connections have been closed, reset, or
1212 * terminated should also be considered read, and we check the
1213 * shutdown flag for that.
1214 */
1215 if (sk->sk_shutdown & RCV_SHUTDOWN ||
1216 vsk->peer_shutdown & SEND_SHUTDOWN) {
1217 mask |= EPOLLIN | EPOLLRDNORM;
1218 }
1219
1220 /* Connected sockets that can produce data can be written. */
1221 if (transport && sk->sk_state == TCP_ESTABLISHED) {
1222 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
1223 bool space_avail_now = false;
1224 int ret = transport->notify_poll_out(
1225 vsk, 1, &space_avail_now);
1226 if (ret < 0) {
1227 mask |= EPOLLERR;
1228 } else {
1229 if (space_avail_now)
1230 /* Remove EPOLLWRBAND since INET
1231 * sockets are not setting it.
1232 */
1233 mask |= EPOLLOUT | EPOLLWRNORM;
1234
1235 }
1236 }
1237 }
1238
1239 /* Simulate INET socket poll behaviors, which sets
1240 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
1241 * but local send is not shutdown.
1242 */
1243 if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
1244 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
1245 mask |= EPOLLOUT | EPOLLWRNORM;
1246
1247 }
1248
1249 release_sock(sk);
1250 }
1251
1252 return mask;
1253 }
1254
1255 static int vsock_read_skb(struct sock *sk, skb_read_actor_t read_actor)
1256 {
1257 struct vsock_sock *vsk = vsock_sk(sk);
1258
1259 if (WARN_ON_ONCE(!vsk->transport))
1260 return -ENODEV;
1261
1262 return vsk->transport->read_skb(vsk, read_actor);
1263 }
1264
1265 static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1266 size_t len)
1267 {
1268 int err;
1269 struct sock *sk;
1270 struct vsock_sock *vsk;
1271 struct sockaddr_vm *remote_addr;
1272 const struct vsock_transport *transport;
1273
1274 if (msg->msg_flags & MSG_OOB)
1275 return -EOPNOTSUPP;
1276
1277 /* For now, MSG_DONTWAIT is always assumed... */
1278 err = 0;
1279 sk = sock->sk;
1280 vsk = vsock_sk(sk);
1281
1282 lock_sock(sk);
1283
1284 transport = vsk->transport;
1285
1286 err = vsock_auto_bind(vsk);
1287 if (err)
1288 goto out;
1289
1290
1291 /* If the provided message contains an address, use that. Otherwise
1292 * fall back on the socket's remote handle (if it has been connected).
1293 */
1294 if (msg->msg_name &&
1295 vsock_addr_cast(msg->msg_name, msg->msg_namelen,
1296 &remote_addr) == 0) {
1297 /* Ensure this address is of the right type and is a valid
1298 * destination.
1299 */
1300
1301 if (remote_addr->svm_cid == VMADDR_CID_ANY)
1302 remote_addr->svm_cid = transport->get_local_cid();
1303
1304 if (!vsock_addr_bound(remote_addr)) {
1305 err = -EINVAL;
1306 goto out;
1307 }
1308 } else if (sock->state == SS_CONNECTED) {
1309 remote_addr = &vsk->remote_addr;
1310
1311 if (remote_addr->svm_cid == VMADDR_CID_ANY)
1312 remote_addr->svm_cid = transport->get_local_cid();
1313
1314 /* XXX Should connect() or this function ensure remote_addr is
1315 * bound?
1316 */
1317 if (!vsock_addr_bound(&vsk->remote_addr)) {
1318 err = -EINVAL;
1319 goto out;
1320 }
1321 } else {
1322 err = -EINVAL;
1323 goto out;
1324 }
1325
1326 if (!transport->dgram_allow(remote_addr->svm_cid,
1327 remote_addr->svm_port)) {
1328 err = -EINVAL;
1329 goto out;
1330 }
1331
1332 err = transport->dgram_enqueue(vsk, remote_addr, msg, len);
1333
1334 out:
1335 release_sock(sk);
1336 return err;
1337 }
1338
1339 static int vsock_dgram_connect(struct socket *sock,
1340 struct sockaddr *addr, int addr_len, int flags)
1341 {
1342 int err;
1343 struct sock *sk;
1344 struct vsock_sock *vsk;
1345 struct sockaddr_vm *remote_addr;
1346
1347 sk = sock->sk;
1348 vsk = vsock_sk(sk);
1349
1350 err = vsock_addr_cast(addr, addr_len, &remote_addr);
1351 if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
1352 lock_sock(sk);
1353 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
1354 VMADDR_PORT_ANY);
1355 sock->state = SS_UNCONNECTED;
1356 release_sock(sk);
1357 return 0;
1358 } else if (err != 0)
1359 return -EINVAL;
1360
1361 lock_sock(sk);
1362
1363 err = vsock_auto_bind(vsk);
1364 if (err)
1365 goto out;
1366
1367 if (!vsk->transport->dgram_allow(remote_addr->svm_cid,
1368 remote_addr->svm_port)) {
1369 err = -EINVAL;
1370 goto out;
1371 }
1372
1373 memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
1374 sock->state = SS_CONNECTED;
1375
1376 /* sock map disallows redirection of non-TCP sockets with sk_state !=
1377 * TCP_ESTABLISHED (see sock_map_redirect_allowed()), so we set
1378 * TCP_ESTABLISHED here to allow redirection of connected vsock dgrams.
1379 *
1380 * This doesn't seem to be an abnormal state for datagram sockets, as the
1381 * same approach can be seen in other datagram socket types as well
1382 * (such as unix sockets).
1383 */
1384 sk->sk_state = TCP_ESTABLISHED;
1385
1386 out:
1387 release_sock(sk);
1388 return err;
1389 }
1390
1391 int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
1392 size_t len, int flags)
1393 {
1394 struct sock *sk = sock->sk;
1395 struct vsock_sock *vsk = vsock_sk(sk);
1396
1397 return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
1398 }
1399
1400 int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
1401 size_t len, int flags)
1402 {
1403 #ifdef CONFIG_BPF_SYSCALL
1404 struct sock *sk = sock->sk;
1405 const struct proto *prot;
1406
1407 prot = READ_ONCE(sk->sk_prot);
1408 if (prot != &vsock_proto)
1409 return prot->recvmsg(sk, msg, len, flags, NULL);
1410 #endif
1411
1412 return __vsock_dgram_recvmsg(sock, msg, len, flags);
1413 }
1414 EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg);
1415
1416 static int vsock_do_ioctl(struct socket *sock, unsigned int cmd,
1417 int __user *arg)
1418 {
1419 struct sock *sk = sock->sk;
1420 struct vsock_sock *vsk;
1421 int ret;
1422
1423 vsk = vsock_sk(sk);
1424
1425 switch (cmd) {
1426 case SIOCOUTQ: {
1427 ssize_t n_bytes;
1428
1429 if (!vsk->transport || !vsk->transport->unsent_bytes) {
1430 ret = -EOPNOTSUPP;
1431 break;
1432 }
1433
1434 if (sock_type_connectible(sk->sk_type) && sk->sk_state == TCP_LISTEN) {
1435 ret = -EINVAL;
1436 break;
1437 }
1438
1439 n_bytes = vsk->transport->unsent_bytes(vsk);
1440 if (n_bytes < 0) {
1441 ret = n_bytes;
1442 break;
1443 }
1444
1445 ret = put_user(n_bytes, arg);
1446 break;
1447 }
1448 default:
1449 ret = -ENOIOCTLCMD;
1450 }
1451
1452 return ret;
1453 }
1454
1455 static int vsock_ioctl(struct socket *sock, unsigned int cmd,
1456 unsigned long arg)
1457 {
1458 int ret;
1459
1460 lock_sock(sock->sk);
1461 ret = vsock_do_ioctl(sock, cmd, (int __user *)arg);
1462 release_sock(sock->sk);
1463
1464 return ret;
1465 }
1466
1467 static const struct proto_ops vsock_dgram_ops = {
1468 .family = PF_VSOCK,
1469 .owner = THIS_MODULE,
1470 .release = vsock_release,
1471 .bind = vsock_bind,
1472 .connect = vsock_dgram_connect,
1473 .socketpair = sock_no_socketpair,
1474 .accept = sock_no_accept,
1475 .getname = vsock_getname,
1476 .poll = vsock_poll,
1477 .ioctl = vsock_ioctl,
1478 .listen = sock_no_listen,
1479 .shutdown = vsock_shutdown,
1480 .sendmsg = vsock_dgram_sendmsg,
1481 .recvmsg = vsock_dgram_recvmsg,
1482 .mmap = sock_no_mmap,
1483 .read_skb = vsock_read_skb,
1484 };
1485
1486 static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
1487 {
1488 const struct vsock_transport *transport = vsk->transport;
1489
1490 if (!transport || !transport->cancel_pkt)
1491 return -EOPNOTSUPP;
1492
1493 return transport->cancel_pkt(vsk);
1494 }
1495
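/* Delayed work armed by a non-blocking connect(): if the connection is still
 * in TCP_SYN_SENT when it fires, fail it with ETIMEDOUT and cancel any packet
 * still queued in the transport.
 */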
1496 static void vsock_connect_timeout(struct work_struct *work)
1497 {
1498 struct sock *sk;
1499 struct vsock_sock *vsk;
1500
1501 vsk = container_of(work, struct vsock_sock, connect_work.work);
1502 sk = sk_vsock(vsk);
1503
1504 lock_sock(sk);
1505 if (sk->sk_state == TCP_SYN_SENT &&
1506 (sk->sk_shutdown != SHUTDOWN_MASK)) {
1507 sk->sk_state = TCP_CLOSE;
1508 sk->sk_socket->state = SS_UNCONNECTED;
1509 sk->sk_err = ETIMEDOUT;
1510 sk_error_report(sk);
1511 vsock_transport_cancel_pkt(vsk);
1512 }
1513 release_sock(sk);
1514
1515 sock_put(sk);
1516 }
1517
1518 static int vsock_connect(struct socket *sock, struct sockaddr *addr,
1519 int addr_len, int flags)
1520 {
1521 int err;
1522 struct sock *sk;
1523 struct vsock_sock *vsk;
1524 const struct vsock_transport *transport;
1525 struct sockaddr_vm *remote_addr;
1526 long timeout;
1527 DEFINE_WAIT(wait);
1528
1529 err = 0;
1530 sk = sock->sk;
1531 vsk = vsock_sk(sk);
1532
1533 lock_sock(sk);
1534
1535 /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
1536 switch (sock->state) {
1537 case SS_CONNECTED:
1538 err = -EISCONN;
1539 goto out;
1540 case SS_DISCONNECTING:
1541 err = -EINVAL;
1542 goto out;
1543 case SS_CONNECTING:
1544 /* This continues on so we can move sock into the SS_CONNECTED
1545 * state once the connection has completed (at which point err
1546 * will be set to zero also). Otherwise, we will either wait
1547 * for the connection or return -EALREADY should this be a
1548 * non-blocking call.
1549 */
1550 err = -EALREADY;
1551 if (flags & O_NONBLOCK)
1552 goto out;
1553 break;
1554 default:
1555 if ((sk->sk_state == TCP_LISTEN) ||
1556 vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
1557 err = -EINVAL;
1558 goto out;
1559 }
1560
1561 /* Set the remote address that we are connecting to. */
1562 memcpy(&vsk->remote_addr, remote_addr,
1563 sizeof(vsk->remote_addr));
1564
1565 err = vsock_assign_transport(vsk, NULL);
1566 if (err)
1567 goto out;
1568
1569 transport = vsk->transport;
1570
1571 /* The hypervisor and well-known contexts do not have socket
1572 * endpoints.
1573 */
1574 if (!transport ||
1575 !transport->stream_allow(remote_addr->svm_cid,
1576 remote_addr->svm_port)) {
1577 err = -ENETUNREACH;
1578 goto out;
1579 }
1580
1581 if (vsock_msgzerocopy_allow(transport)) {
1582 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
1583 } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
1584 /* If this option was set before 'connect()',
1585 * when transport was unknown, check that this
1586 * feature is supported here.
1587 */
1588 err = -EOPNOTSUPP;
1589 goto out;
1590 }
1591
1592 err = vsock_auto_bind(vsk);
1593 if (err)
1594 goto out;
1595
1596 sk->sk_state = TCP_SYN_SENT;
1597
1598 err = transport->connect(vsk);
1599 if (err < 0)
1600 goto out;
1601
1602 /* sk_err might have been set as a result of an earlier
1603 * (failed) connect attempt.
1604 */
1605 sk->sk_err = 0;
1606
1607 /* Mark sock as connecting and set the error code to in
1608 * progress in case this is a non-blocking connect.
1609 */
1610 sock->state = SS_CONNECTING;
1611 err = -EINPROGRESS;
1612 }
1613
1614 /* The receive path will handle all communication until we are able to
1615 * enter the connected state. Here we wait for the connection to be
1616 * completed or a notification of an error.
1617 */
1618 timeout = vsk->connect_timeout;
1619 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1620
1621 /* If the socket is already closing or it is in an error state, there
1622 * is no point in waiting.
1623 */
1624 while (sk->sk_state != TCP_ESTABLISHED &&
1625 sk->sk_state != TCP_CLOSING && sk->sk_err == 0) {
1626 if (flags & O_NONBLOCK) {
1627 /* If we're not going to block, we schedule a timeout
1628 * function to generate a timeout on the connection
1629 * attempt, in case the peer doesn't respond in a
1630 * timely manner. We hold on to the socket until the
1631 * timeout fires.
1632 */
1633 sock_hold(sk);
1634
1635 /* If the timeout function is already scheduled,
1636 * reschedule it, then ungrab the socket refcount to
1637 * keep it balanced.
1638 */
1639 if (mod_delayed_work(system_wq, &vsk->connect_work,
1640 timeout))
1641 sock_put(sk);
1642
1643 /* Skip ahead to preserve error code set above. */
1644 goto out_wait;
1645 }
1646
1647 release_sock(sk);
1648 timeout = schedule_timeout(timeout);
1649 lock_sock(sk);
1650
1651 if (signal_pending(current)) {
1652 err = sock_intr_errno(timeout);
1653 sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
1654 sock->state = SS_UNCONNECTED;
1655 vsock_transport_cancel_pkt(vsk);
1656 vsock_remove_connected(vsk);
1657 goto out_wait;
1658 } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
1659 err = -ETIMEDOUT;
1660 sk->sk_state = TCP_CLOSE;
1661 sock->state = SS_UNCONNECTED;
1662 vsock_transport_cancel_pkt(vsk);
1663 goto out_wait;
1664 }
1665
1666 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1667 }
1668
1669 if (sk->sk_err) {
1670 err = -sk->sk_err;
1671 sk->sk_state = TCP_CLOSE;
1672 sock->state = SS_UNCONNECTED;
1673 } else {
1674 err = 0;
1675 }
1676
1677 out_wait:
1678 finish_wait(sk_sleep(sk), &wait);
1679 out:
1680 release_sock(sk);
1681 return err;
1682 }
1683
1684 static int vsock_accept(struct socket *sock, struct socket *newsock,
1685 struct proto_accept_arg *arg)
1686 {
1687 struct sock *listener;
1688 int err;
1689 struct sock *connected;
1690 struct vsock_sock *vconnected;
1691 long timeout;
1692 DEFINE_WAIT(wait);
1693
1694 err = 0;
1695 listener = sock->sk;
1696
1697 lock_sock(listener);
1698
1699 if (!sock_type_connectible(sock->type)) {
1700 err = -EOPNOTSUPP;
1701 goto out;
1702 }
1703
1704 if (listener->sk_state != TCP_LISTEN) {
1705 err = -EINVAL;
1706 goto out;
1707 }
1708
1709 /* Wait for child sockets to appear; these are the new sockets
1710 * created upon connection establishment.
1711 */
1712 timeout = sock_rcvtimeo(listener, arg->flags & O_NONBLOCK);
1713 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1714
1715 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
1716 listener->sk_err == 0) {
1717 release_sock(listener);
1718 timeout = schedule_timeout(timeout);
1719 finish_wait(sk_sleep(listener), &wait);
1720 lock_sock(listener);
1721
1722 if (signal_pending(current)) {
1723 err = sock_intr_errno(timeout);
1724 goto out;
1725 } else if (timeout == 0) {
1726 err = -EAGAIN;
1727 goto out;
1728 }
1729
1730 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1731 }
1732 finish_wait(sk_sleep(listener), &wait);
1733
1734 if (listener->sk_err)
1735 err = -listener->sk_err;
1736
1737 if (connected) {
1738 sk_acceptq_removed(listener);
1739
1740 lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
1741 vconnected = vsock_sk(connected);
1742
1743 /* If the listener socket has received an error, then we should
1744 * reject this socket and return. Note that we simply mark the
1745 * socket rejected, drop our reference, and let the cleanup
1746 * function handle the cleanup; the fact that we found it in
1747 * the listener's accept queue guarantees that the cleanup
1748 * function hasn't run yet.
1749 */
1750 if (err) {
1751 vconnected->rejected = true;
1752 } else {
1753 newsock->state = SS_CONNECTED;
1754 sock_graft(connected, newsock);
1755 if (vsock_msgzerocopy_allow(vconnected->transport))
1756 set_bit(SOCK_SUPPORT_ZC,
1757 &connected->sk_socket->flags);
1758 }
1759
1760 release_sock(connected);
1761 sock_put(connected);
1762 }
1763
1764 out:
1765 release_sock(listener);
1766 return err;
1767 }
1768
1769 static int vsock_listen(struct socket *sock, int backlog)
1770 {
1771 int err;
1772 struct sock *sk;
1773 struct vsock_sock *vsk;
1774
1775 sk = sock->sk;
1776
1777 lock_sock(sk);
1778
1779 if (!sock_type_connectible(sk->sk_type)) {
1780 err = -EOPNOTSUPP;
1781 goto out;
1782 }
1783
1784 if (sock->state != SS_UNCONNECTED) {
1785 err = -EINVAL;
1786 goto out;
1787 }
1788
1789 vsk = vsock_sk(sk);
1790
1791 if (!vsock_addr_bound(&vsk->local_addr)) {
1792 err = -EINVAL;
1793 goto out;
1794 }
1795
1796 sk->sk_max_ack_backlog = backlog;
1797 sk->sk_state = TCP_LISTEN;
1798
1799 err = 0;
1800
1801 out:
1802 release_sock(sk);
1803 return err;
1804 }
1805
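/* Clamp the requested buffer size to the socket's min/max bounds and notify
 * the transport of the change, if it implements notify_buffer_size.
 */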
1806 static void vsock_update_buffer_size(struct vsock_sock *vsk,
1807 const struct vsock_transport *transport,
1808 u64 val)
1809 {
1810 if (val > vsk->buffer_max_size)
1811 val = vsk->buffer_max_size;
1812
1813 if (val < vsk->buffer_min_size)
1814 val = vsk->buffer_min_size;
1815
1816 if (val != vsk->buffer_size &&
1817 transport && transport->notify_buffer_size)
1818 transport->notify_buffer_size(vsk, &val);
1819
1820 vsk->buffer_size = val;
1821 }
1822
1823 static int vsock_connectible_setsockopt(struct socket *sock,
1824 int level,
1825 int optname,
1826 sockptr_t optval,
1827 unsigned int optlen)
1828 {
1829 int err;
1830 struct sock *sk;
1831 struct vsock_sock *vsk;
1832 const struct vsock_transport *transport;
1833 u64 val;
1834
1835 if (level != AF_VSOCK && level != SOL_SOCKET)
1836 return -ENOPROTOOPT;
1837
1838 #define COPY_IN(_v) \
1839 do { \
1840 if (optlen < sizeof(_v)) { \
1841 err = -EINVAL; \
1842 goto exit; \
1843 } \
1844 if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) { \
1845 err = -EFAULT; \
1846 goto exit; \
1847 } \
1848 } while (0)
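/* COPY_IN(_v) checks that optlen is large enough for the destination
 * variable and copies the option value from the sockptr; on a short optlen
 * or a faulting copy it sets err and jumps to the exit label, which releases
 * the socket lock.
 */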
1849
1850 err = 0;
1851 sk = sock->sk;
1852 vsk = vsock_sk(sk);
1853
1854 lock_sock(sk);
1855
1856 transport = vsk->transport;
1857
1858 if (level == SOL_SOCKET) {
1859 int zerocopy;
1860
1861 if (optname != SO_ZEROCOPY) {
1862 release_sock(sk);
1863 return sock_setsockopt(sock, level, optname, optval, optlen);
1864 }
1865
1866 /* Use 'int' here, because the variable used to
1867 * set this option usually has this type.
1868 */
1869 COPY_IN(zerocopy);
1870
1871 if (zerocopy < 0 || zerocopy > 1) {
1872 err = -EINVAL;
1873 goto exit;
1874 }
1875
1876 if (transport && !vsock_msgzerocopy_allow(transport)) {
1877 err = -EOPNOTSUPP;
1878 goto exit;
1879 }
1880
1881 sock_valbool_flag(sk, SOCK_ZEROCOPY, zerocopy);
1882 goto exit;
1883 }
1884
1885 switch (optname) {
1886 case SO_VM_SOCKETS_BUFFER_SIZE:
1887 COPY_IN(val);
1888 vsock_update_buffer_size(vsk, transport, val);
1889 break;
1890
1891 case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1892 COPY_IN(val);
1893 vsk->buffer_max_size = val;
1894 vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
1895 break;
1896
1897 case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1898 COPY_IN(val);
1899 vsk->buffer_min_size = val;
1900 vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
1901 break;
1902
1903 case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
1904 case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: {
1905 struct __kernel_sock_timeval tv;
1906
1907 err = sock_copy_user_timeval(&tv, optval, optlen,
1908 optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
1909 if (err)
1910 break;
1911 if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
1912 tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
1913 vsk->connect_timeout = tv.tv_sec * HZ +
1914 DIV_ROUND_UP((unsigned long)tv.tv_usec, (USEC_PER_SEC / HZ));
1915 if (vsk->connect_timeout == 0)
1916 vsk->connect_timeout =
1917 VSOCK_DEFAULT_CONNECT_TIMEOUT;
1918
1919 } else {
1920 err = -ERANGE;
1921 }
1922 break;
1923 }
1924
1925 default:
1926 err = -ENOPROTOOPT;
1927 break;
1928 }
1929
1930 #undef COPY_IN
1931
1932 exit:
1933 release_sock(sk);
1934 return err;
1935 }
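/* Illustrative userspace sketch (not part of this file): setting the
 * AF_VSOCK-level options handled above. SO_VM_SOCKETS_BUFFER_SIZE takes a
 * 64-bit value; SO_VM_SOCKETS_CONNECT_TIMEOUT (which resolves to one of the
 * _OLD/_NEW variants handled here) takes a struct timeval. Both names come
 * from the public <linux/vm_sockets.h> UAPI; the values are arbitrary.
 *
 *	unsigned long long buf = 256 * 1024;
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, &buf, sizeof(buf));
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT, &tv, sizeof(tv));
 */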
1936
1937 static int vsock_connectible_getsockopt(struct socket *sock,
1938 int level, int optname,
1939 char __user *optval,
1940 int __user *optlen)
1941 {
1942 struct sock *sk = sock->sk;
1943 struct vsock_sock *vsk = vsock_sk(sk);
1944
1945 union {
1946 u64 val64;
1947 struct old_timeval32 tm32;
1948 struct __kernel_old_timeval tm;
1949 struct __kernel_sock_timeval stm;
1950 } v;
1951
1952 int lv = sizeof(v.val64);
1953 int len;
1954
1955 if (level != AF_VSOCK)
1956 return -ENOPROTOOPT;
1957
1958 if (get_user(len, optlen))
1959 return -EFAULT;
1960
1961 memset(&v, 0, sizeof(v));
1962
1963 switch (optname) {
1964 case SO_VM_SOCKETS_BUFFER_SIZE:
1965 v.val64 = vsk->buffer_size;
1966 break;
1967
1968 case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1969 v.val64 = vsk->buffer_max_size;
1970 break;
1971
1972 case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1973 v.val64 = vsk->buffer_min_size;
1974 break;
1975
1976 case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
1977 case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD:
1978 lv = sock_get_timeout(vsk->connect_timeout, &v,
1979 optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
1980 break;
1981
1982 default:
1983 return -ENOPROTOOPT;
1984 }
1985
1986 if (len < lv)
1987 return -EINVAL;
1988 if (len > lv)
1989 len = lv;
1990 if (copy_to_user(optval, &v, len))
1991 return -EFAULT;
1992
1993 if (put_user(len, optlen))
1994 return -EFAULT;
1995
1996 return 0;
1997 }
1998
1999 static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg,
2000 size_t len)
2001 {
2002 struct sock *sk;
2003 struct vsock_sock *vsk;
2004 const struct vsock_transport *transport;
2005 ssize_t total_written;
2006 long timeout;
2007 int err;
2008 struct vsock_transport_send_notify_data send_data;
2009 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2010
2011 sk = sock->sk;
2012 vsk = vsock_sk(sk);
2013 total_written = 0;
2014 err = 0;
2015
2016 if (msg->msg_flags & MSG_OOB)
2017 return -EOPNOTSUPP;
2018
2019 lock_sock(sk);
2020
2021 transport = vsk->transport;
2022
2023 /* Callers should not provide a destination with connection-oriented
2024 * sockets.
2025 */
2026 if (msg->msg_namelen) {
2027 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
2028 goto out;
2029 }
2030
2031 /* Send data only if neither side has shut down in this direction. */
2032 if (sk->sk_shutdown & SEND_SHUTDOWN ||
2033 vsk->peer_shutdown & RCV_SHUTDOWN) {
2034 err = -EPIPE;
2035 goto out;
2036 }
2037
2038 if (!transport || sk->sk_state != TCP_ESTABLISHED ||
2039 !vsock_addr_bound(&vsk->local_addr)) {
2040 err = -ENOTCONN;
2041 goto out;
2042 }
2043
2044 if (!vsock_addr_bound(&vsk->remote_addr)) {
2045 err = -EDESTADDRREQ;
2046 goto out;
2047 }
2048
2049 if (msg->msg_flags & MSG_ZEROCOPY &&
2050 !vsock_msgzerocopy_allow(transport)) {
2051 err = -EOPNOTSUPP;
2052 goto out;
2053 }
2054
2055 /* Wait for room in the produce queue to enqueue our user's data. */
2056 timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2057
2058 err = transport->notify_send_init(vsk, &send_data);
2059 if (err < 0)
2060 goto out;
2061
2062 while (total_written < len) {
2063 ssize_t written;
2064
2065 add_wait_queue(sk_sleep(sk), &wait);
2066 while (vsock_stream_has_space(vsk) == 0 &&
2067 sk->sk_err == 0 &&
2068 !(sk->sk_shutdown & SEND_SHUTDOWN) &&
2069 !(vsk->peer_shutdown & RCV_SHUTDOWN)) {
2070
2071 /* Don't wait for non-blocking sockets. */
2072 if (timeout == 0) {
2073 err = -EAGAIN;
2074 remove_wait_queue(sk_sleep(sk), &wait);
2075 goto out_err;
2076 }
2077
2078 err = transport->notify_send_pre_block(vsk, &send_data);
2079 if (err < 0) {
2080 remove_wait_queue(sk_sleep(sk), &wait);
2081 goto out_err;
2082 }
2083
2084 release_sock(sk);
2085 timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
2086 lock_sock(sk);
2087 if (signal_pending(current)) {
2088 err = sock_intr_errno(timeout);
2089 remove_wait_queue(sk_sleep(sk), &wait);
2090 goto out_err;
2091 } else if (timeout == 0) {
2092 err = -EAGAIN;
2093 remove_wait_queue(sk_sleep(sk), &wait);
2094 goto out_err;
2095 }
2096 }
2097 remove_wait_queue(sk_sleep(sk), &wait);
2098
2099 /* These checks occur both as part of and after the loop
2100 * conditional since we need to check before and after
2101 * sleeping.
2102 */
2103 if (sk->sk_err) {
2104 err = -sk->sk_err;
2105 goto out_err;
2106 } else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
2107 (vsk->peer_shutdown & RCV_SHUTDOWN)) {
2108 err = -EPIPE;
2109 goto out_err;
2110 }
2111
2112 err = transport->notify_send_pre_enqueue(vsk, &send_data);
2113 if (err < 0)
2114 goto out_err;
2115
2116 /* Note that enqueue will only write as many bytes as are free
2117 * in the produce queue, so we don't need to ensure len is
2118 * smaller than the queue size. It is the caller's
2119 * responsibility to check how many bytes we were able to send.
2120 */
2121
2122 if (sk->sk_type == SOCK_SEQPACKET) {
2123 written = transport->seqpacket_enqueue(vsk,
2124 msg, len - total_written);
2125 } else {
2126 written = transport->stream_enqueue(vsk,
2127 msg, len - total_written);
2128 }
2129
2130 if (written < 0) {
2131 err = written;
2132 goto out_err;
2133 }
2134
2135 total_written += written;
2136
2137 err = transport->notify_send_post_enqueue(
2138 vsk, written, &send_data);
2139 if (err < 0)
2140 goto out_err;
2141
2142 }
2143
2144 out_err:
2145 if (total_written > 0) {
2146 /* Return the number of written bytes only for:
2147 * 1) a SOCK_STREAM socket, or
2148 * 2) a SOCK_SEQPACKET socket when the whole buffer was sent.
2149 */
2150 if (sk->sk_type == SOCK_STREAM || total_written == len)
2151 err = total_written;
2152 }
2153 out:
2154 if (sk->sk_type == SOCK_STREAM)
2155 err = sk_stream_error(sk, msg->msg_flags, err);
2156
2157 release_sock(sk);
2158 return err;
2159 }
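/* Note on the return convention above: for SOCK_STREAM a partial write is
 * reported as the number of bytes enqueued so far, while for SOCK_SEQPACKET
 * the byte count is only returned when the whole buffer was enqueued;
 * otherwise the error that interrupted the send is returned instead.
 */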
2160
2161 static int vsock_connectible_wait_data(struct sock *sk,
2162 struct wait_queue_entry *wait,
2163 long timeout,
2164 struct vsock_transport_recv_notify_data *recv_data,
2165 size_t target)
2166 {
2167 const struct vsock_transport *transport;
2168 struct vsock_sock *vsk;
2169 s64 data;
2170 int err;
2171
2172 vsk = vsock_sk(sk);
2173 err = 0;
2174 transport = vsk->transport;
2175
2176 while (1) {
2177 prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE);
2178 data = vsock_connectible_has_data(vsk);
2179 if (data != 0)
2180 break;
2181
2182 if (sk->sk_err != 0 ||
2183 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2184 (vsk->peer_shutdown & SEND_SHUTDOWN)) {
2185 break;
2186 }
2187
2188 /* Don't wait for non-blocking sockets. */
2189 if (timeout == 0) {
2190 err = -EAGAIN;
2191 break;
2192 }
2193
2194 if (recv_data) {
2195 err = transport->notify_recv_pre_block(vsk, target, recv_data);
2196 if (err < 0)
2197 break;
2198 }
2199
2200 release_sock(sk);
2201 timeout = schedule_timeout(timeout);
2202 lock_sock(sk);
2203
2204 if (signal_pending(current)) {
2205 err = sock_intr_errno(timeout);
2206 break;
2207 } else if (timeout == 0) {
2208 err = -EAGAIN;
2209 break;
2210 }
2211 }
2212
2213 finish_wait(sk_sleep(sk), wait);
2214
2215 if (err)
2216 return err;
2217
2218 /* Internal transport error when checking for available
2219 * data. XXX This should be changed to a connection
2220 * reset in a later change.
2221 */
2222 if (data < 0)
2223 return -ENOMEM;
2224
2225 return data;
2226 }
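/* Return convention for vsock_connectible_wait_data(): a negative errno on
 * error, signal or timeout, 0 when the wait ended with no data available
 * (socket error or shutdown in either direction), and otherwise the positive
 * amount of pending data reported by the transport.
 */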
2227
2228 static int __vsock_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2229 size_t len, int flags)
2230 {
2231 struct vsock_transport_recv_notify_data recv_data;
2232 const struct vsock_transport *transport;
2233 struct vsock_sock *vsk;
2234 ssize_t copied;
2235 size_t target;
2236 long timeout;
2237 int err;
2238
2239 DEFINE_WAIT(wait);
2240
2241 vsk = vsock_sk(sk);
2242 transport = vsk->transport;
2243
2244 /* We must not copy less than target bytes into the user's buffer
2245 * before returning successfully, so we wait for the consume queue to
2246 * have that much data to consume before dequeueing. Note that this
2247 * makes it impossible to handle cases where target is greater than the
2248 * queue size.
2249 */
2250 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2251 if (target >= transport->stream_rcvhiwat(vsk)) {
2252 err = -ENOMEM;
2253 goto out;
2254 }
2255 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2256 copied = 0;
2257
2258 err = transport->notify_recv_init(vsk, target, &recv_data);
2259 if (err < 0)
2260 goto out;
2261
2262
2263 while (1) {
2264 ssize_t read;
2265
2266 err = vsock_connectible_wait_data(sk, &wait, timeout,
2267 &recv_data, target);
2268 if (err <= 0)
2269 break;
2270
2271 err = transport->notify_recv_pre_dequeue(vsk, target,
2272 &recv_data);
2273 if (err < 0)
2274 break;
2275
2276 read = transport->stream_dequeue(vsk, msg, len - copied, flags);
2277 if (read < 0) {
2278 err = read;
2279 break;
2280 }
2281
2282 copied += read;
2283
2284 err = transport->notify_recv_post_dequeue(vsk, target, read,
2285 !(flags & MSG_PEEK), &recv_data);
2286 if (err < 0)
2287 goto out;
2288
2289 if (read >= target || flags & MSG_PEEK)
2290 break;
2291
2292 target -= read;
2293 }
2294
2295 if (sk->sk_err)
2296 err = -sk->sk_err;
2297 else if (sk->sk_shutdown & RCV_SHUTDOWN)
2298 err = 0;
2299
2300 if (copied > 0)
2301 err = copied;
2302
2303 out:
2304 return err;
2305 }
2306
2307 static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
2308 size_t len, int flags)
2309 {
2310 const struct vsock_transport *transport;
2311 struct vsock_sock *vsk;
2312 ssize_t msg_len;
2313 long timeout;
2314 int err = 0;
2315 DEFINE_WAIT(wait);
2316
2317 vsk = vsock_sk(sk);
2318 transport = vsk->transport;
2319
2320 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2321
2322 err = vsock_connectible_wait_data(sk, &wait, timeout, NULL, 0);
2323 if (err <= 0)
2324 goto out;
2325
2326 msg_len = transport->seqpacket_dequeue(vsk, msg, flags);
2327
2328 if (msg_len < 0) {
2329 err = msg_len;
2330 goto out;
2331 }
2332
2333 if (sk->sk_err) {
2334 err = -sk->sk_err;
2335 } else if (sk->sk_shutdown & RCV_SHUTDOWN) {
2336 err = 0;
2337 } else {
2338 /* The user set MSG_TRUNC, so return the real length of
2339 * the packet.
2340 */
2341 if (flags & MSG_TRUNC)
2342 err = msg_len;
2343 else
2344 err = len - msg_data_left(msg);
2345
2346 /* Always set MSG_TRUNC if the real length of the packet is
2347 * bigger than the user's buffer.
2348 */
2349 if (msg_len > len)
2350 msg->msg_flags |= MSG_TRUNC;
2351 }
2352
2353 out:
2354 return err;
2355 }
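/* Illustrative example of the MSG_TRUNC handling above (sizes are made up):
 * a 1500-byte record received into a 1024-byte buffer returns 1024, or 1500
 * if the caller passed MSG_TRUNC, and in both cases MSG_TRUNC is set in
 * msg->msg_flags to signal that the record was cut short.
 */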
2356
2357 int
2358 __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2359 int flags)
2360 {
2361 struct sock *sk;
2362 struct vsock_sock *vsk;
2363 const struct vsock_transport *transport;
2364 int err;
2365
2366 sk = sock->sk;
2367
2368 if (unlikely(flags & MSG_ERRQUEUE))
2369 return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR);
2370
2371 vsk = vsock_sk(sk);
2372 err = 0;
2373
2374 lock_sock(sk);
2375
2376 transport = vsk->transport;
2377
2378 if (!transport || sk->sk_state != TCP_ESTABLISHED) {
2379 /* Recvmsg is supposed to return 0 if a peer performs an
2380 * orderly shutdown. Differentiate between that case and when a
2381 * peer has not connected or a local shutdown occurred with the
2382 * SOCK_DONE flag.
2383 */
2384 if (sock_flag(sk, SOCK_DONE))
2385 err = 0;
2386 else
2387 err = -ENOTCONN;
2388
2389 goto out;
2390 }
2391
2392 if (flags & MSG_OOB) {
2393 err = -EOPNOTSUPP;
2394 goto out;
2395 }
2396
2397 /* We don't check the peer_shutdown flag here since the peer may actually
2398 * have shut down, but there can still be data in the queue that a local
2399 * socket can receive.
2400 */
2401 if (sk->sk_shutdown & RCV_SHUTDOWN) {
2402 err = 0;
2403 goto out;
2404 }
2405
2406 /* It is valid on Linux to pass in a zero-length receive buffer. This
2407 * is not an error. We may as well bail out now.
2408 */
2409 if (!len) {
2410 err = 0;
2411 goto out;
2412 }
2413
2414 if (sk->sk_type == SOCK_STREAM)
2415 err = __vsock_stream_recvmsg(sk, msg, len, flags);
2416 else
2417 err = __vsock_seqpacket_recvmsg(sk, msg, len, flags);
2418
2419 out:
2420 release_sock(sk);
2421 return err;
2422 }
2423
2424 int
2425 vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2426 int flags)
2427 {
2428 #ifdef CONFIG_BPF_SYSCALL
2429 struct sock *sk = sock->sk;
2430 const struct proto *prot;
2431
2432 prot = READ_ONCE(sk->sk_prot);
2433 if (prot != &vsock_proto)
2434 return prot->recvmsg(sk, msg, len, flags, NULL);
2435 #endif
2436
2437 return __vsock_connectible_recvmsg(sock, msg, len, flags);
2438 }
2439 EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg);
2440
2441 static int vsock_set_rcvlowat(struct sock *sk, int val)
2442 {
2443 const struct vsock_transport *transport;
2444 struct vsock_sock *vsk;
2445
2446 vsk = vsock_sk(sk);
2447
2448 if (val > vsk->buffer_size)
2449 return -EINVAL;
2450
2451 transport = vsk->transport;
2452
2453 if (transport && transport->notify_set_rcvlowat) {
2454 int err;
2455
2456 err = transport->notify_set_rcvlowat(vsk, val);
2457 if (err)
2458 return err;
2459 }
2460
2461 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
2462 return 0;
2463 }
2464
2465 static const struct proto_ops vsock_stream_ops = {
2466 .family = PF_VSOCK,
2467 .owner = THIS_MODULE,
2468 .release = vsock_release,
2469 .bind = vsock_bind,
2470 .connect = vsock_connect,
2471 .socketpair = sock_no_socketpair,
2472 .accept = vsock_accept,
2473 .getname = vsock_getname,
2474 .poll = vsock_poll,
2475 .ioctl = vsock_ioctl,
2476 .listen = vsock_listen,
2477 .shutdown = vsock_shutdown,
2478 .setsockopt = vsock_connectible_setsockopt,
2479 .getsockopt = vsock_connectible_getsockopt,
2480 .sendmsg = vsock_connectible_sendmsg,
2481 .recvmsg = vsock_connectible_recvmsg,
2482 .mmap = sock_no_mmap,
2483 .set_rcvlowat = vsock_set_rcvlowat,
2484 .read_skb = vsock_read_skb,
2485 };
2486
2487 static const struct proto_ops vsock_seqpacket_ops = {
2488 .family = PF_VSOCK,
2489 .owner = THIS_MODULE,
2490 .release = vsock_release,
2491 .bind = vsock_bind,
2492 .connect = vsock_connect,
2493 .socketpair = sock_no_socketpair,
2494 .accept = vsock_accept,
2495 .getname = vsock_getname,
2496 .poll = vsock_poll,
2497 .ioctl = vsock_ioctl,
2498 .listen = vsock_listen,
2499 .shutdown = vsock_shutdown,
2500 .setsockopt = vsock_connectible_setsockopt,
2501 .getsockopt = vsock_connectible_getsockopt,
2502 .sendmsg = vsock_connectible_sendmsg,
2503 .recvmsg = vsock_connectible_recvmsg,
2504 .mmap = sock_no_mmap,
2505 .read_skb = vsock_read_skb,
2506 };
2507
2508 static int vsock_create(struct net *net, struct socket *sock,
2509 int protocol, int kern)
2510 {
2511 struct vsock_sock *vsk;
2512 struct sock *sk;
2513 int ret;
2514
2515 if (!sock)
2516 return -EINVAL;
2517
2518 if (protocol && protocol != PF_VSOCK)
2519 return -EPROTONOSUPPORT;
2520
2521 switch (sock->type) {
2522 case SOCK_DGRAM:
2523 sock->ops = &vsock_dgram_ops;
2524 break;
2525 case SOCK_STREAM:
2526 sock->ops = &vsock_stream_ops;
2527 break;
2528 case SOCK_SEQPACKET:
2529 sock->ops = &vsock_seqpacket_ops;
2530 break;
2531 default:
2532 return -ESOCKTNOSUPPORT;
2533 }
2534
2535 sock->state = SS_UNCONNECTED;
2536
2537 sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern);
2538 if (!sk)
2539 return -ENOMEM;
2540
2541 vsk = vsock_sk(sk);
2542
2543 if (sock->type == SOCK_DGRAM) {
2544 ret = vsock_assign_transport(vsk, NULL);
2545 if (ret < 0) {
2546 sock->sk = NULL;
2547 sock_put(sk);
2548 return ret;
2549 }
2550 }
2551
2552 /* SOCK_DGRAM doesn't have 'setsockopt' callback set in its
2553 * proto_ops, so there is no handler for custom logic.
2554 */
2555 if (sock_type_connectible(sock->type))
2556 set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
2557
2558 vsock_insert_unbound(vsk);
2559
2560 return 0;
2561 }
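/* Note on the SOCK_DGRAM special case above: datagram sockets get their
 * transport assigned at creation time because there is no later connect step
 * to pick one, whereas connectible sockets are matched to a transport when a
 * connection is established.
 */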
2562
2563 static const struct net_proto_family vsock_family_ops = {
2564 .family = AF_VSOCK,
2565 .create = vsock_create,
2566 .owner = THIS_MODULE,
2567 };
2568
2569 static long vsock_dev_do_ioctl(struct file *filp,
2570 unsigned int cmd, void __user *ptr)
2571 {
2572 u32 __user *p = ptr;
2573 int retval = 0;
2574 u32 cid;
2575
2576 switch (cmd) {
2577 case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
2578 /* To be compatible with the VMCI behavior, we prioritize the
2579 * guest CID instead of the well-known host CID (VMADDR_CID_HOST).
2580 */
2581 cid = vsock_registered_transport_cid(&transport_g2h);
2582 if (cid == VMADDR_CID_ANY)
2583 cid = vsock_registered_transport_cid(&transport_h2g);
2584 if (cid == VMADDR_CID_ANY)
2585 cid = vsock_registered_transport_cid(&transport_local);
2586
2587 if (put_user(cid, p) != 0)
2588 retval = -EFAULT;
2589 break;
2590
2591 default:
2592 retval = -ENOIOCTLCMD;
2593 }
2594
2595 return retval;
2596 }
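/* Illustrative userspace sketch (not part of this file): querying the local
 * CID through the /dev/vsock misc device registered below. The ioctl name
 * comes from the public <linux/vm_sockets.h> UAPI.
 *
 *	int fd = open("/dev/vsock", O_RDONLY);
 *	unsigned int cid;
 *	if (fd >= 0 && ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid) == 0)
 *		printf("local CID: %u\n", cid);
 */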
2597
2598 static long vsock_dev_ioctl(struct file *filp,
2599 unsigned int cmd, unsigned long arg)
2600 {
2601 return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
2602 }
2603
2604 #ifdef CONFIG_COMPAT
2605 static long vsock_dev_compat_ioctl(struct file *filp,
2606 unsigned int cmd, unsigned long arg)
2607 {
2608 return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
2609 }
2610 #endif
2611
2612 static const struct file_operations vsock_device_ops = {
2613 .owner = THIS_MODULE,
2614 .unlocked_ioctl = vsock_dev_ioctl,
2615 #ifdef CONFIG_COMPAT
2616 .compat_ioctl = vsock_dev_compat_ioctl,
2617 #endif
2618 .open = nonseekable_open,
2619 };
2620
2621 static struct miscdevice vsock_device = {
2622 .name = "vsock",
2623 .fops = &vsock_device_ops,
2624 };
2625
2626 static int __init vsock_init(void)
2627 {
2628 int err = 0;
2629
2630 vsock_init_tables();
2631
2632 vsock_proto.owner = THIS_MODULE;
2633 vsock_device.minor = MISC_DYNAMIC_MINOR;
2634 err = misc_register(&vsock_device);
2635 if (err) {
2636 pr_err("Failed to register misc device\n");
2637 goto err_reset_transport;
2638 }
2639
2640 err = proto_register(&vsock_proto, 1); /* we want our slab */
2641 if (err) {
2642 pr_err("Cannot register vsock protocol\n");
2643 goto err_deregister_misc;
2644 }
2645
2646 err = sock_register(&vsock_family_ops);
2647 if (err) {
2648 pr_err("could not register af_vsock (%d) address family: %d\n",
2649 AF_VSOCK, err);
2650 goto err_unregister_proto;
2651 }
2652
2653 vsock_bpf_build_proto();
2654
2655 return 0;
2656
2657 err_unregister_proto:
2658 proto_unregister(&vsock_proto);
2659 err_deregister_misc:
2660 misc_deregister(&vsock_device);
2661 err_reset_transport:
2662 return err;
2663 }
2664
2665 static void __exit vsock_exit(void)
2666 {
2667 misc_deregister(&vsock_device);
2668 sock_unregister(AF_VSOCK);
2669 proto_unregister(&vsock_proto);
2670 }
2671
2672 const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk)
2673 {
2674 return vsk->transport;
2675 }
2676 EXPORT_SYMBOL_GPL(vsock_core_get_transport);
2677
2678 int vsock_core_register(const struct vsock_transport *t, int features)
2679 {
2680 const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local;
2681 int err = mutex_lock_interruptible(&vsock_register_mutex);
2682
2683 if (err)
2684 return err;
2685
2686 t_h2g = transport_h2g;
2687 t_g2h = transport_g2h;
2688 t_dgram = transport_dgram;
2689 t_local = transport_local;
2690
2691 if (features & VSOCK_TRANSPORT_F_H2G) {
2692 if (t_h2g) {
2693 err = -EBUSY;
2694 goto err_busy;
2695 }
2696 t_h2g = t;
2697 }
2698
2699 if (features & VSOCK_TRANSPORT_F_G2H) {
2700 if (t_g2h) {
2701 err = -EBUSY;
2702 goto err_busy;
2703 }
2704 t_g2h = t;
2705 }
2706
2707 if (features & VSOCK_TRANSPORT_F_DGRAM) {
2708 if (t_dgram) {
2709 err = -EBUSY;
2710 goto err_busy;
2711 }
2712 t_dgram = t;
2713 }
2714
2715 if (features & VSOCK_TRANSPORT_F_LOCAL) {
2716 if (t_local) {
2717 err = -EBUSY;
2718 goto err_busy;
2719 }
2720 t_local = t;
2721 }
2722
2723 transport_h2g = t_h2g;
2724 transport_g2h = t_g2h;
2725 transport_dgram = t_dgram;
2726 transport_local = t_local;
2727
2728 err_busy:
2729 mutex_unlock(&vsock_register_mutex);
2730 return err;
2731 }
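/* Illustrative call (not from this file): a transport serving both the
 * host-to-guest and datagram roles would register itself roughly as
 *
 *	vsock_core_register(&my_transport,
 *			    VSOCK_TRANSPORT_F_H2G | VSOCK_TRANSPORT_F_DGRAM);
 *
 * where my_transport is a hypothetical struct vsock_transport. A second
 * registration for an already-claimed role fails with -EBUSY, as above.
 */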
2732 EXPORT_SYMBOL_GPL(vsock_core_register);
2733
2734 void vsock_core_unregister(const struct vsock_transport *t)
2735 {
2736 mutex_lock(&vsock_register_mutex);
2737
2738 if (transport_h2g == t)
2739 transport_h2g = NULL;
2740
2741 if (transport_g2h == t)
2742 transport_g2h = NULL;
2743
2744 if (transport_dgram == t)
2745 transport_dgram = NULL;
2746
2747 if (transport_local == t)
2748 transport_local = NULL;
2749
2750 mutex_unlock(&vsock_register_mutex);
2751 }
2752 EXPORT_SYMBOL_GPL(vsock_core_unregister);
2753
2754 module_init(vsock_init);
2755 module_exit(vsock_exit);
2756
2757 MODULE_AUTHOR("VMware, Inc.");
2758 MODULE_DESCRIPTION("VMware Virtual Socket Family");
2759 MODULE_VERSION("1.0.2.0-k");
2760 MODULE_LICENSE("GPL v2");
2761