// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them.  Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets, and SOCK_DGRAM sockets will always remain in
 * that list.  The bound table is used solely to look up sockets when packets
 * are received, and that's not necessary for SOCK_DGRAM sockets since we
 * create a datagram handle for each and need not perform a lookup.  Keeping
 * SOCK_DGRAM sockets out of the bound hash buckets reduces the chance of
 * collisions when looking for SOCK_STREAM sockets and saves us from having to
 * check the socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we do
 * not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state.  When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket.  These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is from one that has an
 * existing pending connection.  If it does, we process the packet for the
 * pending socket.  When that socket reaches the connected state, it is removed
 * from the listener socket's pending list and enqueued in the listener
 * socket's accept queue.  Callers of accept(2) will accept connected sockets
 * from the listener socket's accept queue.  If the socket cannot be accepted
 * for some reason then it is marked rejected.  Once the connection is
 * accepted, it is owned by the user process and the responsibility for cleanup
 * falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request.  Because of this, we must schedule a cleanup function to run in the
 * future, after some amount of time passes where a connection should have been
 * established.  This function ensures that the socket is off all lists so it
 * cannot be retrieved, then drops all references to the socket so it is cleaned
 * up (sock_put() -> sk_free() -> our sk_destruct implementation).  Note this
 * function will also clean up rejected sockets, those that reach the connected
 * state but leave it before they have been accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *     lock_sock(listener);
 *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called.  Our release
 * implementation will perform some cleanup then drop the last reference so our
 * sk_destruct implementation is invoked.  Our sk_destruct implementation will
 * perform additional cleanup that's common for both types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed.  Each entry in a list (such as the "global" bound and connected tables
 * and the listener socket's pending list and connected queue) holds a
 * reference.  When we defer work until process context and pass a socket as our
 * argument, we must ensure the reference count is increased to ensure the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 * other address families and exposed to userspace tools like ss(8):
 *
 *   TCP_CLOSE - unconnected
 *   TCP_SYN_SENT - connecting
 *   TCP_ESTABLISHED - connected
 *   TCP_CLOSING - disconnecting
 *   TCP_LISTEN - listening
 */

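/* A minimal userspace usage sketch (illustrative only; error handling
 * omitted).  AF_VSOCK, struct sockaddr_vm and VMADDR_CID_HOST are part of
 * the UAPI in <linux/vm_sockets.h>; the port value is made up for the
 * example:
 *
 *     int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *     struct sockaddr_vm addr = {
 *             .svm_family = AF_VSOCK,
 *             .svm_cid = VMADDR_CID_HOST,  // CID 2: the host
 *             .svm_port = 1234,            // example port
 *     };
 *     connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * A "server" would instead bind(2) to (VMADDR_CID_ANY, 1234), then call
 * listen(2) and accept(2), exercising the listener/pending/accept-queue
 * machinery described above.
 */
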
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/errqueue.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>
#include <uapi/linux/vm_sockets.h>
#include <uapi/asm-generic/ioctls.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
static void vsock_close(struct sock *sk, long timeout);

/* Protocol family. */
struct proto vsock_proto = {
	.name = "AF_VSOCK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
	.close = vsock_close,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot = vsock_bpf_update_proto,
#endif
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define VSOCK_DEFAULT_BUFFER_SIZE     (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128

/* Transport used for host->guest communication */
static const struct vsock_transport *transport_h2g;
/* Transport used for guest->host communication */
static const struct vsock_transport *transport_g2h;
/* Transport used for DGRAM communication */
static const struct vsock_transport *transport_dgram;
/* Transport used for local communication */
static const struct vsock_transport *transport_local;
static DEFINE_MUTEX(vsock_register_mutex);
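
/* Transports fill in the pointers above through the core registration API
 * implemented later in this file.  As a hedged sketch (the feature flag and
 * vsock_core_register() are the real kernel API; the transport structure
 * name is invented for illustration), a guest->host transport would
 * register roughly like:
 *
 *     static const struct vsock_transport example_g2h_transport = {
 *             .module = THIS_MODULE,
 *             // .init, .connect, .shutdown, .release, ... callbacks
 *     };
 *
 *     err = vsock_core_register(&example_g2h_transport,
 *                               VSOCK_TRANSPORT_F_G2H);
 *
 * vsock_register_mutex serializes such registrations and unregistrations.
 */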

/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the hash
 * table (vsock_unbound_sockets).  Bound sockets are added to the hash table in
 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
 * represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets.  The hash function
 * mods with VSOCK_HASH_SIZE to ensure this.
 */
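/* Worked example, assuming VSOCK_HASH_SIZE is 251 (its value in
 * <net/af_vsock.h> at the time of writing): a socket bound to port 1027
 * hashes to bucket 1027 % 251 == 23, i.e. &vsock_bind_table[23], while
 * every unbound socket lives in &vsock_bind_table[251].
 */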
#define MAX_PORT_RETRIES        24

#define VSOCK_HASH(addr)        ((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets     (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst)				\
	(((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst)		\
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk)				\
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);

/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	struct sockaddr_vm local_addr;

	if (vsock_addr_bound(&vsk->local_addr))
		return 0;
	vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __vsock_bind(sk, &local_addr);
}

static void vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
}

static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
	sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
	sock_put(&vsk->sk);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
		if (vsock_addr_equals_addr(addr, &vsk->local_addr))
			return sk_vsock(vsk);

		if (addr->svm_port == vsk->local_addr.svm_port &&
		    (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
		     addr->svm_cid == VMADDR_CID_ANY))
			return sk_vsock(vsk);
	}

	return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_bound_table(vsk))
		__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_connected_table(vsk))
		__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
					 struct sockaddr_vm *dst)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_connected_socket(src, dst);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

void vsock_remove_sock(struct vsock_sock *vsk)
{
	/* Transport reassignment must not remove the binding. */
	if (sock_flag(sk_vsock(vsk), SOCK_DEAD))
		vsock_remove_bound(vsk);

	vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);

void vsock_for_each_connected_socket(struct vsock_transport *transport,
				     void (*fn)(struct sock *sk))
{
	int i;

	spin_lock_bh(&vsock_table_lock);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;
		list_for_each_entry(vsk, &vsock_connected_table[i],
				    connected_table) {
			if (vsk->transport != transport)
				continue;

			fn(sk_vsock(vsk));
		}
	}

	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);

void vsock_add_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;

	vlistener = vsock_sk(listener);
	vpending = vsock_sk(pending);

	sock_hold(pending);
	sock_hold(listener);
	list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vpending = vsock_sk(pending);

	list_del_init(&vpending->pending_links);
	sock_put(listener);
	sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);
	vconnected = vsock_sk(connected);

	sock_hold(connected);
	sock_hold(listener);
	list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);

static bool vsock_use_local_transport(unsigned int remote_cid)
{
	if (!transport_local)
		return false;

	if (remote_cid == VMADDR_CID_LOCAL)
		return true;

	if (transport_g2h) {
		return remote_cid == transport_g2h->get_local_cid();
	} else {
		return remote_cid == VMADDR_CID_HOST;
	}
}

static void vsock_deassign_transport(struct vsock_sock *vsk)
{
	if (!vsk->transport)
		return;

	vsk->transport->destruct(vsk);
	module_put(vsk->transport->module);
	vsk->transport = NULL;
}

/* Assign a transport to a socket and call the .init transport callback.
 *
 * Note: for connection oriented sockets this must be called when
 * vsk->remote_addr is set (e.g. during connect() or when a connection request
 * on a listener socket is received).
 * The vsk->remote_addr is used to decide which transport to use:
 *  - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if
 *    g2h is not loaded, will use local transport;
 *  - remote CID <= VMADDR_CID_HOST or h2g is not loaded or remote flags field
 *    includes VMADDR_FLAG_TO_HOST flag value, will use guest->host transport;
 *  - remote CID > VMADDR_CID_HOST will use host->guest transport;
 */
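/* Concretely, under the rules above: in a guest whose g2h transport
 * reports local CID 42 (an arbitrary example value), connecting to CID 42
 * or VMADDR_CID_LOCAL picks the local (loopback) transport, and connecting
 * to VMADDR_CID_HOST (2) picks the guest->host transport; on a host with
 * h2g loaded, connecting to CID 42 picks the host->guest transport.
 */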
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
{
	const struct vsock_transport *new_transport;
	struct sock *sk = sk_vsock(vsk);
	unsigned int remote_cid = vsk->remote_addr.svm_cid;
	__u8 remote_flags;
	int ret;

	/* If the packet arrives with both the source and destination CIDs
	 * higher than VMADDR_CID_HOST, then a vsock channel where all the
	 * packets are forwarded to the host should be established. Then the
	 * host will need to forward the packets to the guest.
	 *
	 * The flag is set on the (listen) receive path (psk is not NULL). On
	 * the connect path the flag can be set by the user space application.
	 */
	if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST &&
	    vsk->remote_addr.svm_cid > VMADDR_CID_HOST)
		vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST;

	remote_flags = vsk->remote_addr.svm_flags;

	switch (sk->sk_type) {
	case SOCK_DGRAM:
		new_transport = transport_dgram;
		break;
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		if (vsock_use_local_transport(remote_cid))
			new_transport = transport_local;
		else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g ||
			 (remote_flags & VMADDR_FLAG_TO_HOST))
			new_transport = transport_g2h;
		else
			new_transport = transport_h2g;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if (vsk->transport) {
		if (vsk->transport == new_transport)
			return 0;

		/* transport->release() must be called with sock lock acquired.
		 * This path can only be taken during vsock_connect(), where we
		 * have already held the sock lock. In the other cases, this
		 * function is called on a new socket which is not assigned to
		 * any transport.
		 */
		vsk->transport->release(vsk);
		vsock_deassign_transport(vsk);

		/* transport's release() and destruct() can touch some socket
		 * state, since we are reassigning the socket to a new transport
		 * during vsock_connect(), let's reset these fields to have a
		 * clean state.
		 */
		sock_reset_flag(sk, SOCK_DONE);
		sk->sk_state = TCP_CLOSE;
		vsk->peer_shutdown = 0;
	}

	/* We increase the module refcnt to prevent the transport from being
	 * unloaded while there are open sockets assigned to it.
	 */
	if (!new_transport || !try_module_get(new_transport->module))
		return -ENODEV;

	if (sk->sk_type == SOCK_SEQPACKET) {
		if (!new_transport->seqpacket_allow ||
		    !new_transport->seqpacket_allow(remote_cid)) {
			module_put(new_transport->module);
			return -ESOCKTNOSUPPORT;
		}
	}

	ret = new_transport->init(vsk, psk);
	if (ret) {
		module_put(new_transport->module);
		return ret;
	}

	vsk->transport = new_transport;

	return 0;
}
EXPORT_SYMBOL_GPL(vsock_assign_transport);

bool vsock_find_cid(unsigned int cid)
{
	if (transport_g2h && cid == transport_g2h->get_local_cid())
		return true;

	if (transport_h2g && cid == VMADDR_CID_HOST)
		return true;

	if (transport_local && cid == VMADDR_CID_LOCAL)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(vsock_find_cid);

static struct sock *vsock_dequeue_accept(struct sock *listener)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);

	if (list_empty(&vlistener->accept_queue))
		return NULL;

	vconnected = list_entry(vlistener->accept_queue.next,
				struct vsock_sock, accept_queue);

	list_del_init(&vconnected->accept_queue);
	sock_put(listener);
	/* The caller will need a reference on the connected socket so we let
	 * it call sock_put().
	 */

	return sk_vsock(vconnected);
}

static bool vsock_is_accept_queue_empty(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (!vsk->transport)
		return -ENODEV;

	return vsk->transport->shutdown(vsk, mode);
}

static void vsock_pending_work(struct work_struct *work)
{
	struct sock *sk;
	struct sock *listener;
	struct vsock_sock *vsk;
	bool cleanup;

	vsk = container_of(work, struct vsock_sock, pending_work.work);
	sk = sk_vsock(vsk);
	listener = vsk->listener;
	cleanup = true;

	lock_sock(listener);
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (vsock_is_pending(sk)) {
		vsock_remove_pending(listener, sk);

		sk_acceptq_removed(listener);
	} else if (!vsk->rejected) {
		/* We are not on the pending list and accept() did not reject
		 * us, so we must have been accepted by our user process.  We
		 * just need to drop our references to the sockets and be on
		 * our way.
		 */
		cleanup = false;
		goto out;
	}

	/* We need to remove ourself from the global connected sockets list so
	 * incoming packets can't find this socket, and to reduce the reference
	 * count.
	 */
	vsock_remove_connected(vsk);

	sk->sk_state = TCP_CLOSE;

out:
	release_sock(sk);
	release_sock(listener);
	if (cleanup)
		sock_put(sk);

	sock_put(sk);
	sock_put(listener);
}

/**** SOCKET OPERATIONS ****/

static int __vsock_bind_connectible(struct vsock_sock *vsk,
				    struct sockaddr_vm *addr)
{
	static u32 port;
	struct sockaddr_vm new_addr;

	if (!port)
		port = get_random_u32_above(LAST_RESERVED_PORT);

	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

	if (addr->svm_port == VMADDR_PORT_ANY) {
		bool found = false;
		unsigned int i;

		for (i = 0; i < MAX_PORT_RETRIES; i++) {
			if (port <= LAST_RESERVED_PORT)
				port = LAST_RESERVED_PORT + 1;

			new_addr.svm_port = port++;

			if (!__vsock_find_bound_socket(&new_addr)) {
				found = true;
				break;
			}
		}

		if (!found)
			return -EADDRNOTAVAIL;
	} else {
		/* If port is in reserved range, ensure caller
		 * has necessary privileges.
		 */
		if (addr->svm_port <= LAST_RESERVED_PORT &&
		    !capable(CAP_NET_BIND_SERVICE)) {
			return -EACCES;
		}

		if (__vsock_find_bound_socket(&new_addr))
			return -EADDRINUSE;
	}

	vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

	/* Remove connection oriented sockets from the unbound list and add them
	 * to the hash table for easy lookup by their address.  The unbound list
	 * is simply an extra entry at the end of the hash table, a trick used
	 * by AF_UNIX.
	 */
	__vsock_remove_bound(vsk);
	__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

	return 0;
}

static int __vsock_bind_dgram(struct vsock_sock *vsk,
			      struct sockaddr_vm *addr)
{
	return vsk->transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	int retval;

	/* First ensure this socket isn't already bound. */
	if (vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	/* Now bind to the provided address or select appropriate values if
	 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY).  Note that
	 * like AF_INET prevents binding to a non-local IP address (in most
	 * cases), we only allow binding to a local CID.
	 */
	if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid))
		return -EADDRNOTAVAIL;

	switch (sk->sk_socket->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		spin_lock_bh(&vsock_table_lock);
		retval = __vsock_bind_connectible(vsk, addr);
		spin_unlock_bh(&vsock_table_lock);
		break;

	case SOCK_DGRAM:
		retval = __vsock_bind_dgram(vsk, addr);
		break;

	default:
		retval = -EINVAL;
		break;
	}

	return retval;
}

static void vsock_connect_timeout(struct work_struct *work);

static struct sock *__vsock_create(struct net *net,
				   struct socket *sock,
				   struct sock *parent,
				   gfp_t priority,
				   unsigned short type,
				   int kern)
{
	struct sock *sk;
	struct vsock_sock *psk;
	struct vsock_sock *vsk;

	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set in sock_init_data, but only if sock is
	 * non-NULL. We make sure that our sockets always have a type by
	 * setting it here if needed.
	 */
	if (!sock)
		sk->sk_type = type;

	vsk = vsock_sk(sk);
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = vsock_sk_destruct;
	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
	sock_reset_flag(sk, SOCK_DONE);

	INIT_LIST_HEAD(&vsk->bound_table);
	INIT_LIST_HEAD(&vsk->connected_table);
	vsk->listener = NULL;
	INIT_LIST_HEAD(&vsk->pending_links);
	INIT_LIST_HEAD(&vsk->accept_queue);
	vsk->rejected = false;
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;
	vsk->peer_shutdown = 0;
	INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
	INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);

	psk = parent ? vsock_sk(parent) : NULL;
	if (parent) {
		vsk->trusted = psk->trusted;
		vsk->owner = get_cred(psk->owner);
		vsk->connect_timeout = psk->connect_timeout;
		vsk->buffer_size = psk->buffer_size;
		vsk->buffer_min_size = psk->buffer_min_size;
		vsk->buffer_max_size = psk->buffer_max_size;
		security_sk_clone(parent, sk);
	} else {
		vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
		vsk->owner = get_current_cred();
		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
		vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;
		vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE;
		vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE;
	}

	return sk;
}

static bool sock_type_connectible(u16 type)
{
	return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET);
}

static void __vsock_release(struct sock *sk, int level)
{
	struct vsock_sock *vsk;
	struct sock *pending;

	vsk = vsock_sk(sk);
	pending = NULL;	/* Compiler warning. */

	/* When "level" is SINGLE_DEPTH_NESTING, use the nested
	 * version to avoid the warning "possible recursive locking
	 * detected". When "level" is 0, lock_sock_nested(sk, level)
	 * is the same as lock_sock(sk).
	 */
	lock_sock_nested(sk, level);

	/* Indicate to vsock_remove_sock() that the socket is being released
	 * and can be removed from the bound_table.  This is unlike the
	 * transport reassignment case, where the socket must remain bound
	 * despite vsock_remove_sock() being called from the transport
	 * release() callback.
	 */
	sock_set_flag(sk, SOCK_DEAD);

	if (vsk->transport)
		vsk->transport->release(vsk);
	else if (sock_type_connectible(sk->sk_type))
		vsock_remove_sock(vsk);

	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(&sk->sk_receive_queue);

	/* Clean up any sockets that never were accepted. */
	while ((pending = vsock_dequeue_accept(sk)) != NULL) {
		__vsock_release(pending, SINGLE_DEPTH_NESTING);
		sock_put(pending);
	}

	release_sock(sk);
	sock_put(sk);
}

static void vsock_sk_destruct(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* Flush MSG_ZEROCOPY leftovers. */
	__skb_queue_purge(&sk->sk_error_queue);

	vsock_deassign_transport(vsk);

	/* When clearing these addresses, there's no need to set the family and
	 * possibly register the address family with the kernel.
	 */
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(vsk->owner);
}

static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}

struct sock *vsock_create_connected(struct sock *parent)
{
	return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL,
			      parent->sk_type, 0);
}
EXPORT_SYMBOL_GPL(vsock_create_connected);

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
	if (WARN_ON(!vsk->transport))
		return 0;

	return vsk->transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

s64 vsock_connectible_has_data(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);

	if (WARN_ON(!vsk->transport))
		return 0;

	if (sk->sk_type == SOCK_SEQPACKET)
		return vsk->transport->seqpacket_has_data(vsk);
	else
		return vsock_stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_connectible_has_data);

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
	if (WARN_ON(!vsk->transport))
		return 0;

	return vsk->transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);

void vsock_data_ready(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (vsock_stream_has_data(vsk) >= sk->sk_rcvlowat ||
	    sock_flag(sk, SOCK_DONE))
		sk->sk_data_ready(sk);
}
EXPORT_SYMBOL_GPL(vsock_data_ready);

/* Dummy callback required by sockmap.
 * See unconditional call of saved_close() in sock_map_close().
 */
static void vsock_close(struct sock *sk, long timeout)
{
}

static int vsock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sk->sk_prot->close(sk, 0);
	__vsock_release(sk, 0);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	int err;
	struct sock *sk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;

	if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	err = __vsock_bind(sk, vm_addr);
	release_sock(sk);

	return err;
}

static int vsock_getname(struct socket *sock,
			 struct sockaddr *addr, int peer)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (peer) {
		if (sock->state != SS_CONNECTED) {
			err = -ENOTCONN;
			goto out;
		}
		vm_addr = &vsk->remote_addr;
	} else {
		vm_addr = &vsk->local_addr;
	}

	if (!vm_addr) {
		err = -EINVAL;
		goto out;
	}

	/* sys_getsockname() and sys_getpeername() pass us a
	 * MAX_SOCK_ADDR-sized buffer and don't set addr_len.  Unfortunately
	 * that macro is defined in socket.c instead of .h, so we hardcode its
	 * value here.
	 */
	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	err = sizeof(*vm_addr);

out:
	release_sock(sk);
	return err;
}

static int vsock_shutdown(struct socket *sock, int mode)
{
	int err;
	struct sock *sk;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do.  Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;

	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a connection oriented socket and it is not connected then
	 * bail out immediately.  If it is a DGRAM socket then we must first
	 * kick the socket so that it wakes up from any sleeping calls, for
	 * example recv(), and then afterwards return the error.
	 */

	sk = sock->sk;

	lock_sock(sk);
	if (sock->state == SS_UNCONNECTED) {
		err = -ENOTCONN;
		if (sock_type_connectible(sk->sk_type))
			goto out;
	} else {
		sock->state = SS_DISCONNECTING;
		err = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);

		if (sock_type_connectible(sk->sk_type)) {
			sock_reset_flag(sk, SOCK_DONE);
			vsock_send_shutdown(sk, mode);
		}
	}

out:
	release_sock(sk);
	return err;
}

static __poll_t vsock_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk;
	__poll_t mask;
	struct vsock_sock *vsk;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		/* Signify that there has been an error on this socket. */
		mask |= EPOLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as a
	 * case of EPOLLHUP set.
	 */
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (vsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= EPOLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    vsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= EPOLLRDHUP;
	}

	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown for
		 * sending.
		 */
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	} else if (sock_type_connectible(sk->sk_type)) {
		const struct vsock_transport *transport;

		lock_sock(sk);

		transport = vsk->transport;

		/* Listening sockets that have connections in their accept
		 * queue can be read.
		 */
		if (sk->sk_state == TCP_LISTEN
		    && !vsock_is_accept_queue_empty(sk))
			mask |= EPOLLIN | EPOLLRDNORM;

		/* If there is something in the queue then we can read. */
		if (transport && transport->stream_is_active(vsk) &&
		    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
			bool data_ready_now = false;
			int target = sock_rcvlowat(sk, 0, INT_MAX);
			int ret = transport->notify_poll_in(
					vsk, target, &data_ready_now);
			if (ret < 0) {
				mask |= EPOLLERR;
			} else {
				if (data_ready_now)
					mask |= EPOLLIN | EPOLLRDNORM;
			}
		}

		/* Sockets whose connections have been closed, reset, or
		 * terminated should also be considered read, and we check the
		 * shutdown flag for that.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN ||
		    vsk->peer_shutdown & SEND_SHUTDOWN) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		/* Connected sockets that can produce data can be written. */
		if (transport && sk->sk_state == TCP_ESTABLISHED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
				bool space_avail_now = false;
				int ret = transport->notify_poll_out(
						vsk, 1, &space_avail_now);
				if (ret < 0) {
					mask |= EPOLLERR;
				} else {
					if (space_avail_now)
						/* Remove EPOLLWRBAND since INET
						 * sockets are not setting it.
						 */
						mask |= EPOLLOUT | EPOLLWRNORM;
				}
			}
		}

		/* Simulate INET socket poll behavior, which sets
		 * EPOLLOUT|EPOLLWRNORM when the peer is closed and there is
		 * nothing to read, but local send is not shutdown.
		 */
		if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				mask |= EPOLLOUT | EPOLLWRNORM;
		}

		release_sock(sk);
	}

	return mask;
}

static int vsock_read_skb(struct sock *sk, skb_read_actor_t read_actor)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (WARN_ON_ONCE(!vsk->transport))
		return -ENODEV;

	return vsk->transport->read_skb(vsk, read_actor);
}

static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;
	const struct vsock_transport *transport;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* For now, MSG_DONTWAIT is always assumed... */
	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	transport = vsk->transport;

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	/* If the provided message contains an address, use that.  Otherwise
	 * fall back on the socket's remote handle (if it has been connected).
	 */
	if (msg->msg_name &&
	    vsock_addr_cast(msg->msg_name, msg->msg_namelen,
			    &remote_addr) == 0) {
		/* Ensure this address is of the right type and is a valid
		 * destination.
		 */

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		if (!vsock_addr_bound(remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &vsk->remote_addr;

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		/* XXX Should connect() or this function ensure remote_addr is
		 * bound?
		 */
		if (!vsock_addr_bound(&vsk->remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else {
		err = -EINVAL;
		goto out;
	}

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	err = transport->dgram_enqueue(vsk, remote_addr, msg, len);

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_connect(struct socket *sock,
			       struct sockaddr *addr, int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	err = vsock_addr_cast(addr, addr_len, &remote_addr);
	if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
		lock_sock(sk);
		vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
				VMADDR_PORT_ANY);
		sock->state = SS_UNCONNECTED;
		release_sock(sk);
		return 0;
	} else if (err != 0)
		return -EINVAL;

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	if (!vsk->transport->dgram_allow(remote_addr->svm_cid,
					 remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
	sock->state = SS_CONNECTED;

	/* sock map disallows redirection of non-TCP sockets with sk_state !=
	 * TCP_ESTABLISHED (see sock_map_redirect_allowed()), so we set
	 * TCP_ESTABLISHED here to allow redirection of connected vsock dgrams.
	 *
	 * This doesn't seem to be an abnormal state for datagram sockets, as
	 * the same approach can be seen in other datagram socket types as well
	 * (such as unix sockets).
	 */
	sk->sk_state = TCP_ESTABLISHED;

out:
	release_sock(sk);
	return err;
}

int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			  size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct vsock_sock *vsk = vsock_sk(sk);

	return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
}

int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t len, int flags)
{
#ifdef CONFIG_BPF_SYSCALL
	struct sock *sk = sock->sk;
	const struct proto *prot;

	prot = READ_ONCE(sk->sk_prot);
	if (prot != &vsock_proto)
		return prot->recvmsg(sk, msg, len, flags, NULL);
#endif

	return __vsock_dgram_recvmsg(sock, msg, len, flags);
}
EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg);

static int vsock_do_ioctl(struct socket *sock, unsigned int cmd,
			  int __user *arg)
{
	struct sock *sk = sock->sk;
	struct vsock_sock *vsk;
	int ret;

	vsk = vsock_sk(sk);

	switch (cmd) {
	case SIOCOUTQ: {
		ssize_t n_bytes;

		if (!vsk->transport || !vsk->transport->unsent_bytes) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (sock_type_connectible(sk->sk_type) && sk->sk_state == TCP_LISTEN) {
			ret = -EINVAL;
			break;
		}

		n_bytes = vsk->transport->unsent_bytes(vsk);
		if (n_bytes < 0) {
			ret = n_bytes;
			break;
		}

		ret = put_user(n_bytes, arg);
		break;
	}
	default:
		ret = -ENOIOCTLCMD;
	}

	return ret;
}

static int vsock_ioctl(struct socket *sock, unsigned int cmd,
		       unsigned long arg)
{
	int ret;

	lock_sock(sock->sk);
	ret = vsock_do_ioctl(sock, cmd, (int __user *)arg);
	release_sock(sock->sk);

	return ret;
}

static const struct proto_ops vsock_dgram_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_dgram_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = vsock_ioctl,
	.listen = sock_no_listen,
	.shutdown = vsock_shutdown,
	.sendmsg = vsock_dgram_sendmsg,
	.recvmsg = vsock_dgram_recvmsg,
	.mmap = sock_no_mmap,
	.read_skb = vsock_read_skb,
};

static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
	const struct vsock_transport *transport = vsk->transport;

	if (!transport || !transport->cancel_pkt)
		return -EOPNOTSUPP;

	return transport->cancel_pkt(vsk);
}

static void vsock_connect_timeout(struct work_struct *work)
{
	struct sock *sk;
	struct vsock_sock *vsk;

	vsk = container_of(work, struct vsock_sock, connect_work.work);
	sk = sk_vsock(vsk);

	lock_sock(sk);
	if (sk->sk_state == TCP_SYN_SENT &&
	    (sk->sk_shutdown != SHUTDOWN_MASK)) {
		sk->sk_state = TCP_CLOSE;
		sk->sk_socket->state = SS_UNCONNECTED;
		sk->sk_err = ETIMEDOUT;
		sk_error_report(sk);
		vsock_transport_cancel_pkt(vsk);
	}
	release_sock(sk);

	sock_put(sk);
}

static int vsock_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	const struct vsock_transport *transport;
	struct sockaddr_vm *remote_addr;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* XXX AF_UNSPEC should make us disconnect like AF_INET. */
	switch (sock->state) {
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_DISCONNECTING:
		err = -EINVAL;
		goto out;
	case SS_CONNECTING:
		/* This continues on so we can move sock into the SS_CONNECTED
		 * state once the connection has completed (at which point err
		 * will be set to zero also).  Otherwise, we will either wait
		 * for the connection or return -EALREADY should this be a
		 * non-blocking call.
		 */
		err = -EALREADY;
		if (flags & O_NONBLOCK)
			goto out;
		break;
	default:
		if ((sk->sk_state == TCP_LISTEN) ||
		    vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
			err = -EINVAL;
			goto out;
		}

		/* Set the remote address that we are connecting to. */
		memcpy(&vsk->remote_addr, remote_addr,
		       sizeof(vsk->remote_addr));

		err = vsock_assign_transport(vsk, NULL);
		if (err)
			goto out;

		transport = vsk->transport;

		/* The hypervisor and well-known contexts do not have socket
		 * endpoints.
		 */
		if (!transport ||
		    !transport->stream_allow(remote_addr->svm_cid,
					     remote_addr->svm_port)) {
			err = -ENETUNREACH;
			goto out;
		}

		if (vsock_msgzerocopy_allow(transport)) {
			set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
			/* If this option was set before 'connect()',
			 * when transport was unknown, check that this
			 * feature is supported here.
			 */
			err = -EOPNOTSUPP;
			goto out;
		}

		err = vsock_auto_bind(vsk);
		if (err)
			goto out;

		sk->sk_state = TCP_SYN_SENT;

		err = transport->connect(vsk);
		if (err < 0)
			goto out;

		/* sk_err might have been set as a result of an earlier
		 * (failed) connect attempt.
		 */
		sk->sk_err = 0;

		/* Mark sock as connecting and set the error code to in
		 * progress in case this is a non-blocking connect.
		 */
		sock->state = SS_CONNECTING;
		err = -EINPROGRESS;
	}

	/* The receive path will handle all communication until we are able to
	 * enter the connected state.  Here we wait for the connection to be
	 * completed or a notification of an error.
	 */
	timeout = vsk->connect_timeout;
	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
		if (flags & O_NONBLOCK) {
			/* If we're not going to block, we schedule a timeout
			 * function to generate a timeout on the connection
			 * attempt, in case the peer doesn't respond in a
			 * timely manner. We hold on to the socket until the
			 * timeout fires.
			 */
			sock_hold(sk);

			/* If the timeout function is already scheduled,
			 * reschedule it, then ungrab the socket refcount to
			 * keep it balanced.
			 */
			if (mod_delayed_work(system_wq, &vsk->connect_work,
					     timeout))
				sock_put(sk);

			/* Skip ahead to preserve error code set above. */
			goto out_wait;
		}

		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
			sock->state = SS_UNCONNECTED;
			vsock_transport_cancel_pkt(vsk);
			vsock_remove_connected(vsk);
			goto out_wait;
		} else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
			err = -ETIMEDOUT;
			sk->sk_state = TCP_CLOSE;
			sock->state = SS_UNCONNECTED;
			vsock_transport_cancel_pkt(vsk);
			goto out_wait;
		}

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}

	if (sk->sk_err) {
		err = -sk->sk_err;
		sk->sk_state = TCP_CLOSE;
		sock->state = SS_UNCONNECTED;
	} else {
		err = 0;
	}

out_wait:
	finish_wait(sk_sleep(sk), &wait);
out:
	release_sock(sk);
	return err;
}

static int vsock_accept(struct socket *sock, struct socket *newsock,
			struct proto_accept_arg *arg)
{
	struct sock *listener;
	int err;
	struct sock *connected;
	struct vsock_sock *vconnected;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	listener = sock->sk;

	lock_sock(listener);

	if (!sock_type_connectible(sock->type)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (listener->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out;
	}

	/* Wait for child sockets to appear; these are the new sockets created
	 * upon connection establishment.
	 */
	timeout = sock_rcvtimeo(listener, arg->flags & O_NONBLOCK);
	prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);

	while ((connected = vsock_dequeue_accept(listener)) == NULL &&
	       listener->sk_err == 0) {
		release_sock(listener);
		timeout = schedule_timeout(timeout);
		finish_wait(sk_sleep(listener), &wait);
		lock_sock(listener);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			goto out;
		} else if (timeout == 0) {
			err = -EAGAIN;
			goto out;
		}

		prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(listener), &wait);

	if (listener->sk_err)
		err = -listener->sk_err;

	if (connected) {
		sk_acceptq_removed(listener);

		lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
		vconnected = vsock_sk(connected);

		/* If the listener socket has received an error, then we should
		 * reject this socket and return.  Note that we simply mark the
		 * socket rejected, drop our reference, and let the cleanup
		 * function handle the cleanup; the fact that we found it in
		 * the listener's accept queue guarantees that the cleanup
		 * function hasn't run yet.
		 */
		if (err) {
			vconnected->rejected = true;
		} else {
			newsock->state = SS_CONNECTED;
			sock_graft(connected, newsock);
			if (vsock_msgzerocopy_allow(vconnected->transport))
				set_bit(SOCK_SUPPORT_ZC,
					&connected->sk_socket->flags);
		}

		release_sock(connected);
		sock_put(connected);
	}

out:
	release_sock(listener);
	return err;
}

static int vsock_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;

	sk = sock->sk;

	lock_sock(sk);

	if (!sock_type_connectible(sk->sk_type)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (sock->state != SS_UNCONNECTED) {
		err = -EINVAL;
		goto out;
	}

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr)) {
		err = -EINVAL;
		goto out;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_state = TCP_LISTEN;

	err = 0;

out:
	release_sock(sk);
	return err;
}
1734 
vsock_update_buffer_size(struct vsock_sock * vsk,const struct vsock_transport * transport,u64 val)1735 static void vsock_update_buffer_size(struct vsock_sock *vsk,
1736 				     const struct vsock_transport *transport,
1737 				     u64 val)
1738 {
1739 	if (val > vsk->buffer_max_size)
1740 		val = vsk->buffer_max_size;
1741 
1742 	if (val < vsk->buffer_min_size)
1743 		val = vsk->buffer_min_size;
1744 
1745 	if (val != vsk->buffer_size &&
1746 	    transport && transport->notify_buffer_size)
1747 		transport->notify_buffer_size(vsk, &val);
1748 
1749 	vsk->buffer_size = val;
1750 }
1751 
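/* Options at the AF_VSOCK level, plus SO_ZEROCOPY at the SOL_SOCKET level,
 * are handled here; every other SOL_SOCKET option is forwarded to
 * sock_setsockopt().  A minimal userspace sketch (illustrative only, error
 * handling omitted):
 *
 *	unsigned long long size = 256 * 1024;
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
 *		   &size, sizeof(size));
 *
 * Note that the buffer-size options take a u64; passing an int would fail
 * the COPY_IN() length check with -EINVAL.
 */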
1752 static int vsock_connectible_setsockopt(struct socket *sock,
1753 					int level,
1754 					int optname,
1755 					sockptr_t optval,
1756 					unsigned int optlen)
1757 {
1758 	int err;
1759 	struct sock *sk;
1760 	struct vsock_sock *vsk;
1761 	const struct vsock_transport *transport;
1762 	u64 val;
1763 
1764 	if (level != AF_VSOCK && level != SOL_SOCKET)
1765 		return -ENOPROTOOPT;
1766 
1767 #define COPY_IN(_v)                                       \
1768 	do {						  \
1769 		if (optlen < sizeof(_v)) {		  \
1770 			err = -EINVAL;			  \
1771 			goto exit;			  \
1772 		}					  \
1773 		if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) {	\
1774 			err = -EFAULT;					\
1775 			goto exit;					\
1776 		}							\
1777 	} while (0)
1778 
1779 	err = 0;
1780 	sk = sock->sk;
1781 	vsk = vsock_sk(sk);
1782 
1783 	lock_sock(sk);
1784 
1785 	transport = vsk->transport;
1786 
1787 	if (level == SOL_SOCKET) {
1788 		int zerocopy;
1789 
1790 		if (optname != SO_ZEROCOPY) {
1791 			release_sock(sk);
1792 			return sock_setsockopt(sock, level, optname, optval, optlen);
1793 		}
1794 
1795 		/* Use 'int' here, because the variable used to
1796 		 * set this option usually has this type.
1797 		 */
1798 		COPY_IN(zerocopy);
1799 
1800 		if (zerocopy < 0 || zerocopy > 1) {
1801 			err = -EINVAL;
1802 			goto exit;
1803 		}
1804 
1805 		if (transport && !vsock_msgzerocopy_allow(transport)) {
1806 			err = -EOPNOTSUPP;
1807 			goto exit;
1808 		}
1809 
1810 		sock_valbool_flag(sk, SOCK_ZEROCOPY, zerocopy);
1811 		goto exit;
1812 	}
1813 
1814 	switch (optname) {
1815 	case SO_VM_SOCKETS_BUFFER_SIZE:
1816 		COPY_IN(val);
1817 		vsock_update_buffer_size(vsk, transport, val);
1818 		break;
1819 
1820 	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1821 		COPY_IN(val);
1822 		vsk->buffer_max_size = val;
1823 		vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
1824 		break;
1825 
1826 	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1827 		COPY_IN(val);
1828 		vsk->buffer_min_size = val;
1829 		vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
1830 		break;
1831 
1832 	case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
1833 	case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: {
1834 		struct __kernel_sock_timeval tv;
1835 
1836 		err = sock_copy_user_timeval(&tv, optval, optlen,
1837 					     optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
1838 		if (err)
1839 			break;
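		/* Convert the timeval to jiffies, rounding the microsecond
		 * part up so a small nonzero timeout never truncates to zero.
		 * The tv_sec bound keeps the multiplication by HZ inside
		 * MAX_SCHEDULE_TIMEOUT; an all-zero timeval falls back to the
		 * default connect timeout.
		 */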
1840 		if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
1841 		    tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
1842 			vsk->connect_timeout = tv.tv_sec * HZ +
1843 				DIV_ROUND_UP((unsigned long)tv.tv_usec, (USEC_PER_SEC / HZ));
1844 			if (vsk->connect_timeout == 0)
1845 				vsk->connect_timeout =
1846 				    VSOCK_DEFAULT_CONNECT_TIMEOUT;
1847 
1848 		} else {
1849 			err = -ERANGE;
1850 		}
1851 		break;
1852 	}
1853 
1854 	default:
1855 		err = -ENOPROTOOPT;
1856 		break;
1857 	}
1858 
1859 #undef COPY_IN
1860 
1861 exit:
1862 	release_sock(sk);
1863 	return err;
1864 }
1865 
1866 static int vsock_connectible_getsockopt(struct socket *sock,
1867 					int level, int optname,
1868 					char __user *optval,
1869 					int __user *optlen)
1870 {
1871 	struct sock *sk = sock->sk;
1872 	struct vsock_sock *vsk = vsock_sk(sk);
1873 
1874 	union {
1875 		u64 val64;
1876 		struct old_timeval32 tm32;
1877 		struct __kernel_old_timeval tm;
1878 		struct __kernel_sock_timeval stm;
1879 	} v;
1880 
1881 	int lv = sizeof(v.val64);
1882 	int len;
1883 
1884 	if (level != AF_VSOCK)
1885 		return -ENOPROTOOPT;
1886 
1887 	if (get_user(len, optlen))
1888 		return -EFAULT;
1889 
1890 	memset(&v, 0, sizeof(v));
1891 
1892 	switch (optname) {
1893 	case SO_VM_SOCKETS_BUFFER_SIZE:
1894 		v.val64 = vsk->buffer_size;
1895 		break;
1896 
1897 	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1898 		v.val64 = vsk->buffer_max_size;
1899 		break;
1900 
1901 	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1902 		v.val64 = vsk->buffer_min_size;
1903 		break;
1904 
1905 	case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW:
1906 	case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD:
1907 		lv = sock_get_timeout(vsk->connect_timeout, &v,
1908 				      optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD);
1909 		break;
1910 
1911 	default:
1912 		return -ENOPROTOOPT;
1913 	}
1914 
1915 	if (len < lv)
1916 		return -EINVAL;
1917 	if (len > lv)
1918 		len = lv;
1919 	if (copy_to_user(optval, &v, len))
1920 		return -EFAULT;
1921 
1922 	if (put_user(len, optlen))
1923 		return -EFAULT;
1924 
1925 	return 0;
1926 }
1927 
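/* Connection-oriented send path shared by SOCK_STREAM and SOCK_SEQPACKET.
 * MSG_ZEROCOPY is honoured only when the bound transport advertises support
 * (SO_ZEROCOPY is expected to have been enabled first); completion
 * notifications then arrive on the error queue, as for other MSG_ZEROCOPY
 * users.
 */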
1928 static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg,
1929 				     size_t len)
1930 {
1931 	struct sock *sk;
1932 	struct vsock_sock *vsk;
1933 	const struct vsock_transport *transport;
1934 	ssize_t total_written;
1935 	long timeout;
1936 	int err;
1937 	struct vsock_transport_send_notify_data send_data;
1938 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1939 
1940 	sk = sock->sk;
1941 	vsk = vsock_sk(sk);
1942 	total_written = 0;
1943 	err = 0;
1944 
1945 	if (msg->msg_flags & MSG_OOB)
1946 		return -EOPNOTSUPP;
1947 
1948 	lock_sock(sk);
1949 
1950 	transport = vsk->transport;
1951 
1952 	/* Callers should not provide a destination with connection-oriented
1953 	 * sockets.
1954 	 */
1955 	if (msg->msg_namelen) {
1956 		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1957 		goto out;
1958 	}
1959 
1960 	/* Send data only if neither side has shut down in this direction. */
1961 	if (sk->sk_shutdown & SEND_SHUTDOWN ||
1962 	    vsk->peer_shutdown & RCV_SHUTDOWN) {
1963 		err = -EPIPE;
1964 		goto out;
1965 	}
1966 
1967 	if (!transport || sk->sk_state != TCP_ESTABLISHED ||
1968 	    !vsock_addr_bound(&vsk->local_addr)) {
1969 		err = -ENOTCONN;
1970 		goto out;
1971 	}
1972 
1973 	if (!vsock_addr_bound(&vsk->remote_addr)) {
1974 		err = -EDESTADDRREQ;
1975 		goto out;
1976 	}
1977 
1978 	if (msg->msg_flags & MSG_ZEROCOPY &&
1979 	    !vsock_msgzerocopy_allow(transport)) {
1980 		err = -EOPNOTSUPP;
1981 		goto out;
1982 	}
1983 
1984 	/* Wait for room in the produce queue to enqueue our user's data. */
1985 	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1986 
1987 	err = transport->notify_send_init(vsk, &send_data);
1988 	if (err < 0)
1989 		goto out;
1990 
1991 	while (total_written < len) {
1992 		ssize_t written;
1993 
1994 		add_wait_queue(sk_sleep(sk), &wait);
1995 		while (vsock_stream_has_space(vsk) == 0 &&
1996 		       sk->sk_err == 0 &&
1997 		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
1998 		       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {
1999 
2000 			/* Don't wait for non-blocking sockets. */
2001 			if (timeout == 0) {
2002 				err = -EAGAIN;
2003 				remove_wait_queue(sk_sleep(sk), &wait);
2004 				goto out_err;
2005 			}
2006 
2007 			err = transport->notify_send_pre_block(vsk, &send_data);
2008 			if (err < 0) {
2009 				remove_wait_queue(sk_sleep(sk), &wait);
2010 				goto out_err;
2011 			}
2012 
2013 			release_sock(sk);
2014 			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
2015 			lock_sock(sk);
2016 			if (signal_pending(current)) {
2017 				err = sock_intr_errno(timeout);
2018 				remove_wait_queue(sk_sleep(sk), &wait);
2019 				goto out_err;
2020 			} else if (timeout == 0) {
2021 				err = -EAGAIN;
2022 				remove_wait_queue(sk_sleep(sk), &wait);
2023 				goto out_err;
2024 			}
2025 		}
2026 		remove_wait_queue(sk_sleep(sk), &wait);
2027 
2028 		/* These checks occur both as part of and after the loop
2029 		 * conditional since we need to check before and after
2030 		 * sleeping.
2031 		 */
2032 		if (sk->sk_err) {
2033 			err = -sk->sk_err;
2034 			goto out_err;
2035 		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
2036 			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
2037 			err = -EPIPE;
2038 			goto out_err;
2039 		}
2040 
2041 		err = transport->notify_send_pre_enqueue(vsk, &send_data);
2042 		if (err < 0)
2043 			goto out_err;
2044 
2045 		/* Note that enqueue will only write as many bytes as are free
2046 		 * in the produce queue, so we don't need to ensure len is
2047 		 * smaller than the queue size.  It is the caller's
2048 		 * responsibility to check how many bytes we were able to send.
2049 		 */
2050 
2051 		if (sk->sk_type == SOCK_SEQPACKET) {
2052 			written = transport->seqpacket_enqueue(vsk,
2053 						msg, len - total_written);
2054 		} else {
2055 			written = transport->stream_enqueue(vsk,
2056 					msg, len - total_written);
2057 		}
2058 
2059 		if (written < 0) {
2060 			err = written;
2061 			goto out_err;
2062 		}
2063 
2064 		total_written += written;
2065 
2066 		err = transport->notify_send_post_enqueue(
2067 				vsk, written, &send_data);
2068 		if (err < 0)
2069 			goto out_err;
2070 
2071 	}
2072 
2073 out_err:
2074 	if (total_written > 0) {
2075 		/* Return number of written bytes only if:
2076 		 * 1) SOCK_STREAM socket.
2077 		 * 2) SOCK_SEQPACKET socket when whole buffer is sent.
2078 		 */
2079 		if (sk->sk_type == SOCK_STREAM || total_written == len)
2080 			err = total_written;
2081 	}
2082 out:
2083 	if (sk->sk_type == SOCK_STREAM)
2084 		err = sk_stream_error(sk, msg->msg_flags, err);
2085 
2086 	release_sock(sk);
2087 	return err;
2088 }
2089 
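/* Sleep until the transport reports readable data, a socket error, or a
 * shutdown in either direction.  Returns the amount of data available (zero
 * if the wait ended for an error or shutdown with nothing queued), a
 * negative error from the wait itself, or -ENOMEM when the transport failed
 * the availability check (see the XXX comment below).
 */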
2090 static int vsock_connectible_wait_data(struct sock *sk,
2091 				       struct wait_queue_entry *wait,
2092 				       long timeout,
2093 				       struct vsock_transport_recv_notify_data *recv_data,
2094 				       size_t target)
2095 {
2096 	const struct vsock_transport *transport;
2097 	struct vsock_sock *vsk;
2098 	s64 data;
2099 	int err;
2100 
2101 	vsk = vsock_sk(sk);
2102 	err = 0;
2103 	transport = vsk->transport;
2104 
2105 	while (1) {
2106 		prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE);
2107 		data = vsock_connectible_has_data(vsk);
2108 		if (data != 0)
2109 			break;
2110 
2111 		if (sk->sk_err != 0 ||
2112 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2113 		    (vsk->peer_shutdown & SEND_SHUTDOWN)) {
2114 			break;
2115 		}
2116 
2117 		/* Don't wait for non-blocking sockets. */
2118 		if (timeout == 0) {
2119 			err = -EAGAIN;
2120 			break;
2121 		}
2122 
2123 		if (recv_data) {
2124 			err = transport->notify_recv_pre_block(vsk, target, recv_data);
2125 			if (err < 0)
2126 				break;
2127 		}
2128 
2129 		release_sock(sk);
2130 		timeout = schedule_timeout(timeout);
2131 		lock_sock(sk);
2132 
2133 		if (signal_pending(current)) {
2134 			err = sock_intr_errno(timeout);
2135 			break;
2136 		} else if (timeout == 0) {
2137 			err = -EAGAIN;
2138 			break;
2139 		}
2140 	}
2141 
2142 	finish_wait(sk_sleep(sk), wait);
2143 
2144 	if (err)
2145 		return err;
2146 
2147 	/* Internal transport error when checking for available
2148 	 * data. XXX This should be changed to a connection
2149 	 * reset in a later change.
2150 	 */
2151 	if (data < 0)
2152 		return -ENOMEM;
2153 
2154 	return data;
2155 }
2156 
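/* Stream receive: loop until at least 'target' bytes have been copied, where
 * 'target' is len with MSG_WAITALL and otherwise SO_RCVLOWAT capped at len.
 * MSG_PEEK looks at the data once without consuming it; if an error occurs
 * after a partial copy, the bytes already copied are still returned.
 */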
2157 static int __vsock_stream_recvmsg(struct sock *sk, struct msghdr *msg,
2158 				  size_t len, int flags)
2159 {
2160 	struct vsock_transport_recv_notify_data recv_data;
2161 	const struct vsock_transport *transport;
2162 	struct vsock_sock *vsk;
2163 	ssize_t copied;
2164 	size_t target;
2165 	long timeout;
2166 	int err;
2167 
2168 	DEFINE_WAIT(wait);
2169 
2170 	vsk = vsock_sk(sk);
2171 	transport = vsk->transport;
2172 
2173 	/* We must not copy less than target bytes into the user's buffer
2174 	 * before returning successfully, so we wait for the consume queue to
2175 	 * have that much data to consume before dequeueing.  Note that this
2176 	 * makes it impossible to handle cases where target is greater than the
2177 	 * queue size.
2178 	 */
2179 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2180 	if (target >= transport->stream_rcvhiwat(vsk)) {
2181 		err = -ENOMEM;
2182 		goto out;
2183 	}
2184 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2185 	copied = 0;
2186 
2187 	err = transport->notify_recv_init(vsk, target, &recv_data);
2188 	if (err < 0)
2189 		goto out;
2190 
2191 
2192 	while (1) {
2193 		ssize_t read;
2194 
2195 		err = vsock_connectible_wait_data(sk, &wait, timeout,
2196 						  &recv_data, target);
2197 		if (err <= 0)
2198 			break;
2199 
2200 		err = transport->notify_recv_pre_dequeue(vsk, target,
2201 							 &recv_data);
2202 		if (err < 0)
2203 			break;
2204 
2205 		read = transport->stream_dequeue(vsk, msg, len - copied, flags);
2206 		if (read < 0) {
2207 			err = read;
2208 			break;
2209 		}
2210 
2211 		copied += read;
2212 
2213 		err = transport->notify_recv_post_dequeue(vsk, target, read,
2214 						!(flags & MSG_PEEK), &recv_data);
2215 		if (err < 0)
2216 			goto out;
2217 
2218 		if (read >= target || flags & MSG_PEEK)
2219 			break;
2220 
2221 		target -= read;
2222 	}
2223 
2224 	if (sk->sk_err)
2225 		err = -sk->sk_err;
2226 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
2227 		err = 0;
2228 
2229 	if (copied > 0)
2230 		err = copied;
2231 
2232 out:
2233 	return err;
2234 }
2235 
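/* Seqpacket receive: exactly one record per call.  If the record did not fit
 * in the user's buffer, MSG_TRUNC is set in the returned flags, and passing
 * MSG_TRUNC in the input flags requests the full record length as the return
 * value instead of the number of bytes copied.
 */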
2236 static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg,
2237 				     size_t len, int flags)
2238 {
2239 	const struct vsock_transport *transport;
2240 	struct vsock_sock *vsk;
2241 	ssize_t msg_len;
2242 	long timeout;
2243 	int err = 0;
2244 	DEFINE_WAIT(wait);
2245 
2246 	vsk = vsock_sk(sk);
2247 	transport = vsk->transport;
2248 
2249 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2250 
2251 	err = vsock_connectible_wait_data(sk, &wait, timeout, NULL, 0);
2252 	if (err <= 0)
2253 		goto out;
2254 
2255 	msg_len = transport->seqpacket_dequeue(vsk, msg, flags);
2256 
2257 	if (msg_len < 0) {
2258 		err = msg_len;
2259 		goto out;
2260 	}
2261 
2262 	if (sk->sk_err) {
2263 		err = -sk->sk_err;
2264 	} else if (sk->sk_shutdown & RCV_SHUTDOWN) {
2265 		err = 0;
2266 	} else {
2267 		/* If the user set MSG_TRUNC, return the real length
2268 		 * of the packet.
2269 		 */
2270 		if (flags & MSG_TRUNC)
2271 			err = msg_len;
2272 		else
2273 			err = len - msg_data_left(msg);
2274 
2275 		/* Always set MSG_TRUNC if the real length of the packet
2276 		 * is bigger than the user's buffer.
2277 		 */
2278 		if (msg_len > len)
2279 			msg->msg_flags |= MSG_TRUNC;
2280 	}
2281 
2282 out:
2283 	return err;
2284 }
2285 
2286 int
2287 __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2288 			    int flags)
2289 {
2290 	struct sock *sk;
2291 	struct vsock_sock *vsk;
2292 	const struct vsock_transport *transport;
2293 	int err;
2294 
2295 	sk = sock->sk;
2296 
2297 	if (unlikely(flags & MSG_ERRQUEUE))
2298 		return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR);
2299 
2300 	vsk = vsock_sk(sk);
2301 	err = 0;
2302 
2303 	lock_sock(sk);
2304 
2305 	transport = vsk->transport;
2306 
2307 	if (!transport || sk->sk_state != TCP_ESTABLISHED) {
2308 		/* Recvmsg is supposed to return 0 if a peer performs an
2309 		 * orderly shutdown. Use the SOCK_DONE flag to differentiate
2310 		 * that case from one where the peer has not connected or a
2311 		 * local shutdown occurred.
2312 		 */
2313 		if (sock_flag(sk, SOCK_DONE))
2314 			err = 0;
2315 		else
2316 			err = -ENOTCONN;
2317 
2318 		goto out;
2319 	}
2320 
2321 	if (flags & MSG_OOB) {
2322 		err = -EOPNOTSUPP;
2323 		goto out;
2324 	}
2325 
2326 	/* We don't check the peer_shutdown flag here since the peer may have
2327 	 * shut down while there is still data in the queue that the local
2328 	 * socket can receive.
2329 	 */
2330 	if (sk->sk_shutdown & RCV_SHUTDOWN) {
2331 		err = 0;
2332 		goto out;
2333 	}
2334 
2335 	/* It is valid on Linux to pass in a zero-length receive buffer.  This
2336 	 * is not an error.  We may as well bail out now.
2337 	 */
2338 	if (!len) {
2339 		err = 0;
2340 		goto out;
2341 	}
2342 
2343 	if (sk->sk_type == SOCK_STREAM)
2344 		err = __vsock_stream_recvmsg(sk, msg, len, flags);
2345 	else
2346 		err = __vsock_seqpacket_recvmsg(sk, msg, len, flags);
2347 
2348 out:
2349 	release_sock(sk);
2350 	return err;
2351 }
2352 
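/* recvmsg() entry point for connectible sockets.  If the socket was added to
 * a BPF sockmap, its sk_prot has been replaced, so defer to that proto's
 * recvmsg() implementation rather than the native
 * __vsock_connectible_recvmsg() path.
 */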
2353 int
2354 vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2355 			  int flags)
2356 {
2357 #ifdef CONFIG_BPF_SYSCALL
2358 	struct sock *sk = sock->sk;
2359 	const struct proto *prot;
2360 
2361 	prot = READ_ONCE(sk->sk_prot);
2362 	if (prot != &vsock_proto)
2363 		return prot->recvmsg(sk, msg, len, flags, NULL);
2364 #endif
2365 
2366 	return __vsock_connectible_recvmsg(sock, msg, len, flags);
2367 }
2368 EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg);
2369 
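/* SO_RCVLOWAT may not exceed the buffer size, since the stream receive path
 * rejects a 'target' at or above the high watermark and could otherwise never
 * be satisfied.  The transport may veto or act on the new value; 'val ?: 1'
 * keeps the watermark at least one byte.
 */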
2370 static int vsock_set_rcvlowat(struct sock *sk, int val)
2371 {
2372 	const struct vsock_transport *transport;
2373 	struct vsock_sock *vsk;
2374 
2375 	vsk = vsock_sk(sk);
2376 
2377 	if (val > vsk->buffer_size)
2378 		return -EINVAL;
2379 
2380 	transport = vsk->transport;
2381 
2382 	if (transport && transport->notify_set_rcvlowat) {
2383 		int err;
2384 
2385 		err = transport->notify_set_rcvlowat(vsk, val);
2386 		if (err)
2387 			return err;
2388 	}
2389 
2390 	WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
2391 	return 0;
2392 }
2393 
2394 static const struct proto_ops vsock_stream_ops = {
2395 	.family = PF_VSOCK,
2396 	.owner = THIS_MODULE,
2397 	.release = vsock_release,
2398 	.bind = vsock_bind,
2399 	.connect = vsock_connect,
2400 	.socketpair = sock_no_socketpair,
2401 	.accept = vsock_accept,
2402 	.getname = vsock_getname,
2403 	.poll = vsock_poll,
2404 	.ioctl = vsock_ioctl,
2405 	.listen = vsock_listen,
2406 	.shutdown = vsock_shutdown,
2407 	.setsockopt = vsock_connectible_setsockopt,
2408 	.getsockopt = vsock_connectible_getsockopt,
2409 	.sendmsg = vsock_connectible_sendmsg,
2410 	.recvmsg = vsock_connectible_recvmsg,
2411 	.mmap = sock_no_mmap,
2412 	.set_rcvlowat = vsock_set_rcvlowat,
2413 	.read_skb = vsock_read_skb,
2414 };
2415 
2416 static const struct proto_ops vsock_seqpacket_ops = {
2417 	.family = PF_VSOCK,
2418 	.owner = THIS_MODULE,
2419 	.release = vsock_release,
2420 	.bind = vsock_bind,
2421 	.connect = vsock_connect,
2422 	.socketpair = sock_no_socketpair,
2423 	.accept = vsock_accept,
2424 	.getname = vsock_getname,
2425 	.poll = vsock_poll,
2426 	.ioctl = vsock_ioctl,
2427 	.listen = vsock_listen,
2428 	.shutdown = vsock_shutdown,
2429 	.setsockopt = vsock_connectible_setsockopt,
2430 	.getsockopt = vsock_connectible_getsockopt,
2431 	.sendmsg = vsock_connectible_sendmsg,
2432 	.recvmsg = vsock_connectible_recvmsg,
2433 	.mmap = sock_no_mmap,
2434 	.read_skb = vsock_read_skb,
2435 };
2436 
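/* socket(2) backend for AF_VSOCK.  Datagram sockets are bound to the single
 * dgram transport right away; connectible sockets defer transport assignment
 * until connect(2) or an incoming request, since the proper transport
 * (g2h, h2g or local) depends on the peer CID.
 */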
2437 static int vsock_create(struct net *net, struct socket *sock,
2438 			int protocol, int kern)
2439 {
2440 	struct vsock_sock *vsk;
2441 	struct sock *sk;
2442 	int ret;
2443 
2444 	if (!sock)
2445 		return -EINVAL;
2446 
2447 	if (protocol && protocol != PF_VSOCK)
2448 		return -EPROTONOSUPPORT;
2449 
2450 	switch (sock->type) {
2451 	case SOCK_DGRAM:
2452 		sock->ops = &vsock_dgram_ops;
2453 		break;
2454 	case SOCK_STREAM:
2455 		sock->ops = &vsock_stream_ops;
2456 		break;
2457 	case SOCK_SEQPACKET:
2458 		sock->ops = &vsock_seqpacket_ops;
2459 		break;
2460 	default:
2461 		return -ESOCKTNOSUPPORT;
2462 	}
2463 
2464 	sock->state = SS_UNCONNECTED;
2465 
2466 	sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern);
2467 	if (!sk)
2468 		return -ENOMEM;
2469 
2470 	vsk = vsock_sk(sk);
2471 
2472 	if (sock->type == SOCK_DGRAM) {
2473 		ret = vsock_assign_transport(vsk, NULL);
2474 		if (ret < 0) {
2475 			sock->sk = NULL;
2476 			sock_put(sk);
2477 			return ret;
2478 		}
2479 	}
2480 
2481 	/* SOCK_DGRAM doesn't have a 'setsockopt' callback in its
2482 	 * proto_ops, so there is no handler for custom logic.
2483 	 */
2484 	if (sock_type_connectible(sock->type))
2485 		set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
2486 
2487 	vsock_insert_unbound(vsk);
2488 
2489 	return 0;
2490 }
2491 
2492 static const struct net_proto_family vsock_family_ops = {
2493 	.family = AF_VSOCK,
2494 	.create = vsock_create,
2495 	.owner = THIS_MODULE,
2496 };
2497 
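/* Backend for ioctl(2) on the /dev/vsock misc device.  A minimal userspace
 * sketch of the only supported command (illustrative only, error handling
 * omitted):
 *
 *	unsigned int cid;
 *	int fd = open("/dev/vsock", O_RDONLY);
 *	ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 *
 * If neither a g2h nor an h2g transport is registered, the reported CID is
 * VMADDR_CID_ANY.
 */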
2498 static long vsock_dev_do_ioctl(struct file *filp,
2499 			       unsigned int cmd, void __user *ptr)
2500 {
2501 	u32 __user *p = ptr;
2502 	u32 cid = VMADDR_CID_ANY;
2503 	int retval = 0;
2504 
2505 	switch (cmd) {
2506 	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
2507 		/* To be compatible with the VMCI behavior, we prioritize the
2508 		 * guest CID instead of the well-known host CID (VMADDR_CID_HOST).
2509 		 */
2510 		if (transport_g2h)
2511 			cid = transport_g2h->get_local_cid();
2512 		else if (transport_h2g)
2513 			cid = transport_h2g->get_local_cid();
2514 
2515 		if (put_user(cid, p) != 0)
2516 			retval = -EFAULT;
2517 		break;
2518 
2519 	default:
2520 		retval = -ENOIOCTLCMD;
2521 	}
2522 
2523 	return retval;
2524 }
2525 
2526 static long vsock_dev_ioctl(struct file *filp,
2527 			    unsigned int cmd, unsigned long arg)
2528 {
2529 	return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
2530 }
2531 
2532 #ifdef CONFIG_COMPAT
2533 static long vsock_dev_compat_ioctl(struct file *filp,
2534 				   unsigned int cmd, unsigned long arg)
2535 {
2536 	return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
2537 }
2538 #endif
2539 
2540 static const struct file_operations vsock_device_ops = {
2541 	.owner		= THIS_MODULE,
2542 	.unlocked_ioctl	= vsock_dev_ioctl,
2543 #ifdef CONFIG_COMPAT
2544 	.compat_ioctl	= vsock_dev_compat_ioctl,
2545 #endif
2546 	.open		= nonseekable_open,
2547 };
2548 
2549 static struct miscdevice vsock_device = {
2550 	.name		= "vsock",
2551 	.fops		= &vsock_device_ops,
2552 };
2553 
2554 static int __init vsock_init(void)
2555 {
2556 	int err = 0;
2557 
2558 	vsock_init_tables();
2559 
2560 	vsock_proto.owner = THIS_MODULE;
2561 	vsock_device.minor = MISC_DYNAMIC_MINOR;
2562 	err = misc_register(&vsock_device);
2563 	if (err) {
2564 		pr_err("Failed to register misc device\n");
2565 		goto err_reset_transport;
2566 	}
2567 
2568 	err = proto_register(&vsock_proto, 1);	/* we want our slab */
2569 	if (err) {
2570 		pr_err("Cannot register vsock protocol\n");
2571 		goto err_deregister_misc;
2572 	}
2573 
2574 	err = sock_register(&vsock_family_ops);
2575 	if (err) {
2576 		pr_err("could not register af_vsock (%d) address family: %d\n",
2577 		       AF_VSOCK, err);
2578 		goto err_unregister_proto;
2579 	}
2580 
2581 	vsock_bpf_build_proto();
2582 
2583 	return 0;
2584 
2585 err_unregister_proto:
2586 	proto_unregister(&vsock_proto);
2587 err_deregister_misc:
2588 	misc_deregister(&vsock_device);
2589 err_reset_transport:
2590 	return err;
2591 }
2592 
2593 static void __exit vsock_exit(void)
2594 {
2595 	misc_deregister(&vsock_device);
2596 	sock_unregister(AF_VSOCK);
2597 	proto_unregister(&vsock_proto);
2598 }
2599 
2600 const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk)
2601 {
2602 	return vsk->transport;
2603 }
2604 EXPORT_SYMBOL_GPL(vsock_core_get_transport);
2605 
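/* Register 't' for every role requested in 'features' (H2G, G2H, DGRAM,
 * LOCAL).  The candidate pointers are staged in locals and committed only
 * once all requested roles are known to be free, so a partial registration
 * is never visible and -EBUSY leaves the tables untouched.
 */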
2606 int vsock_core_register(const struct vsock_transport *t, int features)
2607 {
2608 	const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local;
2609 	int err = mutex_lock_interruptible(&vsock_register_mutex);
2610 
2611 	if (err)
2612 		return err;
2613 
2614 	t_h2g = transport_h2g;
2615 	t_g2h = transport_g2h;
2616 	t_dgram = transport_dgram;
2617 	t_local = transport_local;
2618 
2619 	if (features & VSOCK_TRANSPORT_F_H2G) {
2620 		if (t_h2g) {
2621 			err = -EBUSY;
2622 			goto err_busy;
2623 		}
2624 		t_h2g = t;
2625 	}
2626 
2627 	if (features & VSOCK_TRANSPORT_F_G2H) {
2628 		if (t_g2h) {
2629 			err = -EBUSY;
2630 			goto err_busy;
2631 		}
2632 		t_g2h = t;
2633 	}
2634 
2635 	if (features & VSOCK_TRANSPORT_F_DGRAM) {
2636 		if (t_dgram) {
2637 			err = -EBUSY;
2638 			goto err_busy;
2639 		}
2640 		t_dgram = t;
2641 	}
2642 
2643 	if (features & VSOCK_TRANSPORT_F_LOCAL) {
2644 		if (t_local) {
2645 			err = -EBUSY;
2646 			goto err_busy;
2647 		}
2648 		t_local = t;
2649 	}
2650 
2651 	transport_h2g = t_h2g;
2652 	transport_g2h = t_g2h;
2653 	transport_dgram = t_dgram;
2654 	transport_local = t_local;
2655 
2656 err_busy:
2657 	mutex_unlock(&vsock_register_mutex);
2658 	return err;
2659 }
2660 EXPORT_SYMBOL_GPL(vsock_core_register);
2661 
2662 void vsock_core_unregister(const struct vsock_transport *t)
2663 {
2664 	mutex_lock(&vsock_register_mutex);
2665 
2666 	if (transport_h2g == t)
2667 		transport_h2g = NULL;
2668 
2669 	if (transport_g2h == t)
2670 		transport_g2h = NULL;
2671 
2672 	if (transport_dgram == t)
2673 		transport_dgram = NULL;
2674 
2675 	if (transport_local == t)
2676 		transport_local = NULL;
2677 
2678 	mutex_unlock(&vsock_register_mutex);
2679 }
2680 EXPORT_SYMBOL_GPL(vsock_core_unregister);
2681 
2682 module_init(vsock_init);
2683 module_exit(vsock_exit);
2684 
2685 MODULE_AUTHOR("VMware, Inc.");
2686 MODULE_DESCRIPTION("VMware Virtual Socket Family");
2687 MODULE_VERSION("1.0.2.0-k");
2688 MODULE_LICENSE("GPL v2");
2689