xref: /linux/net/vmw_vsock/vmci_transport.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#include "vmci_transport_notify.h"

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
static void vmci_transport_peer_detach_cb(u32 sub_id,
					  const struct vmci_event_data *ed,
					  void *client_data);
static void vmci_transport_recv_pkt_work(struct work_struct *work);
static void vmci_transport_cleanup(struct work_struct *work);
static int vmci_transport_recv_listen(struct sock *sk,
				      struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_server(
					struct sock *sk,
					struct sock *pending,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_negotiate(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_invalid(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connected(struct sock *sk,
					 struct vmci_transport_packet *pkt);
static bool vmci_transport_old_proto_override(bool *old_pkt_proto);
static u16 vmci_transport_new_proto_supported_versions(void);
static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
						  bool old_pkt_proto);
static bool vmci_check_transport(struct vsock_sock *vsk);

struct vmci_transport_recv_pkt_info {
	struct work_struct work;
	struct sock *sk;
	struct vmci_transport_packet pkt;
};

static LIST_HEAD(vmci_transport_cleanup_list);
static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);

static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
							   VMCI_INVALID_ID };
static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;

static int PROTOCOL_OVERRIDE = -1;

static struct vsock_transport vmci_transport; /* forward declaration */

/* Helper function to convert from a VMCI error code to a VSock error code. */

static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
{
	switch (vmci_error) {
	case VMCI_ERROR_NO_MEM:
		return -ENOMEM;
	case VMCI_ERROR_DUPLICATE_ENTRY:
	case VMCI_ERROR_ALREADY_EXISTS:
		return -EADDRINUSE;
	case VMCI_ERROR_NO_ACCESS:
		return -EPERM;
	case VMCI_ERROR_NO_RESOURCES:
		return -ENOBUFS;
	case VMCI_ERROR_INVALID_RESOURCE:
		return -EHOSTUNREACH;
	case VMCI_ERROR_INVALID_ARGS:
	default:
		break;
	}
	return -EINVAL;
}
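
/* Illustrative note: two error domains meet in this file.  VMCI helpers
 * return VMCI_SUCCESS or VMCI_ERROR_* status codes, while the socket
 * layer expects negative errnos, so e.g. VMCI_ERROR_NO_RESOURCES from
 * vmci_datagram_send() is surfaced to callers as -ENOBUFS via this
 * helper.
 */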

static u32 vmci_transport_peer_rid(u32 peer_cid)
{
	if (VMADDR_CID_HYPERVISOR == peer_cid)
		return VMCI_TRANSPORT_HYPERVISOR_PACKET_RID;

	return VMCI_TRANSPORT_PACKET_RID;
}

static inline void
vmci_transport_packet_init(struct vmci_transport_packet *pkt,
			   struct sockaddr_vm *src,
			   struct sockaddr_vm *dst,
			   u8 type,
			   u64 size,
			   u64 mode,
			   struct vmci_transport_waiting_info *wait,
			   u16 proto,
			   struct vmci_handle handle)
{
	memset(pkt, 0, sizeof(*pkt));

	/* We register the stream control handler as an any-CID handle, so we
	 * must always send from a source address of VMADDR_CID_ANY.
	 */
	pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,
				       VMCI_TRANSPORT_PACKET_RID);
	pkt->dg.dst = vmci_make_handle(dst->svm_cid,
				       vmci_transport_peer_rid(dst->svm_cid));
	pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);
	pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
	pkt->type = type;
	pkt->src_port = src->svm_port;
	pkt->dst_port = dst->svm_port;

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
		pkt->u.size = 0;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_REQUEST:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
		pkt->u.size = size;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
	case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
		pkt->u.handle = handle;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
	case VMCI_TRANSPORT_PACKET_TYPE_READ:
	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		pkt->u.size = 0;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
		pkt->u.mode = mode;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
		pkt->u.wait = *wait;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_REQUEST2:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
		pkt->u.size = size;
		pkt->proto = proto;
		break;
	}
}
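
/* Informal sketch of the wire format implied above: a control packet is
 * an ordinary VMCI datagram whose payload is the rest of struct
 * vmci_transport_packet, which is why dg.payload_size is
 * sizeof(*pkt) - sizeof(pkt->dg).  The union u carries the one field
 * each packet type needs, e.g. the proposed queue pair size for
 * REQUEST/NEGOTIATE or the queue pair handle for OFFER/ATTACH.
 */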

static inline void
vmci_transport_packet_get_addresses(struct vmci_transport_packet *pkt,
				    struct sockaddr_vm *local,
				    struct sockaddr_vm *remote)
{
	vsock_addr_init(local, pkt->dg.dst.context, pkt->dst_port);
	vsock_addr_init(remote, pkt->dg.src.context, pkt->src_port);
}

static int
__vmci_transport_send_control_pkt(struct vmci_transport_packet *pkt,
				  struct sockaddr_vm *src,
				  struct sockaddr_vm *dst,
				  enum vmci_transport_packet_type type,
				  u64 size,
				  u64 mode,
				  struct vmci_transport_waiting_info *wait,
				  u16 proto,
				  struct vmci_handle handle,
				  bool convert_error)
{
	int err;

	vmci_transport_packet_init(pkt, src, dst, type, size, mode, wait,
				   proto, handle);
	err = vmci_datagram_send(&pkt->dg);
	if (convert_error && (err < 0))
		return vmci_transport_error_to_vsock_error(err);

	return err;
}
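
/* The convert_error flag exists because callers live in two contexts:
 * bottom-half senders keep the raw VMCI status (their datagram
 * callbacks must return VMCI_* codes), while process-context senders
 * want a negative errno they can return to the socket layer directly.
 */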

static int
vmci_transport_reply_control_pkt_fast(struct vmci_transport_packet *pkt,
				      enum vmci_transport_packet_type type,
				      u64 size,
				      u64 mode,
				      struct vmci_transport_waiting_info *wait,
				      struct vmci_handle handle)
{
	struct vmci_transport_packet reply;
	struct sockaddr_vm src, dst;

	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST) {
		return 0;
	} else {
		vmci_transport_packet_get_addresses(pkt, &src, &dst);
		return __vmci_transport_send_control_pkt(&reply, &src, &dst,
							 type,
							 size, mode, wait,
							 VSOCK_PROTO_INVALID,
							 handle, true);
	}
}

static int
vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
				   struct sockaddr_vm *dst,
				   enum vmci_transport_packet_type type,
				   u64 size,
				   u64 mode,
				   struct vmci_transport_waiting_info *wait,
				   struct vmci_handle handle)
{
	/* Note that it is safe to use a single packet across all CPUs since
	 * two tasklets of the same type are guaranteed to not ever run
	 * simultaneously. If that ever changes, or VMCI stops using tasklets,
	 * we can use per-cpu packets.
	 */
	static struct vmci_transport_packet pkt;

	return __vmci_transport_send_control_pkt(&pkt, src, dst, type,
						 size, mode, wait,
						 VSOCK_PROTO_INVALID, handle,
						 false);
}

static int
vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
				      struct sockaddr_vm *dst,
				      enum vmci_transport_packet_type type,
				      u64 size,
				      u64 mode,
				      struct vmci_transport_waiting_info *wait,
				      u16 proto,
				      struct vmci_handle handle)
{
	struct vmci_transport_packet *pkt;
	int err;

	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
						mode, wait, proto, handle,
						true);
	kfree(pkt);

	return err;
}

static int
vmci_transport_send_control_pkt(struct sock *sk,
				enum vmci_transport_packet_type type,
				u64 size,
				u64 mode,
				struct vmci_transport_waiting_info *wait,
				u16 proto,
				struct vmci_handle handle)
{
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	if (!vsock_addr_bound(&vsk->remote_addr))
		return -EINVAL;

	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
						     &vsk->remote_addr,
						     type, size, mode,
						     wait, proto, handle);
}

static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
					struct sockaddr_vm *src,
					struct vmci_transport_packet *pkt)
{
	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_RST, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_reset(struct sock *sk,
				     struct vmci_transport_packet *pkt)
{
	struct sockaddr_vm *dst_ptr;
	struct sockaddr_vm dst;
	struct vsock_sock *vsk;

	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	if (vsock_addr_bound(&vsk->remote_addr)) {
		dst_ptr = &vsk->remote_addr;
	} else {
		vsock_addr_init(&dst, pkt->dg.src.context,
				pkt->src_port);
		dst_ptr = &dst;
	}
	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
					     VMCI_TRANSPORT_PACKET_TYPE_RST,
					     0, 0, NULL, VSOCK_PROTO_INVALID,
					     VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate2(struct sock *sk, size_t size,
					  u16 version)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_qp_offer(struct sock *sk,
					struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0,
					0, NULL,
					VSOCK_PROTO_INVALID, handle);
}

static int vmci_transport_send_attach(struct sock *sk,
				      struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
					0, 0, NULL, VSOCK_PROTO_INVALID,
					handle);
}

static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
{
	return vmci_transport_reply_control_pkt_fast(
						pkt,
						VMCI_TRANSPORT_PACKET_TYPE_RST,
						0, 0, NULL,
						VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_invalid_bh(struct sockaddr_vm *dst,
					  struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_INVALID,
					0, 0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
				 struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
				struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_write(struct sock *sk,
				      struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
				0, 0, wait, VSOCK_PROTO_INVALID,
				VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_read(struct sock *sk,
				     struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
				0, 0, wait, VSOCK_PROTO_INVALID,
				VMCI_INVALID_HANDLE);
}

static int vmci_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	return vmci_transport_send_control_pkt(
					&vsk->sk,
					VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
					0, mode, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(sk,
					VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request2(struct sock *sk, size_t size,
					     u16 version)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}

static struct sock *vmci_transport_get_pending(
					struct sock *listener,
					struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;
	struct sock *pending;
	struct sockaddr_vm src;

	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);

	vlistener = vsock_sk(listener);

	list_for_each_entry(vpending, &vlistener->pending_links,
			    pending_links) {
		if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
		    pkt->dst_port == vpending->local_addr.svm_port) {
			pending = sk_vsock(vpending);
			sock_hold(pending);
			goto found;
		}
	}

	pending = NULL;
found:
	return pending;

}

static void vmci_transport_release_pending(struct sock *pending)
{
	sock_put(pending);
}

/* We allow two kinds of sockets to communicate with a restricted VM: 1)
 * trusted sockets, and 2) sockets from applications running as the same user
 * as the VM (this is only true on the host side, and only when using hosted
 * products).
 */

static bool vmci_transport_is_trusted(struct vsock_sock *vsock, u32 peer_cid)
{
	return vsock->trusted ||
	       vmci_is_context_owner(peer_cid, vsock->owner->uid);
}

/* We allow sending datagrams to and receiving datagrams from a restricted VM
 * only if it is trusted as described in vmci_transport_is_trusted.
 */

static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid)
{
	if (VMADDR_CID_HYPERVISOR == peer_cid)
		return true;

	if (vsock->cached_peer != peer_cid) {
		vsock->cached_peer = peer_cid;
		if (!vmci_transport_is_trusted(vsock, peer_cid) &&
		    (vmci_context_get_priv_flags(peer_cid) &
		     VMCI_PRIVILEGE_FLAG_RESTRICTED)) {
			vsock->cached_peer_allow_dgram = false;
		} else {
			vsock->cached_peer_allow_dgram = true;
		}
	}

	return vsock->cached_peer_allow_dgram;
}
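
/* Note that the trust/privilege verdict above is cached one peer at a
 * time per socket: a datagram from a different CID overwrites the
 * single cache slot and redoes the check, while repeated traffic from
 * the same peer reuses the cached result.
 */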

static int
vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
				struct vmci_handle *handle,
				u64 produce_size,
				u64 consume_size,
				u32 peer, u32 flags, bool trusted)
{
	int err = 0;

	if (trusted) {
		/* Try to allocate our queue pair as trusted. This will only
		 * work if vsock is running in the host.
		 */

		err = vmci_qpair_alloc(qpair, handle, produce_size,
				       consume_size,
				       peer, flags,
				       VMCI_PRIVILEGE_FLAG_TRUSTED);
		if (err != VMCI_ERROR_NO_ACCESS)
			goto out;

	}

	err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size,
			       peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
out:
	if (err < 0) {
		pr_err_once("Could not attach to queue pair with %d\n", err);
		err = vmci_transport_error_to_vsock_error(err);
	}

	return err;
}

static int
vmci_transport_datagram_create_hnd(u32 resource_id,
				   u32 flags,
				   vmci_datagram_recv_cb recv_cb,
				   void *client_data,
				   struct vmci_handle *out_handle)
{
	int err = 0;

	/* Try to allocate our datagram handler as trusted. This will only work
	 * if vsock is running in the host.
	 */

	err = vmci_datagram_create_handle_priv(resource_id, flags,
					       VMCI_PRIVILEGE_FLAG_TRUSTED,
					       recv_cb,
					       client_data, out_handle);

	if (err == VMCI_ERROR_NO_ACCESS)
		err = vmci_datagram_create_handle(resource_id, flags,
						  recv_cb, client_data,
						  out_handle);

	return err;
}
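
/* Both allocation helpers above follow the same try-trusted-then-fall-
 * back pattern: first request VMCI_PRIVILEGE_FLAG_TRUSTED, which only
 * succeeds when vsock runs in the host, and retry without privileges
 * on VMCI_ERROR_NO_ACCESS.
 */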

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context and if it ever needs to
 * sleep it should defer that work to a work queue.
 */

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	size_t size;
	struct sk_buff *skb;
	struct vsock_sock *vsk;

	sk = (struct sock *)data;

	/* This handler is privileged when this module is running on the host.
	 * We will get datagrams from all endpoints (even VMs that are in a
	 * restricted context). If we get one from a restricted context then
	 * the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested in is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, dg->src.context))
		return VMCI_ERROR_NO_ACCESS;

	size = VMCI_DG_SIZE(dg);

	/* Attach the packet to the socket's receive queue as an sk_buff. */
	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		return VMCI_ERROR_NO_MEM;

	/* sk_receive_skb() will do a sock_put(), so hold here. */
	sock_hold(sk);
	skb_put(skb, size);
	memcpy(skb->data, dg, size);
	sk_receive_skb(sk, skb, 0);

	return VMCI_SUCCESS;
}
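
/* The skb queued above contains the complete struct vmci_datagram,
 * header included; vmci_transport_dgram_dequeue() later skips
 * sizeof(*dg) bytes when copying the payload out to the user.
 */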

static bool vmci_transport_stream_allow(struct vsock_sock *vsk, u32 cid,
					u32 port)
{
	static const u32 non_socket_contexts[] = {
		VMADDR_CID_LOCAL,
	};
	int i;

	if (!vsock_net_mode_global(vsk))
		return false;

	BUILD_BUG_ON(sizeof(cid) != sizeof(*non_socket_contexts));

	for (i = 0; i < ARRAY_SIZE(non_socket_contexts); i++) {
		if (cid == non_socket_contexts[i])
			return false;
	}

	return true;
}
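
/* Hedged note: VMADDR_CID_LOCAL appears in non_socket_contexts[]
 * presumably because local (loopback) communication is handled by the
 * dedicated loopback transport rather than by VMCI, so stream packets
 * claiming that context are rejected here.
 */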

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context but it defers most of
 * its work to the packet handling work queue.
 */

static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	struct sockaddr_vm dst;
	struct sockaddr_vm src;
	struct vmci_transport_packet *pkt;
	struct vsock_sock *vsk;
	bool bh_process_pkt;
	int err;

	sk = NULL;
	err = VMCI_SUCCESS;
	bh_process_pkt = false;

	/* Ignore incoming packets from resources that aren't vsock
	 * implementations.
	 */
	if (vmci_transport_peer_rid(dg->src.context) != dg->src.resource)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
		/* Drop datagrams that do not contain full VSock packets. */
		return VMCI_ERROR_INVALID_ARGS;

	pkt = (struct vmci_transport_packet *)dg;

	/* Find the socket that should handle this packet.  First we look for a
	 * connected socket and if there is none we look for a socket bound to
	 * the destination address.
	 */
	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
	vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);

	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			/* We could not find a socket for this specified
			 * address.  If this packet is a RST, we just drop it.
			 * If it is another packet, we send a RST.  Note that
			 * we do not send a RST reply to RSTs so that we do not
			 * continually send RSTs between two endpoints.
			 *
			 * Note that since this is a reply, dst is src and src
			 * is dst.
			 */
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NOT_FOUND;
			goto out;
		}
	}

	/* If the received packet type is beyond all types known to this
	 * implementation, reply with an invalid message.  Hopefully this will
	 * help when implementing backwards compatibility in the future.
	 */
	if (pkt->type >= VMCI_TRANSPORT_PACKET_TYPE_MAX) {
		vmci_transport_send_invalid_bh(&dst, &src);
		err = VMCI_ERROR_INVALID_ARGS;
		goto out;
	}

	/* This handler is privileged when this module is running on the host.
	 * We will get datagram connect requests from all endpoints (even VMs
	 * that are in a restricted context). If we get one from a restricted
	 * context then the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested in is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, pkt->dg.src.context)) {
		err = VMCI_ERROR_NO_ACCESS;
		goto out;
	}

	/* Ignore incoming packets from contexts without sockets. */
	if (!vmci_transport_stream_allow(vsk, dg->src.context, -1)) {
		err = VMCI_ERROR_NO_ACCESS;
		goto out;
	}

	/* We do most everything in a work queue, but let's fast path the
	 * notification of reads and writes to help data transfer performance.
	 * We can only do this if there is no process context code executing
	 * for this socket since that may change the state.
	 */
	bh_lock_sock(sk);

	if (!sock_owned_by_user(sk)) {
		/* The local context ID may be out of date, update it. */
		vsk->local_addr.svm_cid = dst.svm_cid;

		if (sk->sk_state == TCP_ESTABLISHED)
			vmci_trans(vsk)->notify_ops->handle_notify_pkt(
					sk, pkt, true, &dst, &src,
					&bh_process_pkt);
	}

	bh_unlock_sock(sk);

	if (!bh_process_pkt) {
		struct vmci_transport_recv_pkt_info *recv_pkt_info;

		recv_pkt_info = kmalloc(sizeof(*recv_pkt_info), GFP_ATOMIC);
		if (!recv_pkt_info) {
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NO_MEM;
			goto out;
		}

		recv_pkt_info->sk = sk;
		memcpy(&recv_pkt_info->pkt, pkt, sizeof(recv_pkt_info->pkt));
		INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work);

		schedule_work(&recv_pkt_info->work);
		/* Clear sk so that the reference count incremented by one of
		 * the Find functions above is not decremented below.  We need
		 * that reference count for the packet handler we've scheduled
		 * to run.
		 */
		sk = NULL;
	}

out:
	if (sk)
		sock_put(sk);

	return err;
}

static void vmci_transport_handle_detach(struct sock *sk)
{
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);
	if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
		sock_set_flag(sk, SOCK_DONE);

		/* On a detach the peer will not be sending or receiving
		 * anymore.
		 */
		vsk->peer_shutdown = SHUTDOWN_MASK;

		/* We should not be sending anymore since the peer won't be
		 * there to receive, but we can still receive if there is data
		 * left in our consume queue. If the local endpoint is a host,
		 * we can't call vsock_stream_has_data, since that may block,
		 * but a host endpoint can't read data once the VM has
		 * detached, so there is no available data in that case.
		 */
		if (vsk->local_addr.svm_cid == VMADDR_CID_HOST ||
		    vsock_stream_has_data(vsk) <= 0) {
			if (sk->sk_state == TCP_SYN_SENT) {
				/* The peer may detach from a queue pair while
				 * we are still in the connecting state, i.e.,
				 * if the peer VM is killed after attaching to
				 * a queue pair, but before we complete the
				 * handshake. In that case, we treat the detach
				 * event like a reset.
				 */

				sk->sk_state = TCP_CLOSE;
				sk->sk_err = ECONNRESET;
				sk_error_report(sk);
				return;
			}
			sk->sk_state = TCP_CLOSE;
		}
		sk->sk_state_change(sk);
	}
}

static void vmci_transport_peer_detach_cb(u32 sub_id,
					  const struct vmci_event_data *e_data,
					  void *client_data)
{
	struct vmci_transport *trans = client_data;
	const struct vmci_event_payload_qp *e_payload;

	e_payload = vmci_event_data_const_payload(e_data);

	/* XXX This is lame; we should provide a way to look up sockets by
	 * qp_handle.
	 */
	if (vmci_handle_is_invalid(e_payload->handle) ||
	    !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
		return;

	/* We don't ask for delayed CBs when we subscribe to this event (we
	 * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
	 * guarantees in that case about what context we might be running in,
	 * so it could be BH or process, blockable or non-blockable.  So we
	 * need to account for all possible contexts here.
	 */
	spin_lock_bh(&trans->lock);
	if (!trans->sk)
		goto out;

	/* Apart from here, trans->lock is only grabbed as part of sk destruct,
	 * where trans->sk isn't locked.
	 */
	bh_lock_sock(trans->sk);

	vmci_transport_handle_detach(trans->sk);

	bh_unlock_sock(trans->sk);
out:
	spin_unlock_bh(&trans->lock);
}

static void vmci_transport_qp_resumed_cb(u32 sub_id,
					 const struct vmci_event_data *e_data,
					 void *client_data)
{
	vsock_for_each_connected_socket(&vmci_transport,
					vmci_transport_handle_detach);
}

static void vmci_transport_recv_pkt_work(struct work_struct *work)
{
	struct vmci_transport_recv_pkt_info *recv_pkt_info;
	struct vmci_transport_packet *pkt;
	struct sock *sk;

	recv_pkt_info =
		container_of(work, struct vmci_transport_recv_pkt_info, work);
	sk = recv_pkt_info->sk;
	pkt = &recv_pkt_info->pkt;

	lock_sock(sk);

	/* The local context ID may be out of date. */
	vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;

	switch (sk->sk_state) {
	case TCP_LISTEN:
		vmci_transport_recv_listen(sk, pkt);
		break;
	case TCP_SYN_SENT:
		/* Processing of pending connections for servers goes through
		 * the listening socket, so see vmci_transport_recv_listen()
		 * for that path.
		 */
		vmci_transport_recv_connecting_client(sk, pkt);
		break;
	case TCP_ESTABLISHED:
		vmci_transport_recv_connected(sk, pkt);
		break;
	default:
		/* Because this function does not run in the same context as
		 * vmci_transport_recv_stream_cb it is possible that the
		 * socket has closed. We need to let the other side know or it
		 * could be sitting in a connect and hang forever. Send a
		 * reset to prevent that.
		 */
		vmci_transport_send_reset(sk, pkt);
		break;
	}

	release_sock(sk);
	kfree(recv_pkt_info);
	/* Release reference obtained in the stream callback when we fetched
	 * this socket out of the bound or connected list.
	 */
	sock_put(sk);
}

static int vmci_transport_recv_listen(struct sock *sk,
				      struct vmci_transport_packet *pkt)
{
	struct sock *pending;
	struct vsock_sock *vpending;
	int err;
	u64 qp_size;
	bool old_request = false;
	bool old_pkt_proto = false;

	/* Because we are in the listen state, we could be receiving a packet
	 * for ourselves or for any previous connection request we have
	 * received.  If it's the latter, we try to find a socket in our list
	 * of pending connections and, if we do, call the appropriate handler
	 * for the state that socket is in.  Otherwise we try to service the
	 * connection request.
	 */
	pending = vmci_transport_get_pending(sk, pkt);
	if (pending) {
		lock_sock(pending);

		/* The local context ID may be out of date. */
		vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;

		switch (pending->sk_state) {
		case TCP_SYN_SENT:
			err = vmci_transport_recv_connecting_server(sk,
								    pending,
								    pkt);
			break;
		default:
			vmci_transport_send_reset(pending, pkt);
			err = -EINVAL;
		}

		if (err < 0)
			vsock_remove_pending(sk, pending);

		release_sock(pending);
		vmci_transport_release_pending(pending);

		return err;
	}

	/* The listen state only accepts connection requests.  Reply with a
	 * reset unless we received a reset.
	 */

	if (!(pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST ||
	      pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	if (pkt->u.size == 0) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	/* If this socket can't accommodate this connection request, we send a
	 * reset.  Otherwise we create and initialize a child socket and reply
	 * with a connection negotiation.
	 */
	if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
		vmci_transport_reply_reset(pkt);
		return -ECONNREFUSED;
	}

	pending = vsock_create_connected(sk);
	if (!pending) {
		vmci_transport_send_reset(sk, pkt);
		return -ENOMEM;
	}

	vpending = vsock_sk(pending);

	vsock_addr_init(&vpending->local_addr, pkt->dg.dst.context,
			pkt->dst_port);
	vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
			pkt->src_port);

	err = vsock_assign_transport(vpending, vsock_sk(sk));
	/* The transport assigned (based on remote_addr) must be the same one
	 * on which we received the request.
	 */
	if (err || !vmci_check_transport(vpending)) {
		vmci_transport_send_reset(sk, pkt);
		sock_put(pending);
		return err;
	}

	/* If the proposed size fits within our min/max, accept it. Otherwise
	 * propose our own size.
	 */
	if (pkt->u.size >= vpending->buffer_min_size &&
	    pkt->u.size <= vpending->buffer_max_size) {
		qp_size = pkt->u.size;
	} else {
		qp_size = vpending->buffer_size;
	}

	/* Figure out if we are using the old or the new protocol, based on
	 * the override or the packet type sent by our peer.
	 */
	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
		old_request = old_pkt_proto;
	} else {
		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST)
			old_request = true;
		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)
			old_request = false;

	}

	if (old_request) {
		/* Handle a REQUEST (or override) */
		u16 version = VSOCK_PROTO_INVALID;
		if (vmci_transport_proto_to_notify_struct(
			pending, &version, true))
			err = vmci_transport_send_negotiate(pending, qp_size);
		else
			err = -EINVAL;

	} else {
		/* Handle a REQUEST2 (or override) */
		int proto_int = pkt->proto;
		int pos;
		u16 active_proto_version = 0;

		/* The list of possible protocols is the intersection of the
		 * protocols the client supports and the protocols we support.
		 */
		proto_int &= vmci_transport_new_proto_supported_versions();

		/* We choose the highest possible protocol version and use that
		 * one.
		 */
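		/* For example, if proto_int came out as 0b0110, fls() returns
		 * 3 and we pick 1 << 2 == 0b0100, the highest version both
		 * sides support.
		 */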
		pos = fls(proto_int);
		if (pos) {
			active_proto_version = (1 << (pos - 1));
			if (vmci_transport_proto_to_notify_struct(
				pending, &active_proto_version, false))
				err = vmci_transport_send_negotiate2(pending,
							qp_size,
							active_proto_version);
			else
				err = -EINVAL;

		} else {
			err = -EINVAL;
		}
	}

	if (err < 0) {
		vmci_transport_send_reset(sk, pkt);
		sock_put(pending);
		err = vmci_transport_error_to_vsock_error(err);
		goto out;
	}

	vsock_add_pending(sk, pending);
	sk_acceptq_added(sk);

	pending->sk_state = TCP_SYN_SENT;
	vmci_trans(vpending)->produce_size =
		vmci_trans(vpending)->consume_size = qp_size;
	vpending->buffer_size = qp_size;

	vmci_trans(vpending)->notify_ops->process_request(pending);

	/* We might never receive another message for this socket and it's not
	 * connected to any process, so we have to ensure it gets cleaned up
	 * ourselves.  Our delayed work function will take care of that.  Note
	 * that we do not ever cancel this function since we have few
	 * guarantees about its state when calling cancel_delayed_work().
	 * Instead we hold a reference on the socket for that function and make
	 * it capable of handling cases where it needs to do nothing but
	 * release that reference.
	 */
	vpending->listener = sk;
	sock_hold(sk);
	sock_hold(pending);
	schedule_delayed_work(&vpending->pending_work, HZ);

out:
	return err;
}

static int
vmci_transport_recv_connecting_server(struct sock *listener,
				      struct sock *pending,
				      struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vpending;
	struct vmci_handle handle;
	struct vmci_qp *qpair;
	bool is_local;
	u32 flags;
	u32 detach_sub_id;
	int err;
	int skerr;

	vpending = vsock_sk(pending);
	detach_sub_id = VMCI_INVALID_ID;

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
		if (vmci_handle_is_invalid(pkt->u.handle)) {
			vmci_transport_send_reset(pending, pkt);
			skerr = EPROTO;
			err = -EINVAL;
			goto destroy;
		}
		break;
	default:
		/* Close and cleanup the connection. */
		vmci_transport_send_reset(pending, pkt);
		skerr = EPROTO;
		err = pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST ? 0 : -EINVAL;
		goto destroy;
	}

	/* In order to complete the connection we need to attach to the offered
	 * queue pair and send an attach notification.  We also subscribe to the
	 * detach event so we know when our peer goes away, and we do that
	 * before attaching so we don't miss an event.  If all this succeeds,
	 * we update our state and wake up anything waiting in accept() for a
	 * connection.
	 */

	/* We don't care about attach since we ensure the other side has
	 * attached by specifying the ATTACH_ONLY flag below.
	 */
	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
				   vmci_transport_peer_detach_cb,
				   vmci_trans(vpending), &detach_sub_id);
	if (err < VMCI_SUCCESS) {
		vmci_transport_send_reset(pending, pkt);
		err = vmci_transport_error_to_vsock_error(err);
		skerr = -err;
		goto destroy;
	}

	vmci_trans(vpending)->detach_sub_id = detach_sub_id;

	/* Now attach to the queue pair the client created. */
	handle = pkt->u.handle;

	/* vpending->local_addr always has a context id so we do not need to
	 * worry about VMADDR_CID_ANY in this case.
	 */
	is_local =
	    vpending->remote_addr.svm_cid == vpending->local_addr.svm_cid;
	flags = VMCI_QPFLAG_ATTACH_ONLY;
	flags |= is_local ? VMCI_QPFLAG_LOCAL : 0;

	err = vmci_transport_queue_pair_alloc(
					&qpair,
					&handle,
					vmci_trans(vpending)->produce_size,
					vmci_trans(vpending)->consume_size,
					pkt->dg.src.context,
					flags,
					vmci_transport_is_trusted(
						vpending,
						vpending->remote_addr.svm_cid));
	if (err < 0) {
		vmci_transport_send_reset(pending, pkt);
		skerr = -err;
		goto destroy;
	}

	vmci_trans(vpending)->qp_handle = handle;
	vmci_trans(vpending)->qpair = qpair;

	/* When we send the attach message, we must be ready to handle incoming
	 * control messages on the newly connected socket. So we move the
	 * pending socket to the connected state before sending the attach
	 * message. Otherwise, an incoming packet triggered by the attach being
	 * received by the peer may be processed concurrently with what happens
	 * below after sending the attach message, and that incoming packet
	 * will find the listening socket instead of the (currently) pending
	 * socket. Note that enqueueing the socket increments the reference
	 * count, so even if a reset comes before the connection is accepted,
	 * the socket will be valid until it is removed from the queue.
	 *
	 * If we fail sending the attach below, we remove the socket from the
	 * connected list and move the socket to TCP_CLOSE before
	 * releasing the lock, so a pending slow path processing of an incoming
	 * packet will not see the socket in the connected state in that case.
	 */
	pending->sk_state = TCP_ESTABLISHED;

	vsock_insert_connected(vpending);

	/* Notify our peer of our attach. */
	err = vmci_transport_send_attach(pending, handle);
	if (err < 0) {
		vsock_remove_connected(vpending);
		pr_err("Could not send attach\n");
		vmci_transport_send_reset(pending, pkt);
		err = vmci_transport_error_to_vsock_error(err);
		skerr = -err;
		goto destroy;
	}

	/* We have a connection. Move the now connected socket from the
	 * listener's pending list to the accept queue so callers of accept()
	 * can find it.
	 */
	vsock_remove_pending(listener, pending);
	vsock_enqueue_accept(listener, pending);

	/* Callers of accept() will be waiting on the listening socket, not
	 * the pending socket.
	 */
	listener->sk_data_ready(listener);

	return 0;

destroy:
	pending->sk_err = skerr;
	pending->sk_state = TCP_CLOSE;
	/* As long as we drop our reference, all necessary cleanup will happen
	 * when the cleanup function drops its reference and our destruct
	 * implementation is called.  Note that since the listen handler will
	 * remove pending from the pending list upon our failure, the cleanup
	 * function won't drop the additional reference, which is why we do it
	 * here.
	 */
	sock_put(pending);

	return err;
}

static int
vmci_transport_recv_connecting_client(struct sock *sk,
				      struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vsk;
	int err;
	int skerr;

	vsk = vsock_sk(sk);

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
		if (vmci_handle_is_invalid(pkt->u.handle) ||
		    !vmci_handle_is_equal(pkt->u.handle,
					  vmci_trans(vsk)->qp_handle)) {
			skerr = EPROTO;
			err = -EINVAL;
			goto destroy;
		}

		/* Signify the socket is connected and wake up the waiter in
		 * connect(). Also place the socket in the connected table for
		 * accounting (it can already be found since it's in the bound
		 * table).
		 */
		sk->sk_state = TCP_ESTABLISHED;
		sk->sk_socket->state = SS_CONNECTED;
		vsock_insert_connected(vsk);
		sk->sk_state_change(sk);

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
		if (pkt->u.size == 0
		    || pkt->dg.src.context != vsk->remote_addr.svm_cid
		    || pkt->src_port != vsk->remote_addr.svm_port
		    || !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)
		    || vmci_trans(vsk)->qpair
		    || vmci_trans(vsk)->produce_size != 0
		    || vmci_trans(vsk)->consume_size != 0
		    || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
			skerr = EPROTO;
			err = -EINVAL;

			goto destroy;
		}

		err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
		if (err) {
			skerr = -err;
			goto destroy;
		}

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
		err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
		if (err) {
			skerr = -err;
			goto destroy;
		}

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		/* Older versions of the Linux code (WS 6.5 / ESX 4.0) used to
		 * continue processing here after they sent an INVALID packet.
		 * This meant that we got a RST after the INVALID. We ignore a
		 * RST after an INVALID. The common code doesn't send the RST
		 * ... so we can hang if an old version of the common code
		 * fails between getting a REQUEST and sending an OFFER back.
		 * Not much we can do about it... except hope that it doesn't
		 * happen.
		 */
		if (vsk->ignore_connecting_rst) {
			vsk->ignore_connecting_rst = false;
		} else {
			skerr = ECONNRESET;
			err = 0;
			goto destroy;
		}

		break;
	default:
		/* Close and cleanup the connection. */
		skerr = EPROTO;
		err = -EINVAL;
		goto destroy;
	}

	return 0;

destroy:
	vmci_transport_send_reset(sk, pkt);

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = skerr;
	sk_error_report(sk);
	return err;
}

static int vmci_transport_recv_connecting_client_negotiate(
					struct sock *sk,
					struct vmci_transport_packet *pkt)
{
	int err;
	struct vsock_sock *vsk;
	struct vmci_handle handle;
	struct vmci_qp *qpair;
	u32 detach_sub_id;
	bool is_local;
	u32 flags;
	bool old_proto = true;
	bool old_pkt_proto;
	u16 version;

	vsk = vsock_sk(sk);
	handle = VMCI_INVALID_HANDLE;
	detach_sub_id = VMCI_INVALID_ID;

	/* If we have gotten here then we should be past the point where an
	 * old Linux vsock implementation could have sent the bogus RST.
	 */
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;

	/* Verify that we're OK with the proposed queue pair size */
	if (pkt->u.size < vsk->buffer_min_size ||
	    pkt->u.size > vsk->buffer_max_size) {
		err = -EINVAL;
		goto destroy;
	}

	/* At this point we know the CID the peer is using to talk to us. */

	if (vsk->local_addr.svm_cid == VMADDR_CID_ANY)
		vsk->local_addr.svm_cid = pkt->dg.dst.context;

	/* Setup the notify ops to be the highest supported version that both
	 * the server and the client support.
	 */

	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
		old_proto = old_pkt_proto;
	} else {
		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
			old_proto = true;
		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
			old_proto = false;

	}

	if (old_proto)
		version = VSOCK_PROTO_INVALID;
	else
		version = pkt->proto;

	if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
		err = -EINVAL;
		goto destroy;
	}

	/* Subscribe to detach events first.
	 *
	 * XXX We attach once for each queue pair created for now so it is easy
	 * to find the socket (it's provided), but later we should only
	 * subscribe once and add a way to look up sockets by queue pair
	 * handle.
	 */
	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
				   vmci_transport_peer_detach_cb,
				   vmci_trans(vsk), &detach_sub_id);
	if (err < VMCI_SUCCESS) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	/* Make VMCI select the handle for us. */
	handle = VMCI_INVALID_HANDLE;
	is_local = vsk->remote_addr.svm_cid == vsk->local_addr.svm_cid;
	flags = is_local ? VMCI_QPFLAG_LOCAL : 0;

	err = vmci_transport_queue_pair_alloc(&qpair,
					      &handle,
					      pkt->u.size,
					      pkt->u.size,
					      vsk->remote_addr.svm_cid,
					      flags,
					      vmci_transport_is_trusted(
						  vsk,
						  vsk->remote_addr.svm_cid));
	if (err < 0)
		goto destroy;

	err = vmci_transport_send_qp_offer(sk, handle);
	if (err < 0) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	vmci_trans(vsk)->qp_handle = handle;
	vmci_trans(vsk)->qpair = qpair;

	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
		pkt->u.size;

	vmci_trans(vsk)->detach_sub_id = detach_sub_id;

	vmci_trans(vsk)->notify_ops->process_negotiate(sk);

	return 0;

destroy:
	if (detach_sub_id != VMCI_INVALID_ID)
		vmci_event_unsubscribe(detach_sub_id);

	if (!vmci_handle_is_invalid(handle))
		vmci_qpair_detach(&qpair);

	return err;
}

static int
vmci_transport_recv_connecting_client_invalid(struct sock *sk,
					      struct vmci_transport_packet *pkt)
{
	int err = 0;
	struct vsock_sock *vsk = vsock_sk(sk);

	if (vsk->sent_request) {
		vsk->sent_request = false;
		vsk->ignore_connecting_rst = true;

		err = vmci_transport_send_conn_request(sk, vsk->buffer_size);
		if (err < 0)
			err = vmci_transport_error_to_vsock_error(err);
		else
			err = 0;

	}

	return err;
}

static int vmci_transport_recv_connected(struct sock *sk,
					 struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vsk;
	bool pkt_processed = false;

	/* In cases where we are closing the connection, it's sufficient to
	 * mark the state change (and maybe error) and wake up any waiting
	 * threads. Since this is a connected socket, it's owned by a user
	 * process and will be cleaned up when the failure is passed back on
	 * the current or next system call.  Our system call implementations
	 * must therefore check for error and state changes on entry and when
	 * being awoken.
	 */
	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
		if (pkt->u.mode) {
			vsk = vsock_sk(sk);

			vsk->peer_shutdown |= pkt->u.mode;
			sk->sk_state_change(sk);
		}
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		vsk = vsock_sk(sk);
		/* It is possible that we sent our peer a message (e.g. a
		 * WAITING_READ) right before we got notified that the peer had
		 * detached. If that happens then we can get a RST pkt back
		 * from our peer even though there is data available for us to
		 * read. In that case, don't shut down the socket completely;
		 * instead allow the local client to finish reading data off
		 * the queuepair. Always treat a RST pkt in connected mode like
		 * a clean shutdown.
		 */
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		if (vsock_stream_has_data(vsk) <= 0)
			sk->sk_state = TCP_CLOSING;

		sk->sk_state_change(sk);
		break;

	default:
		vsk = vsock_sk(sk);
		vmci_trans(vsk)->notify_ops->handle_notify_pkt(
				sk, pkt, false, NULL, NULL,
				&pkt_processed);
		if (!pkt_processed)
			return -EINVAL;

		break;
	}

	return 0;
}

static int vmci_transport_socket_init(struct vsock_sock *vsk,
				      struct vsock_sock *psk)
{
	vsk->trans = kmalloc(sizeof(struct vmci_transport), GFP_KERNEL);
	if (!vsk->trans)
		return -ENOMEM;

	vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
	vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
	vmci_trans(vsk)->qpair = NULL;
	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
	vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
	vmci_trans(vsk)->notify_ops = NULL;
	INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
	vmci_trans(vsk)->sk = &vsk->sk;
	spin_lock_init(&vmci_trans(vsk)->lock);

	return 0;
}

static void vmci_transport_free_resources(struct list_head *transport_list)
{
	while (!list_empty(transport_list)) {
		struct vmci_transport *transport =
		    list_first_entry(transport_list, struct vmci_transport,
				     elem);
		list_del(&transport->elem);

		if (transport->detach_sub_id != VMCI_INVALID_ID) {
			vmci_event_unsubscribe(transport->detach_sub_id);
			transport->detach_sub_id = VMCI_INVALID_ID;
		}

		if (!vmci_handle_is_invalid(transport->qp_handle)) {
			vmci_qpair_detach(&transport->qpair);
			transport->qp_handle = VMCI_INVALID_HANDLE;
			transport->produce_size = 0;
			transport->consume_size = 0;
		}

		kfree(transport);
	}
}

static void vmci_transport_cleanup(struct work_struct *work)
{
	LIST_HEAD(pending);

	spin_lock_bh(&vmci_transport_cleanup_lock);
	list_replace_init(&vmci_transport_cleanup_list, &pending);
	spin_unlock_bh(&vmci_transport_cleanup_lock);
	vmci_transport_free_resources(&pending);
}
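
/* A small design note: destruct paths only queue transports on
 * vmci_transport_cleanup_list.  The list is detached here under the
 * BH-safe spinlock via list_replace_init(), and the actual teardown
 * (vmci_event_unsubscribe(), vmci_qpair_detach()) then runs in process
 * context, where those calls are free to block.
 */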

static void vmci_transport_destruct(struct vsock_sock *vsk)
{
	/* transport can be NULL if we hit a failure at init() time */
	if (!vmci_trans(vsk))
		return;

	/* Ensure that the detach callback doesn't use the sk/vsk
	 * we are about to destruct.
	 */
	spin_lock_bh(&vmci_trans(vsk)->lock);
	vmci_trans(vsk)->sk = NULL;
	spin_unlock_bh(&vmci_trans(vsk)->lock);

	if (vmci_trans(vsk)->notify_ops)
		vmci_trans(vsk)->notify_ops->socket_destruct(vsk);

	spin_lock_bh(&vmci_transport_cleanup_lock);
	list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
	spin_unlock_bh(&vmci_transport_cleanup_lock);
	schedule_work(&vmci_transport_cleanup_work);

	vsk->trans = NULL;
}

static void vmci_transport_release(struct vsock_sock *vsk)
{
	vsock_remove_sock(vsk);

	if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) {
		vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle);
		vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
	}
}

static int vmci_transport_dgram_bind(struct vsock_sock *vsk,
				     struct sockaddr_vm *addr)
{
	u32 port;
	u32 flags;
	int err;

	/* VMCI will select a resource ID for us if we provide
	 * VMCI_INVALID_ID.
	 */
	port = addr->svm_port == VMADDR_PORT_ANY ?
			VMCI_INVALID_ID : addr->svm_port;

	if (port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	flags = addr->svm_cid == VMADDR_CID_ANY ?
				VMCI_FLAG_ANYCID_DG_HND : 0;

	err = vmci_transport_datagram_create_hnd(port, flags,
						 vmci_transport_recv_dgram_cb,
						 &vsk->sk,
						 &vmci_trans(vsk)->dg_handle);
	if (err < VMCI_SUCCESS)
		return vmci_transport_error_to_vsock_error(err);
	vsock_addr_init(&vsk->local_addr, addr->svm_cid,
			vmci_trans(vsk)->dg_handle.resource);

	return 0;
}
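
/* Note: when the caller binds to VMADDR_PORT_ANY, the port recorded in
 * the local address is whatever resource id VMCI picked for the
 * datagram handle, i.e. VMCI resource ids double as vsock ports for
 * datagram sockets.
 */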

static int vmci_transport_dgram_enqueue(
	struct vsock_sock *vsk,
	struct sockaddr_vm *remote_addr,
	struct msghdr *msg,
	size_t len)
{
	int err;
	struct vmci_datagram *dg;

	if (len > VMCI_MAX_DG_PAYLOAD_SIZE)
		return -EMSGSIZE;

	if (!vmci_transport_allow_dgram(vsk, remote_addr->svm_cid))
		return -EPERM;

	/* Allocate a buffer for the user's message and our packet header. */
	dg = kmalloc(len + sizeof(*dg), GFP_KERNEL);
	if (!dg)
		return -ENOMEM;

	err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
	if (err) {
		kfree(dg);
		return err;
	}

	dg->dst = vmci_make_handle(remote_addr->svm_cid,
				   remote_addr->svm_port);
	dg->src = vmci_make_handle(vsk->local_addr.svm_cid,
				   vsk->local_addr.svm_port);
	dg->payload_size = len;

	err = vmci_datagram_send(dg);
	kfree(dg);
	if (err < 0)
		return vmci_transport_error_to_vsock_error(err);

	return err - sizeof(*dg);
}
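
/* On success vmci_datagram_send() reports the number of bytes sent
 * including the datagram header, hence the sizeof(*dg) subtraction
 * above so that callers see only the payload byte count.
 */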

static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
					struct msghdr *msg, size_t len,
					int flags)
{
	int err;
	struct vmci_datagram *dg;
	size_t payload_len;
	struct sk_buff *skb;

	if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
		return -EOPNOTSUPP;

	/* Retrieve the head sk_buff from the socket's receive queue. */
	err = 0;
	skb = skb_recv_datagram(&vsk->sk, flags, &err);
	if (!skb)
		return err;

	dg = (struct vmci_datagram *)skb->data;
	if (!dg)
		/* err is 0, meaning we read zero bytes. */
		goto out;

	payload_len = dg->payload_size;
	/* Ensure the sk_buff matches the payload size claimed in the packet. */
	if (payload_len != skb->len - sizeof(*dg)) {
		err = -EINVAL;
		goto out;
	}

	if (payload_len > len) {
		payload_len = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Place the datagram payload in the user's iovec. */
	err = skb_copy_datagram_msg(skb, sizeof(*dg), msg, payload_len);
	if (err)
		goto out;

	if (msg->msg_name) {
		/* Provide the address of the sender. */
		DECLARE_SOCKADDR(struct sockaddr_vm *, vm_addr, msg->msg_name);
		vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
		msg->msg_namelen = sizeof(*vm_addr);
	}
	err = payload_len;

out:
	skb_free_datagram(&vsk->sk, skb);
	return err;
}
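
/*
 * Example: if the queued datagram is larger than the caller's buffer,
 * the copy above is truncated and MSG_TRUNC is set, as with other
 * datagram sockets; the sender's address is rebuilt from dg->src. A
 * minimal userspace sketch using recvmsg() to observe both:
 *
 *	char buf[64];
 *	struct sockaddr_vm src;
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_name = &src, .msg_namelen = sizeof(src),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *	};
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *
 * After the call, src.svm_cid/src.svm_port name the sender, and
 * msg.msg_flags has MSG_TRUNC set if the datagram exceeded sizeof(buf).
 */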

static bool vmci_transport_dgram_allow(struct vsock_sock *vsk, u32 cid,
				       u32 port)
{
	if (!vsock_net_mode_global(vsk))
		return false;

	if (cid == VMADDR_CID_HYPERVISOR) {
		/* Registrations of PBRPC Servers do not modify VMX/Hypervisor
		 * state and are allowed.
		 */
		return port == VMCI_UNITY_PBRPC_REGISTER;
	}

	return true;
}

static int vmci_transport_connect(struct vsock_sock *vsk)
{
	int err;
	bool old_pkt_proto = false;
	struct sock *sk = &vsk->sk;

	if (vmci_transport_old_proto_override(&old_pkt_proto) &&
		old_pkt_proto) {
		err = vmci_transport_send_conn_request(sk, vsk->buffer_size);
		if (err < 0) {
			sk->sk_state = TCP_CLOSE;
			return err;
		}
	} else {
		int supported_proto_versions =
			vmci_transport_new_proto_supported_versions();
		err = vmci_transport_send_conn_request2(sk, vsk->buffer_size,
				supported_proto_versions);
		if (err < 0) {
			sk->sk_state = TCP_CLOSE;
			return err;
		}

		vsk->sent_request = true;
	}

	return err;
}
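
/*
 * Example: the userspace trigger for this path is a plain connect() on
 * a stream socket; any negotiation error above surfaces as the
 * connect() return value after the socket drops back to TCP_CLOSE.
 * The port below is hypothetical:
 *
 *	struct sockaddr_vm peer = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_HOST,
 *		.svm_port = 1234,
 *	};
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 */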

static ssize_t vmci_transport_stream_dequeue(
	struct vsock_sock *vsk,
	struct msghdr *msg,
	size_t len,
	int flags)
{
	ssize_t err;

	if (flags & MSG_PEEK)
		err = vmci_qpair_peekv(vmci_trans(vsk)->qpair, msg, len, 0);
	else
		err = vmci_qpair_dequev(vmci_trans(vsk)->qpair, msg, len, 0);

	if (err < 0)
		err = -ENOMEM;

	return err;
}
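
/*
 * Example: MSG_PEEK maps to vmci_qpair_peekv(), which leaves the bytes
 * in the queue pair, while a normal read maps to vmci_qpair_dequev(),
 * which consumes them; note that any queue-pair error is flattened to
 * -ENOMEM here. So from userspace, assuming data is available:
 *
 *	char tmp[16];
 *	ssize_t peeked = recv(fd, tmp, sizeof(tmp), MSG_PEEK);
 *	ssize_t taken  = recv(fd, tmp, sizeof(tmp), 0);
 *
 * Both calls see the same leading bytes of the stream.
 */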

static ssize_t vmci_transport_stream_enqueue(
	struct vsock_sock *vsk,
	struct msghdr *msg,
	size_t len)
{
	ssize_t err;

	err = vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
	if (err < 0)
		err = -ENOMEM;

	return err;
}

static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
{
	return vmci_qpair_consume_buf_ready(vmci_trans(vsk)->qpair);
}

static s64 vmci_transport_stream_has_space(struct vsock_sock *vsk)
{
	return vmci_qpair_produce_free_space(vmci_trans(vsk)->qpair);
}

static u64 vmci_transport_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return vmci_trans(vsk)->consume_size;
}

static bool vmci_transport_stream_is_active(struct vsock_sock *vsk)
{
	return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle);
}
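
/*
 * These accessors are how the af_vsock core inspects the queue pair:
 * readable bytes come from the consume side, writable space from the
 * produce side, and stream_is_active() reports whether a queue pair is
 * attached at all. Roughly (the core also consults the notify ops
 * below), a userspace poll() such as:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *	poll(&pfd, 1, -1);
 *
 * reports POLLIN once stream_has_data() is positive and POLLOUT once
 * stream_has_space() is.
 */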

static int vmci_transport_notify_poll_in(
	struct vsock_sock *vsk,
	size_t target,
	bool *data_ready_now)
{
	return vmci_trans(vsk)->notify_ops->poll_in(
			&vsk->sk, target, data_ready_now);
}

static int vmci_transport_notify_poll_out(
	struct vsock_sock *vsk,
	size_t target,
	bool *space_available_now)
{
	return vmci_trans(vsk)->notify_ops->poll_out(
			&vsk->sk, target, space_available_now);
}

static int vmci_transport_notify_recv_init(
	struct vsock_sock *vsk,
	size_t target,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_init(
			&vsk->sk, target,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_recv_pre_block(
	struct vsock_sock *vsk,
	size_t target,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_pre_block(
			&vsk->sk, target,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_recv_pre_dequeue(
	struct vsock_sock *vsk,
	size_t target,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_pre_dequeue(
			&vsk->sk, target,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_recv_post_dequeue(
	struct vsock_sock *vsk,
	size_t target,
	ssize_t copied,
	bool data_read,
	struct vsock_transport_recv_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->recv_post_dequeue(
			&vsk->sk, target, copied, data_read,
			(struct vmci_transport_recv_notify_data *)data);
}

static int vmci_transport_notify_send_init(
	struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_init(
			&vsk->sk,
			(struct vmci_transport_send_notify_data *)data);
}

static int vmci_transport_notify_send_pre_block(
	struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_pre_block(
			&vsk->sk,
			(struct vmci_transport_send_notify_data *)data);
}

static int vmci_transport_notify_send_pre_enqueue(
	struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_pre_enqueue(
			&vsk->sk,
			(struct vmci_transport_send_notify_data *)data);
}

static int vmci_transport_notify_send_post_enqueue(
	struct vsock_sock *vsk,
	ssize_t written,
	struct vsock_transport_send_notify_data *data)
{
	return vmci_trans(vsk)->notify_ops->send_post_enqueue(
			&vsk->sk, written,
			(struct vmci_transport_send_notify_data *)data);
}

static bool vmci_transport_old_proto_override(bool *old_pkt_proto)
{
	if (PROTOCOL_OVERRIDE != -1) {
		if (PROTOCOL_OVERRIDE == 0)
			*old_pkt_proto = true;
		else
			*old_pkt_proto = false;

		pr_info("Proto override in use\n");
		return true;
	}

	return false;
}

static bool vmci_transport_proto_to_notify_struct(struct sock *sk,
						  u16 *proto,
						  bool old_pkt_proto)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (old_pkt_proto) {
		if (*proto != VSOCK_PROTO_INVALID) {
			pr_err("Can't set both an old and new protocol\n");
			return false;
		}
		vmci_trans(vsk)->notify_ops = &vmci_transport_notify_pkt_ops;
		goto exit;
	}

	switch (*proto) {
	case VSOCK_PROTO_PKT_ON_NOTIFY:
		vmci_trans(vsk)->notify_ops =
			&vmci_transport_notify_pkt_q_state_ops;
		break;
	default:
		pr_err("Unknown notify protocol version\n");
		return false;
	}

exit:
	vmci_trans(vsk)->notify_ops->socket_init(sk);
	return true;
}

static u16 vmci_transport_new_proto_supported_versions(void)
{
	if (PROTOCOL_OVERRIDE != -1)
		return PROTOCOL_OVERRIDE;

	return VSOCK_PROTO_ALL_SUPPORTED;
}

static u32 vmci_transport_get_local_cid(void)
{
	return vmci_get_context_id();
}
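
/*
 * Example: userspace can obtain the same context ID through the
 * standard vsock ioctl on /dev/vsock, which is handy when deciding
 * which local address to advertise to a peer:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vm_sockets.h>
 *
 *	unsigned int cid;
 *	int dev = open("/dev/vsock", O_RDONLY);
 *	ioctl(dev, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 */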

static struct vsock_transport vmci_transport = {
	.module = THIS_MODULE,
	.init = vmci_transport_socket_init,
	.destruct = vmci_transport_destruct,
	.release = vmci_transport_release,
	.connect = vmci_transport_connect,
	.dgram_bind = vmci_transport_dgram_bind,
	.dgram_dequeue = vmci_transport_dgram_dequeue,
	.dgram_enqueue = vmci_transport_dgram_enqueue,
	.dgram_allow = vmci_transport_dgram_allow,
	.stream_dequeue = vmci_transport_stream_dequeue,
	.stream_enqueue = vmci_transport_stream_enqueue,
	.stream_has_data = vmci_transport_stream_has_data,
	.stream_has_space = vmci_transport_stream_has_space,
	.stream_rcvhiwat = vmci_transport_stream_rcvhiwat,
	.stream_is_active = vmci_transport_stream_is_active,
	.stream_allow = vmci_transport_stream_allow,
	.notify_poll_in = vmci_transport_notify_poll_in,
	.notify_poll_out = vmci_transport_notify_poll_out,
	.notify_recv_init = vmci_transport_notify_recv_init,
	.notify_recv_pre_block = vmci_transport_notify_recv_pre_block,
	.notify_recv_pre_dequeue = vmci_transport_notify_recv_pre_dequeue,
	.notify_recv_post_dequeue = vmci_transport_notify_recv_post_dequeue,
	.notify_send_init = vmci_transport_notify_send_init,
	.notify_send_pre_block = vmci_transport_notify_send_pre_block,
	.notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue,
	.notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue,
	.shutdown = vmci_transport_shutdown,
	.get_local_cid = vmci_transport_get_local_cid,
};
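
/*
 * A sketch of how the af_vsock core drives this table (assumed shape,
 * simplified): each vsock_sock carries a transport pointer, and the
 * core indirects through it for every operation, e.g.:
 *
 *	const struct vsock_transport *t = vsk->transport;
 *	if (t->stream_has_data(vsk) > 0)
 *		copied = t->stream_dequeue(vsk, msg, len, flags);
 *
 * which is also why vmci_check_transport() below can identify a VMCI
 * socket simply by comparing that pointer against &vmci_transport.
 */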

static bool vmci_check_transport(struct vsock_sock *vsk)
{
	return vsk->transport == &vmci_transport;
}

static void vmci_vsock_transport_cb(bool is_host)
{
	int features;

	if (is_host)
		features = VSOCK_TRANSPORT_F_H2G;
	else
		features = VSOCK_TRANSPORT_F_G2H;

	vsock_core_register(&vmci_transport, features);
}

static int __init vmci_transport_init(void)
{
	int err;

	/* Create the datagram handle that we will use to send and receive all
	 * VSocket control messages for this context.
	 */
	err = vmci_transport_datagram_create_hnd(VMCI_TRANSPORT_PACKET_RID,
						 VMCI_FLAG_ANYCID_DG_HND,
						 vmci_transport_recv_stream_cb,
						 NULL,
						 &vmci_transport_stream_handle);
	if (err < VMCI_SUCCESS) {
		pr_err("Unable to create datagram handle. (%d)\n", err);
		return vmci_transport_error_to_vsock_error(err);
	}
	err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
				   vmci_transport_qp_resumed_cb,
				   NULL, &vmci_transport_qp_resumed_sub_id);
	if (err < VMCI_SUCCESS) {
		pr_err("Unable to subscribe to resumed event. (%d)\n", err);
		err = vmci_transport_error_to_vsock_error(err);
		vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
		goto err_destroy_stream_handle;
	}

	/* Register with the dgram feature only; the other features (H2G,
	 * G2H) are registered when the first host or guest becomes active.
	 */
	err = vsock_core_register(&vmci_transport, VSOCK_TRANSPORT_F_DGRAM);
	if (err < 0)
		goto err_unsubscribe;

	err = vmci_register_vsock_callback(vmci_vsock_transport_cb);
	if (err < 0)
		goto err_unregister;

	return 0;

err_unregister:
	vsock_core_unregister(&vmci_transport);
err_unsubscribe:
	vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
err_destroy_stream_handle:
	vmci_datagram_destroy_handle(vmci_transport_stream_handle);
	return err;
}
module_init(vmci_transport_init);

static void __exit vmci_transport_exit(void)
{
	cancel_work_sync(&vmci_transport_cleanup_work);
	vmci_transport_free_resources(&vmci_transport_cleanup_list);

	if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
		if (vmci_datagram_destroy_handle(
			vmci_transport_stream_handle) != VMCI_SUCCESS)
			pr_err("Couldn't destroy datagram handle\n");
		vmci_transport_stream_handle = VMCI_INVALID_HANDLE;
	}

	if (vmci_transport_qp_resumed_sub_id != VMCI_INVALID_ID) {
		vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
		vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
	}

	vmci_register_vsock_callback(NULL);
	vsock_core_unregister(&vmci_transport);
}
module_exit(vmci_transport_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
MODULE_VERSION("1.0.5.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);