// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
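/* Note: 2 is VMADDR_CID_HOST from <uapi/linux/vm_sockets.h>; the host side of
 * every vhost-vsock connection uses the fixed, well-known host CID.
 */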
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES |
			       (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			       (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};

enum {
	VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	struct sk_buff_head send_pkt_queue; /* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
	bool seqpacket_allow;
};
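
/* The virtqueue indices are named from the guest's point of view, as
 * elsewhere in virtio-vsock: vqs[VSOCK_VQ_RX] is the guest's receive queue
 * (host->guest packets, drained by the send_pkt path below) and
 * vqs[VSOCK_VQ_TX] is the guest's transmit queue (guest->host packets,
 * handled by the tx kick handler).
 */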

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}
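
/* A minimal sketch of the expected caller pattern (mirroring
 * vhost_transport_send_pkt() below). The device cannot be freed while the
 * read-side critical section is held, because the release path calls
 * synchronize_rcu() after unhashing:
 *
 *	rcu_read_lock();
 *	vsock = vhost_vsock_get(cid);
 *	if (vsock)
 *		... use vsock ...
 *	rcu_read_unlock();
 */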

static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_hdr *hdr;
		size_t iov_len, payload_len;
		struct iov_iter iov_iter;
		u32 flags_to_restore = 0;
		struct sk_buff *skb;
		unsigned out, in;
		size_t nbytes;
		u32 offset;
		int head;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);

		if (!skb) {
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		if (head == vq->num) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			kfree_skb(skb);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(*hdr)) {
			kfree_skb(skb);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
		offset = VIRTIO_VSOCK_SKB_CB(skb)->offset;
		payload_len = skb->len - offset;
		hdr = virtio_vsock_hdr(skb);

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(*hdr)) {
			payload_len = iov_len - sizeof(*hdr);

			/* As we copy pieces of a large packet's buffer into
			 * small rx buffers, the headers of the packets in the
			 * rx queue are created dynamically, initialized from
			 * the current packet's header (except for the length).
			 * For SOCK_SEQPACKET, we must also clear the message
			 * delimiter bit (VIRTIO_VSOCK_SEQ_EOM) and the MSG_EOR
			 * bit (VIRTIO_VSOCK_SEQ_EOR) if they are set;
			 * otherwise every fragment would carry them. Once this
			 * header has been copied to the rx buffer, the
			 * required bits are restored for the last fragment.
			 */
			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
				hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
				flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;

				if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
					hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
					flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
				}
			}
		}

		/* Set the correct length in the header */
		hdr->len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
		if (nbytes != sizeof(*hdr)) {
			kfree_skb(skb);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		if (skb_copy_datagram_iter(skb,
					   offset,
					   &iov_iter,
					   payload_len)) {
			kfree_skb(skb);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(skb);

		vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
		added = true;

		VIRTIO_VSOCK_SKB_CB(skb)->offset += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (VIRTIO_VSOCK_SKB_CB(skb)->offset < skb->len) {
			hdr->flags |= cpu_to_le32(flags_to_restore);

			/* We are queueing the same skb to handle
			 * the remaining bytes, and we want to deliver it
			 * to monitoring devices in the next iteration.
			 */
			virtio_vsock_skb_clear_tap_delivered(skb);
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
		} else {
			if (virtio_vsock_skb_reply(skb)) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			consume_skb(skb);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}
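
/* Worked example of the splitting above (illustrative sizes, not taken from
 * the code): a 10 KiB SOCK_SEQPACKET payload with VIRTIO_VSOCK_SEQ_EOM set,
 * delivered into 4 KiB guest rx buffers, goes out as three descriptors. Each
 * fragment carries its own header with hdr->len reduced to what fits; the
 * first two have EOM/EOR cleared and only the final fragment carries the
 * original delimiter bits. Between fragments the skb is requeued at the head
 * of send_pkt_queue, so the byte stream stays ordered.
 */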

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

static int
vhost_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vhost_vsock *vsock;
	int len = skb->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -ENODEV;
	}

	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	int cnt = 0;
	int ret = -ENODEV;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
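
/* The wakeup condition above in numbers: with tx_vq->num == 256, tx
 * processing is throttled once queued_replies reaches 256 (see
 * vhost_vsock_more_replies()). If cancelling purges cnt == 10 replies and
 * new_cnt == 250, then new_cnt + cnt == 260 >= 256 while new_cnt < 256: the
 * counter just crossed back under the limit, so the tx virtqueue is
 * re-polled exactly once.
 */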

static struct sk_buff *
vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_hdr *hdr;
	struct iov_iter iov_iter;
	struct sk_buff *skb;
	size_t payload_len;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	len = iov_length(vq->iov, out);

	/* len contains both payload and hdr */
	skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;

	iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);

	hdr = virtio_vsock_hdr(skb);
	nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
	if (nbytes != sizeof(*hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(*hdr), nbytes);
		kfree_skb(skb);
		return NULL;
	}

	payload_len = le32_to_cpu(hdr->len);

	/* No payload */
	if (!payload_len)
		return skb;

	/* The pkt is too big or the length in the header is invalid */
	if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
	    payload_len + sizeof(*hdr) > len) {
		kfree_skb(skb);
		return NULL;
	}

	virtio_vsock_skb_rx_put(skb);

	nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
	if (nbytes != payload_len) {
		vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
		       payload_len, nbytes);
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
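
/* Layout of a guest tx buffer as parsed above: the out descriptors hold
 * [virtio_vsock_hdr][payload]. hdr->len is guest-controlled, so it is
 * validated twice before any payload is copied: against
 * VIRTIO_VSOCK_MAX_PKT_BUF_SIZE and against the actual descriptor length.
 */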

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}
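
/* e.g. with a 256-entry tx virtqueue, the tx kick handler stops consuming
 * guest packets once 256 locally generated replies are pending, and tx is
 * re-polled from the send path when the count drops back below 256 (see
 * vhost_transport_do_send_pkt()).
 */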

static bool vhost_transport_msgzerocopy_allow(void)
{
	return true;
}

static bool vhost_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport vhost_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = vhost_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.msgzerocopy_allow        = vhost_transport_msgzerocopy_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
		.notify_set_rcvlowat      = virtio_transport_notify_set_rcvlowat,

		.read_skb = virtio_transport_read_skb,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static bool vhost_transport_seqpacket_allow(u32 remote_cid)
{
	struct vhost_vsock *vsock;
	bool seqpacket_allow = false;

	rcu_read_lock();
	vsock = vhost_vsock_get(remote_cid);

	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;

	rcu_read_unlock();

	return seqpacket_allow;
}

static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	struct sk_buff *skb;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		struct virtio_vsock_hdr *hdr;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		skb = vhost_vsock_alloc_skb(vq, out, in);
		if (!skb) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		total_len += sizeof(*hdr) + skb->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(skb);

		hdr = virtio_vsock_hdr(skb);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
		    le64_to_cpu(hdr->dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, skb);
		else
			kfree_skb(skb);

		vhost_add_used(vq, head, 0);
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vhost_vq_get_backend(vq)) {
			vhost_vq_set_backend(vq, vsock);
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	/* Some packets may have been queued before the device was started,
	 * let's kick the send worker to send them.
	 */
	vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
	size_t i;
	int ret = 0;

	mutex_lock(&vsock->dev.mutex);

	if (check_owner) {
		ret = vhost_dev_check_owner(&vsock->dev);
		if (ret)
			goto err;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */
	vsock->seqpacket_allow = false;

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT, true, NULL);

	file->private_data = vsock;
	skb_queue_head_init(&vsock->send_pkt_queue);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	vhost_dev_flush(&vsock->dev);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire.  This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(&vhost_transport.transport,
					vhost_vsock_reset_orphans);

	/* Don't check the owner, because we are in the release path, so we
	 * need to stop the vsock device in any case.
	 * vhost_vsock_stop() can not fail in this case, so we don't need to
	 * check the return code.
	 */
	vhost_vsock_stop(vsock, false);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}
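
/* Teardown order matters in the release path above: the device is unhashed
 * and an RCU grace period observed first, so no new sender can look it up
 * via vhost_vsock_get(); only then are the virtqueues stopped and the
 * pending host->guest queue purged.
 */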

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is assigned to the guest->host transport (i.e. nested
	 * VM), to make the loopback work.
	 */
	if (vsock_find_cid(guest_cid))
		return -EADDRINUSE;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}
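
/* Accepted guest CIDs in numbers: 0 (VMADDR_CID_HYPERVISOR), 1
 * (VMADDR_CID_LOCAL) and 2 (VMADDR_CID_HOST) are refused as reserved, and
 * U32_MAX is refused because it equals VMADDR_CID_ANY; so valid guest CIDs
 * are 3..U32_MAX-1, each bound to at most one vhost_vsock instance.
 */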

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		goto err;
	}

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&vsock->dev))
			goto err;
	}

	vsock->seqpacket_allow = features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;

err:
	mutex_unlock(&vsock->dev.mutex);
	return -EFAULT;
}
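
/* For example, a VMM that negotiated VIRTIO_VSOCK_F_SEQPACKET arrives here
 * with that bit set in features, which flips seqpacket_allow on; any bit
 * outside VHOST_VSOCK_FEATURES was already rejected with -EOPNOTSUPP.
 */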

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock, true);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VSOCK_BACKEND_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&vsock->dev, features);
		return 0;
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}
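
/* A minimal userspace bring-up sketch (illustrative, not a complete VMM;
 * error handling, the memory table and vring setup via VHOST_SET_VRING_*
 * are elided):
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3, features;
 *	int running = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	... configure both virtqueues ...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);
 */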

static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek         = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.read_iter      = vhost_vsock_chr_read_iter,
	.write_iter     = vhost_vsock_chr_write_iter,
	.poll           = vhost_vsock_chr_poll,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_register(&vhost_transport.transport,
				  VSOCK_TRANSPORT_F_H2G);
	if (ret < 0)
		return ret;

	ret = misc_register(&vhost_vsock_misc);
	if (ret) {
		vsock_core_unregister(&vhost_transport.transport);
		return ret;
	}

	return 0;
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_unregister(&vhost_transport.transport);
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");