// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock.  vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	struct sk_buff_head send_pkt_queue;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock.  vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
	bool seqpacket_allow;

	/* These fields are used only in the TX path, in
	 * 'virtio_transport_send_pkt_work()'; placing them here saves
	 * stack space in that function. Each pointer in 'out_sgs' points
	 * to the corresponding element of 'out_bufs' - this is initialized
	 * in 'virtio_vsock_probe()'. Both fields are protected by
	 * 'tx_lock'. The +1 is needed for the packet header.
	 */
	struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
	struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
};
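
/* For reference, the virtqueue indices used in vqs[] above come from
 * <linux/virtio_vsock.h>, which lays them out per the virtio-vsock device
 * specification, approximately (illustrative sketch, not a redefinition):
 *
 *	enum {
 *		VSOCK_VQ_RX	= 0,	// host -> guest data
 *		VSOCK_VQ_TX	= 1,	// guest -> host data
 *		VSOCK_VQ_EVENT	= 2,
 *		VSOCK_VQ_MAX	= 3,
 *	};
 */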

static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		int ret, in_sg = 0, out_sg = 0;
		struct scatterlist **sgs;
		struct sk_buff *skb;
		bool reply;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;

		reply = virtio_vsock_skb_reply(skb);
		sgs = vsock->out_sgs;
		sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
			    sizeof(*virtio_vsock_hdr(skb)));
		out_sg++;

		if (!skb_is_nonlinear(skb)) {
			if (skb->len > 0) {
				sg_init_one(sgs[out_sg], skb->data, skb->len);
				out_sg++;
			}
		} else {
			struct skb_shared_info *si;
			int i;

			/* If skb is nonlinear, then its buffer must contain
			 * only the header and nothing more. Data is stored
			 * in the fragmented part.
			 */
			WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));

			si = skb_shinfo(skb);

			for (i = 0; i < si->nr_frags; i++) {
				skb_frag_t *skb_frag = &si->frags[i];
				void *va;

				/* We use 'page_to_virt()' for the userspace
				 * page here, because the virtio or dma-mapping
				 * layers will call 'virt_to_phys()' later to
				 * fill the buffer descriptor. We don't touch
				 * memory at the "virtual" address of this page.
				 */
				va = page_to_virt(skb_frag_page(skb_frag));
				sg_init_one(sgs[out_sg],
					    va + skb_frag_off(skb_frag),
					    skb_frag_size(skb_frag));
				out_sg++;
			}
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq.
		 */
		if (ret < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		virtio_transport_deliver_tap_pkt(skb);

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
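
/* For a linear skb, the descriptor chain handed to the TX virtqueue above
 * is [hdr][payload]; for a nonlinear skb it is [hdr][frag 0]...[frag N-1],
 * one out_sgs[] entry per element. out_sgs[] holds MAX_SKB_FRAGS + 1
 * entries, so the header plus every fragment always fits.
 */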

static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr;
	struct virtio_vsock *vsock;
	int len = skb->len;

	hdr = virtio_vsock_hdr(skb);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
	rcu_read_unlock();
	return len;
}
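
/* Note that virtio_transport_send_pkt() above only queues the skb and kicks
 * the workqueue; the skb length is sampled before queueing because the
 * worker may consume (or free) the skb concurrently, yet on success the
 * number of bytes handed over is what gets returned.
 */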

static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	int cnt = 0, ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}
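
/* In virtio_transport_cancel_pkt() above, rx work is rescheduled only when
 * the purge moves queued_replies from "vring full" back under the limit:
 * new_cnt + cnt >= vring size means rx had been throttled by
 * virtio_transport_more_replies(), while new_cnt < vring size means there
 * is room again now.
 */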

static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}
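
/* Each rx buffer above is a single contiguous area covering both the packet
 * header and VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE bytes of payload:
 * virtio_vsock_hdr(skb) points at the start of the headroom set up by
 * virtio_vsock_alloc_skb(), which is why one scatterlist entry of
 * total_len is enough.
 */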

static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct sk_buff *skb;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_consume_skb_sent(skb, true);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}
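
/* Bounding queued_replies by the rx vring size avoids a deadlock: replies
 * generated while processing rx buffers go out through the tx path, so if
 * we kept consuming rx buffers while the device is not draining tx, the
 * reply backlog could grow without limit. virtio_transport_rx_work() stops
 * processing when this returns false and is restarted from the tx side.
 */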

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(&virtio_transport.transport,
						virtio_vsock_reset_sock);
		break;
	}
}

static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}
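
/* Event buffers are recycled: each one handed back by the device is handled
 * and immediately re-queued with virtio_vsock_event_fill_one(), so the
 * fixed event_list[] array keeps the event virtqueue populated forever.
 */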

static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
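
/* The three *_done() callbacks above run from the virtio interrupt path, so
 * they only schedule the corresponding work item; all actual virtqueue
 * processing happens in process context under the tx/rx/event mutexes.
 */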

static bool virtio_transport_can_msgzerocopy(int bufs_num)
{
	struct virtio_vsock *vsock;
	bool res = false;

	rcu_read_lock();

	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock) {
		struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];

		/* Check that the tx queue is large enough to hold all the
		 * data to send. This is needed because, when there is not
		 * enough free space in the queue, the current skb is
		 * reinserted at the head of the socket's tx list to retry
		 * transmission later; if the skb is bigger than the whole
		 * queue, it would be reinserted again and again, blocking
		 * other skbs from being sent. Each page of the user-provided
		 * buffer is added as a single buffer to the tx virtqueue,
		 * so compare the number of pages against the maximum
		 * capacity of the queue.
		 */
		if (bufs_num <= vq->num_max)
			res = true;
	}

	rcu_read_unlock();

	return res;
}

static bool virtio_transport_msgzerocopy_allow(void)
{
	return true;
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = virtio_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = virtio_transport_cancel_pkt,

		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = virtio_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.msgzerocopy_allow        = virtio_transport_msgzerocopy_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
		.notify_set_rcvlowat      = virtio_transport_notify_set_rcvlowat,

		.unsent_bytes             = virtio_transport_unsent_bytes,

		.read_skb                 = virtio_transport_read_skb,
	},

	.send_pkt = virtio_transport_send_pkt,
	.can_msgzerocopy = virtio_transport_can_msgzerocopy,
};
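
/* The dgram callbacks are wired up for completeness, but the common virtio
 * transport code does not implement datagram sockets; those helpers simply
 * fail (e.g. virtio_transport_dgram_allow() returns false), so only stream
 * and seqpacket sockets are usable over this transport.
 */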

static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}

static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct sk_buff *skb;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies.  Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
				     len > virtio_vsock_skb_len(skb))) {
				kfree_skb(skb);
				continue;
			}

			virtio_vsock_skb_rx_put(skb);
			virtio_transport_deliver_tap_pkt(skb);
			virtio_transport_recv_pkt(&virtio_transport, skb);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}
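
/* The refill policy above is a simple low-watermark scheme: once fewer than
 * half of rx_buf_max_nr buffers remain posted, virtio_vsock_rx_fill() tops
 * the rx virtqueue back up, so refills happen in batches instead of after
 * every received packet.
 */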

static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct virtqueue_info vqs_info[] = {
		{ "rx", virtio_vsock_rx_done },
		{ "tx", virtio_vsock_tx_done },
		{ "event", virtio_vsock_event_done },
	};
	int ret;

	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL);
	if (ret < 0)
		return ret;

	virtio_vsock_update_guest_cid(vsock);

	virtio_device_ready(vdev);

	return 0;
}

static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
{
	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	/* virtio_transport_send_pkt() can queue packets once
	 * the_virtio_vsock is set, but they won't be processed until
	 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
	 * when initialization finishes to send those packets queued
	 * earlier.
	 * We don't need to queue the other workers (rx, event) because
	 * as long as we don't fill the queues with empty buffers, the
	 * host can't send us any notification.
	 */
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct sk_buff *skb;

	/* Reset all connected sockets when the VQs disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
					virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call virtio_reset_device().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	virtio_reset_device(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->tx_lock);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);
}
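
/* Teardown order matters in virtio_vsock_vqs_del(): clear the run flags so
 * the workers become no-ops, reset the device so it stops touching buffers,
 * then reclaim the unused rx/tx buffers and purge the send queue before
 * deleting the virtqueues.
 */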

static int virtio_vsock_probe(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = NULL;
	int ret;
	int i;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	skb_queue_head_init(&vsock->send_pkt_queue);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
		vsock->out_sgs[i] = &vsock->out_bufs[i];

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	/* Other work items can be queued before 'config->del_vqs()', so
	 * flush all of them before freeing the vsock object to avoid use
	 * after free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

#ifdef CONFIG_PM_SLEEP
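/* Suspend mirrors device removal (minus freeing the vsock object) and
 * resume mirrors probe (minus allocation): the virtqueues are torn down
 * on freeze and rebuilt on restore.
 */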
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;
}

static int virtio_vsock_restore(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	int ret;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

out:
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_vsock_freeze,
	.restore = virtio_vsock_restore,
#endif
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);