// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock.  vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	struct sk_buff_head send_pkt_queue;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock.  vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
	bool seqpacket_allow;
	/* These fields are used only in the tx path, in
	 * 'virtio_transport_send_pkt_work()'; to save stack space there,
	 * both of them are placed here. Each pointer in 'out_sgs' points
	 * to the corresponding element in 'out_bufs' - this is initialized
	 * in 'virtio_vsock_probe()'. Both fields are protected by
	 * 'tx_lock'. +1 is needed for the packet header.
	 */
	struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
	struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
};

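/* Return the CID assigned to this guest, or VMADDR_CID_ANY if the
 * virtio-vsock device has not been registered yet.
 */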
static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

/* Caller needs to hold vsock->tx_lock on vq */
static int virtio_transport_send_skb(struct sk_buff *skb, struct virtqueue *vq,
				     struct virtio_vsock *vsock, gfp_t gfp)
{
	int ret, in_sg = 0, out_sg = 0;
	struct scatterlist **sgs;

	sgs = vsock->out_sgs;
	sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
		    sizeof(*virtio_vsock_hdr(skb)));
	out_sg++;

	if (!skb_is_nonlinear(skb)) {
		if (skb->len > 0) {
			sg_init_one(sgs[out_sg], skb->data, skb->len);
			out_sg++;
		}
	} else {
		struct skb_shared_info *si;
		int i;

		/* If the skb is nonlinear, then its buffer must contain
		 * only the header and nothing more. Data is stored in
		 * the fragmented part.
		 */
		WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));

		si = skb_shinfo(skb);

		for (i = 0; i < si->nr_frags; i++) {
			skb_frag_t *skb_frag = &si->frags[i];
			void *va;

			/* We will use 'page_to_virt()' for the userspace page
			 * here, because virtio or dma-mapping layers will call
			 * 'virt_to_phys()' later to fill the buffer descriptor.
			 * We don't touch memory at the "virtual" address of
			 * this page.
			 */
			va = page_to_virt(skb_frag_page(skb_frag));
			sg_init_one(sgs[out_sg],
				    va + skb_frag_off(skb_frag),
				    skb_frag_size(skb_frag));
			out_sg++;
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, gfp);
	/* Usually this means that there is no more space available in
	 * the vq
	 */
	if (ret < 0)
		return ret;

	virtio_transport_deliver_tap_pkt(skb);
	return 0;
}

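/* Worker that drains the intermediate send_pkt_queue onto the TX
 * virtqueue. The device is kicked once at the end, and rx processing is
 * resumed if completing reply packets freed up vring space.
 */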
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct sk_buff *skb;
		bool reply;
		int ret;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;

		reply = virtio_vsock_skb_reply(skb);

		ret = virtio_transport_send_skb(skb, vq, vsock, GFP_KERNEL);
		if (ret < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

/* Caller needs to hold the RCU read lock on vsock.
 * Returns 0 if the packet is successfully put on the vq.
 */
static int virtio_transport_send_skb_fast_path(struct virtio_vsock *vsock, struct sk_buff *skb)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
	int ret;

	/* Inside RCU, can't sleep! */
	ret = mutex_trylock(&vsock->tx_lock);
	if (unlikely(ret == 0))
		return -EBUSY;

	ret = virtio_transport_send_skb(skb, vq, vsock, GFP_ATOMIC);
	if (ret == 0)
		virtqueue_kick(vq);

	mutex_unlock(&vsock->tx_lock);

	return ret;
}

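/* Queue a packet for transmission, either directly on the virtqueue or,
 * failing that, on the intermediate queue processed by send_pkt_work.
 * Consumes the skb; returns the packet length on success or a negative
 * error code.
 */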
static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr;
	struct virtio_vsock *vsock;
	int len = skb->len;

	hdr = virtio_vsock_hdr(skb);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	/* If send_pkt_queue is empty, we can safely bypass this queue
	 * because packet order is maintained, and we try to put the packet
	 * directly on the virtqueue using virtio_transport_send_skb_fast_path.
	 * If this fails, we simply put the packet on the intermediate
	 * queue and schedule the worker.
	 */
	if (!skb_queue_empty_lockless(&vsock->send_pkt_queue) ||
	    virtio_transport_send_skb_fast_path(vsock, skb)) {
		if (virtio_vsock_skb_reply(skb))
			atomic_inc(&vsock->queued_replies);

		virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
	}

out_rcu:
	rcu_read_unlock();
	return len;
}

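/* Purge all packets queued for @vsk from the intermediate queue. If the
 * dropped packets include replies that were throttling rx, restart rx
 * processing.
 */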
static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	int cnt = 0, ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}

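/* Fill the RX virtqueue with freshly allocated skbs until it is full or
 * allocation fails. Called with rx_lock held.
 */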
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

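/* TX virtqueue worker: reclaim skbs completed by the device and kick the
 * send worker, since reclaiming frees up virtqueue slots for any packets
 * waiting on the intermediate queue.
 */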
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct sk_buff *skb;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_consume_skb_sent(skb, true);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

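/* Read the guest CID out of the device's config space. */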
static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(&virtio_transport.transport,
						virtio_vsock_reset_sock);
		break;
	}
}

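/* Event virtqueue worker: handle device events such as a transport reset
 * (e.g. after migration) and give the event buffers back to the device.
 */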
static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}

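/* Virtqueue callbacks run in interrupt context, so defer the actual
 * processing to the workqueue.
 */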
static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

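/* Check whether a zerocopy transmission of bufs_num buffers can ever fit
 * on the TX virtqueue.
 */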
static bool virtio_transport_can_msgzerocopy(int bufs_num)
{
	struct virtio_vsock *vsock;
	bool res = false;

	rcu_read_lock();

	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock) {
		struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];

		/* Check that the tx queue is large enough to hold all of
		 * the data to send. This is needed because, when there is
		 * not enough free space in the queue, the current skb is
		 * reinserted at the head of the socket's tx list to retry
		 * transmission later. If the skb is bigger than the whole
		 * queue, it will be reinserted again and again, blocking
		 * other skbs from being sent. Each page of the user
		 * provided buffer will be added as a single buffer to the
		 * tx virtqueue, so compare the number of pages against the
		 * maximum capacity of the queue.
		 */
		if (bufs_num <= vq->num_max)
			res = true;
	}

	rcu_read_unlock();

	return res;
}

static bool virtio_transport_msgzerocopy_allow(void)
{
	return true;
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = virtio_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = virtio_transport_cancel_pkt,

		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = virtio_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.msgzerocopy_allow        = virtio_transport_msgzerocopy_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
		.notify_set_rcvlowat      = virtio_transport_notify_set_rcvlowat,

		.unsent_bytes             = virtio_transport_unsent_bytes,

		.read_skb                 = virtio_transport_read_skb,
	},

	.send_pkt = virtio_transport_send_pkt,
	.can_msgzerocopy = virtio_transport_can_msgzerocopy,
};

static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}

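/* RX virtqueue worker: pass received packets to the vsock core and
 * refill the virtqueue when it runs low on buffers.
 */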
static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct sk_buff *skb;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies.  Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
				     len > virtio_vsock_skb_len(skb))) {
				kfree_skb(skb);
				continue;
			}

			virtio_vsock_skb_rx_put(skb);
			virtio_transport_deliver_tap_pkt(skb);
			virtio_transport_recv_pkt(&virtio_transport, skb);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

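/* Find the device's virtqueues, read the guest CID and mark the device
 * ready. The workers stay disabled until virtio_vsock_vqs_start().
 */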
static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct virtqueue_info vqs_info[] = {
		{ "rx", virtio_vsock_rx_done },
		{ "tx", virtio_vsock_tx_done },
		{ "event", virtio_vsock_event_done },
	};
	int ret;

	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL);
	if (ret < 0)
		return ret;

	virtio_vsock_update_guest_cid(vsock);

	virtio_device_ready(vdev);

	return 0;
}

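/* Enable the workers and prime the rx and event virtqueues with buffers.
 * Called once the_virtio_vsock has been assigned.
 */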
static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
{
	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	/* virtio_transport_send_pkt() can queue packets once
	 * the_virtio_vsock is set, but they won't be processed until
	 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
	 * when initialization finishes to send those packets queued
	 * earlier.
	 * We don't need to queue the other workers (rx, event) because
	 * as long as we don't fill the queues with empty buffers, the
	 * host can't send us any notification.
	 */
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

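/* Stop the workers, reset the device and free any buffers still owned
 * by the driver. Called with the_virtio_vsock_mutex held, after
 * the_virtio_vsock has been cleared.
 */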
static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct sk_buff *skb;

	/* Reset all connected sockets when the VQs disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
					virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call virtio_reset_device().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts; the device will not use
	 * any more buffers.
	 */
	virtio_reset_device(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->tx_lock);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);
}

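/* Probe: allocate and initialize the single virtio-vsock instance, set
 * up its virtqueues and publish it through the_virtio_vsock.
 */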
static int virtio_vsock_probe(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = NULL;
	int ret;
	int i;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	skb_queue_head_init(&vsock->send_pkt_queue);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
		vsock->out_sgs[i] = &vsock->out_bufs[i];

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	/* Other work items can be queued before 'config->del_vqs()', so we
	 * flush all of them before freeing the vsock object to avoid a use
	 * after free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

#ifdef CONFIG_PM_SLEEP
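/* On freeze, unpublish the device and tear down the virtqueues; on
 * restore, rebuild the virtqueues and publish the device again.
 */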
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;
}

static int virtio_vsock_restore(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	int ret;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

out:
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_vsock_freeze,
	.restore = virtio_vsock_restore,
#endif
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);