// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	struct sk_buff_head send_pkt_queue;

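	/* Number of reply packets currently queued on send_pkt_queue;
	 * rx processing is paused once this reaches the rx virtqueue
	 * size (see virtio_transport_more_replies()).
	 */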
	atomic_t queued_replies;

	/* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
	bool seqpacket_allow;

	/* These fields are used only in the tx path, in
	 * 'virtio_transport_send_pkt_work()'; they are placed here to save
	 * stack space in that function. Each pointer in 'out_sgs' points to
	 * the corresponding element of 'out_bufs' - this is initialized in
	 * 'virtio_vsock_probe()'. Both fields are protected by 'tx_lock'.
	 * +1 is needed for the packet header.
	 */
	struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
	struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
};

static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

/* Caller needs to hold vsock->tx_lock on vq */
static int virtio_transport_send_skb(struct sk_buff *skb, struct virtqueue *vq,
				     struct virtio_vsock *vsock, gfp_t gfp)
{
	int ret, in_sg = 0, out_sg = 0;
	struct scatterlist **sgs;

	sgs = vsock->out_sgs;
	sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
		    sizeof(*virtio_vsock_hdr(skb)));
	out_sg++;

	if (!skb_is_nonlinear(skb)) {
		if (skb->len > 0) {
			sg_init_one(sgs[out_sg], skb->data, skb->len);
			out_sg++;
		}
	} else {
		struct skb_shared_info *si;
		int i;

		/* If the skb is nonlinear, then its buffer must contain
		 * only the header and nothing more. Data is stored in
		 * the paged frags.
		 */
		WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));

		si = skb_shinfo(skb);

		for (i = 0; i < si->nr_frags; i++) {
			skb_frag_t *skb_frag = &si->frags[i];
			void *va;

			/* We use 'page_to_virt()' for the userspace page
			 * here, because the virtio or dma-mapping layers
			 * will call 'virt_to_phys()' later to fill the
			 * buffer descriptor. We never touch the memory at
			 * the "virtual" address of this page.
			 */
			va = page_to_virt(skb_frag_page(skb_frag));
			sg_init_one(sgs[out_sg],
				    va + skb_frag_off(skb_frag),
				    skb_frag_size(skb_frag));
			out_sg++;
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, gfp);
	/* Usually this means that there is no more space available in
	 * the vq
	 */
	if (ret < 0)
		return ret;

	virtio_transport_deliver_tap_pkt(skb);
	return 0;
}

static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct sk_buff *skb;
		bool reply;
		int ret;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;

		reply = virtio_vsock_skb_reply(skb);

		ret = virtio_transport_send_skb(skb, vq, vsock, GFP_KERNEL);
		if (ret < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

/* Caller needs to hold the RCU read lock on vsock.
 * Returns 0 if the packet is successfully put on the vq.
 */
static int virtio_transport_send_skb_fast_path(struct virtio_vsock *vsock, struct sk_buff *skb)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
	int ret;

	/* Inside RCU, can't sleep! */
	ret = mutex_trylock(&vsock->tx_lock);
	if (unlikely(ret == 0))
		return -EBUSY;

	ret = virtio_transport_send_skb(skb, vq, vsock, GFP_ATOMIC);
	if (ret == 0)
		virtqueue_kick(vq);

	mutex_unlock(&vsock->tx_lock);

	return ret;
}

static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr;
	struct virtio_vsock *vsock;
	int len = skb->len;

	hdr = virtio_vsock_hdr(skb);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	/* If send_pkt_queue is empty, packet order is still maintained, so
	 * we can safely bypass that queue and try to put the packet directly
	 * on the virtqueue using virtio_transport_send_skb_fast_path().
	 * If this fails, we simply put the packet on the intermediate
	 * queue and schedule the worker.
	 */
	if (!skb_queue_empty_lockless(&vsock->send_pkt_queue) ||
	    virtio_transport_send_skb_fast_path(vsock, skb)) {
		if (virtio_vsock_skb_reply(skb))
			atomic_inc(&vsock->queued_replies);

		virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
	}

out_rcu:
	rcu_read_unlock();
	return len;
}

static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	int cnt = 0, ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}

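/* Give the device empty rx buffers to write incoming packets into.
 * Caller must hold rx_lock.
 */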
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		skb = virtio_vsock_alloc_linear_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

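/* Reclaim skbs that the device has finished transmitting on the tx
 * virtqueue, then kick the send worker in case packets are waiting.
 */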
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct sk_buff *skb;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_consume_skb_sent(skb, true);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(&virtio_transport.transport,
						virtio_vsock_reset_sock);
		break;
	}
}

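/* Process pending device events (e.g. VIRTIO_VSOCK_EVENT_TRANSPORT_RESET)
 * and give the used event buffers back to the device.
 */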
static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}

static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static bool virtio_transport_can_msgzerocopy(int bufs_num)
{
	struct virtio_vsock *vsock;
	bool res = false;

	rcu_read_lock();

	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock) {
		struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];

		/* Check that the tx queue is large enough to hold all the
		 * data to send. This is needed because, when there is not
		 * enough free space in the queue, the current skb to send
		 * is reinserted at the head of the socket's tx list to retry
		 * transmission later; so if the skb is bigger than the whole
		 * queue, it will be reinserted again and again, blocking
		 * other skbs from being sent. Each page of the user-provided
		 * buffer is added as a single buffer to the tx virtqueue,
		 * so compare the number of pages against the maximum
		 * capacity of the queue.
		 */
		if (bufs_num <= vq->num_max)
			res = true;
	}

	rcu_read_unlock();

	return res;
}

static bool virtio_transport_msgzerocopy_allow(void)
{
	return true;
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = virtio_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = virtio_transport_cancel_pkt,

		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = virtio_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.msgzerocopy_allow        = virtio_transport_msgzerocopy_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
		.notify_set_rcvlowat      = virtio_transport_notify_set_rcvlowat,

		.unsent_bytes             = virtio_transport_unsent_bytes,

		.read_skb                 = virtio_transport_read_skb,
	},

	.send_pkt = virtio_transport_send_pkt,
	.can_msgzerocopy = virtio_transport_can_msgzerocopy,
};

static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}

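/* Drain received packets from the rx virtqueue, hand them to the core
 * vsock receive path, and refill the virtqueue when it runs low.
 */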
static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			unsigned int len, payload_len;
			struct virtio_vsock_hdr *hdr;
			struct sk_buff *skb;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies. Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(*hdr) ||
				     len > virtio_vsock_skb_len(skb))) {
				kfree_skb(skb);
				continue;
			}

			hdr = virtio_vsock_hdr(skb);
			payload_len = le32_to_cpu(hdr->len);
			if (unlikely(payload_len > len - sizeof(*hdr))) {
				kfree_skb(skb);
				continue;
			}

			if (payload_len)
				virtio_vsock_skb_put(skb, payload_len);

			virtio_transport_deliver_tap_pkt(skb);
			virtio_transport_recv_pkt(&virtio_transport, skb);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct virtqueue_info vqs_info[] = {
		{ "rx", virtio_vsock_rx_done },
		{ "tx", virtio_vsock_tx_done },
		{ "event", virtio_vsock_event_done },
	};
	int ret;

	mutex_lock(&vsock->rx_lock);
	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	mutex_unlock(&vsock->rx_lock);

	atomic_set(&vsock->queued_replies, 0);

	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL);
	if (ret < 0)
		return ret;

	virtio_vsock_update_guest_cid(vsock);

	virtio_device_ready(vdev);

	return 0;
}

static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
{
	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	/* virtio_transport_send_pkt() can queue packets once
	 * the_virtio_vsock is set, but they won't be processed until
	 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
	 * when initialization finishes to send those packets queued
	 * earlier.
	 * We don't need to queue the other workers (rx, event) because
	 * as long as we don't fill the queues with empty buffers, the
	 * host can't send us any notification.
	 */
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct sk_buff *skb;

	/* Reset all connected sockets when the VQs disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
					virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call virtio_reset_device().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	virtio_reset_device(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->tx_lock);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);
}

static int virtio_vsock_probe(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = NULL;
	int ret;
	int i;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	skb_queue_head_init(&vsock->send_pkt_queue);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
		vsock->out_sgs[i] = &vsock->out_bufs[i];

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	/* Other works can be queued before 'config->del_vqs()', so we flush
	 * all works before freeing the vsock object to avoid use after free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;
}

static int virtio_vsock_restore(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	int ret;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

out:
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_vsock_freeze,
	.restore = virtio_vsock_restore,
#endif
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);