// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others.
 */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256
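
/* Arithmetic check on the two limits above (illustrative): 0x80000 is
 * 512 KiB, so with 4 KiB guest buffers the byte weight admits roughly
 * 128 full buffers' worth of payload per pass, while the packet weight
 * caps a burst of tiny packets at 256 regardless of their size.
 */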

static const int vhost_vsock_bits[] = {
	VHOST_FEATURES,
	VIRTIO_F_ACCESS_PLATFORM,
	VIRTIO_VSOCK_F_SEQPACKET
};

#define VHOST_VSOCK_FEATURES VHOST_FEATURES_U64(vhost_vsock_bits, 0)

enum {
	VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

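/* The two virtqueues below are indexed by VSOCK_VQ_RX/VSOCK_VQ_TX and are
 * named from the guest's point of view: the "RX" queue carries host->guest
 * packets (filled from send_pkt_queue by vhost_transport_do_send_pkt()) and
 * the "TX" queue carries guest->host packets (drained by
 * vhost_vsock_handle_tx_kick()).
 */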
struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];
	struct net *net;
	netns_tracker ns_tracker;

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	struct sk_buff_head send_pkt_queue; /* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
	bool seqpacket_allow;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}
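
/* Illustrative guest-side sketch (not part of this driver): because the
 * host always uses this well-known CID (VMADDR_CID_HOST == 2), a guest
 * application reaches the host like this.  The port number is an
 * arbitrary example:
 *
 *	#include <sys/socket.h>
 *	#include <linux/vm_sockets.h>
 *
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_HOST,
 *		.svm_port = 1234,
 *	};
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */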

/* Callers must be in an RCU read section or hold the vhost_vsock_mutex.
 * The return value can only be dereferenced while within the section.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid, struct net *net)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid,
				   lockdep_is_held(&vhost_vsock_mutex)) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid &&
		    vsock_net_check_mode(net, vsock->net))
			return vsock;
	}

	return NULL;
}

static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_hdr *hdr;
		size_t iov_len, payload_len;
		struct iov_iter iov_iter;
		u32 flags_to_restore = 0;
		struct sk_buff *skb;
		unsigned out, in;
		size_t nbytes;
		u32 offset;
		int head;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);

		if (!skb) {
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		if (head == vq->num) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			kfree_skb(skb);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(*hdr)) {
			kfree_skb(skb);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
		offset = VIRTIO_VSOCK_SKB_CB(skb)->offset;
		payload_len = skb->len - offset;
		hdr = virtio_vsock_hdr(skb);

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(*hdr)) {
			payload_len = iov_len - sizeof(*hdr);

			/* As we are copying pieces of a large packet's buffer
			 * into small rx buffers, the headers of the packets in
			 * the rx queue are created dynamically, initialized
			 * from the header of the current packet (except for
			 * the length).  For SOCK_SEQPACKET we must also clear
			 * the message delimiter bit (VIRTIO_VSOCK_SEQ_EOM) and
			 * the MSG_EOR bit (VIRTIO_VSOCK_SEQ_EOR) if set;
			 * otherwise every fragment would carry them.  Once the
			 * header has been copied into the rx buffer, the
			 * required bits are restored so that the final
			 * fragment still carries them.
			 */
			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
				hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
				flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;

				if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
					hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
					flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
				}
			}
		}

		/* Set the correct length in the header */
		hdr->len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
		if (nbytes != sizeof(*hdr)) {
			kfree_skb(skb);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		if (skb_copy_datagram_iter(skb,
					   offset,
					   &iov_iter,
					   payload_len)) {
			kfree_skb(skb);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(skb);

		vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
		added = true;

		VIRTIO_VSOCK_SKB_CB(skb)->offset += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (VIRTIO_VSOCK_SKB_CB(skb)->offset < skb->len) {
			hdr->flags |= cpu_to_le32(flags_to_restore);

			/* We are queueing the same skb to handle
			 * the remaining bytes, and we want to deliver it
			 * to monitoring devices in the next iteration.
			 */
			virtio_vsock_skb_clear_tap_delivered(skb);
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
		} else {
			if (virtio_vsock_skb_reply(skb)) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_consume_skb_sent(skb, true);
		}
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}
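
/* Worked example of the splitting above, assuming the guest posts 4 KiB
 * rx buffers: a 10000-byte SOCK_SEQPACKET message goes out as three
 * descriptors carrying 4052 + 4052 + 1896 payload bytes, each preceded
 * by its own 44-byte virtio_vsock_hdr.  Only the last fragment keeps
 * VIRTIO_VSOCK_SEQ_EOM/EOR set, so the guest still sees a single
 * message boundary.
 */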

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

static int
vhost_transport_send_pkt(struct sk_buff *skb, struct net *net)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vhost_vsock *vsock;
	int len = skb->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid), net);
	if (!vsock) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -ENODEV;
	}

	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	int cnt = 0;
	int ret = -ENODEV;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid,
				sock_net(sk_vsock(vsk)));
	if (!vsock)
		goto out;

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		/* If the reply count just crossed back below the tx
		 * virtqueue size, tx processing was stalled on pending
		 * replies and must be kicked to resume.
		 */
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

static struct sk_buff *
vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_hdr *hdr;
	struct iov_iter iov_iter;
	struct sk_buff *skb;
	size_t payload_len;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	len = iov_length(vq->iov, out);

	if (len < VIRTIO_VSOCK_SKB_HEADROOM ||
	    len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM)
		return NULL;

	/* len contains both payload and hdr */
	skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;

	iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);

	hdr = virtio_vsock_hdr(skb);
	nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
	if (nbytes != sizeof(*hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(*hdr), nbytes);
		kfree_skb(skb);
		return NULL;
	}

	payload_len = le32_to_cpu(hdr->len);

	/* No payload */
	if (!payload_len)
		return skb;

	/* The pkt is too big or the length in the header is invalid */
	if (payload_len + sizeof(*hdr) > len) {
		kfree_skb(skb);
		return NULL;
	}

	virtio_vsock_skb_put(skb, payload_len);

	if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) {
		vq_err(vq, "Failed to copy %zu byte payload\n", payload_len);
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}
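
/* Reply flow control in one place: each rx packet that generates a
 * reply bumps queued_replies in vhost_transport_send_pkt().  Once the
 * count reaches the tx virtqueue size, vhost_vsock_handle_tx_kick()
 * stops draining the guest's tx queue; vhost_transport_do_send_pkt()
 * and vhost_transport_cancel_pkt() kick tx polling again when the
 * count drops back below the limit.
 */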

static bool vhost_transport_msgzerocopy_allow(void)
{
	return true;
}

static bool vhost_transport_seqpacket_allow(struct vsock_sock *vsk,
					    u32 remote_cid);

static bool
vhost_transport_stream_allow(struct vsock_sock *vsk, u32 cid, u32 port)
{
	return true;
}

static struct virtio_transport vhost_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = vhost_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = vhost_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.msgzerocopy_allow        = vhost_transport_msgzerocopy_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
		.notify_set_rcvlowat      = virtio_transport_notify_set_rcvlowat,

		.unsent_bytes             = virtio_transport_unsent_bytes,

		.read_skb                 = virtio_transport_read_skb,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static bool vhost_transport_seqpacket_allow(struct vsock_sock *vsk,
					    u32 remote_cid)
{
	struct net *net = sock_net(sk_vsock(vsk));
	struct vhost_vsock *vsock;
	bool seqpacket_allow = false;

	rcu_read_lock();
	vsock = vhost_vsock_get(remote_cid, net);

	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;

	rcu_read_unlock();

	return seqpacket_allow;
}

static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	struct sk_buff *skb;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		struct virtio_vsock_hdr *hdr;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		skb = vhost_vsock_alloc_skb(vq, out, in);
		if (!skb) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		total_len += sizeof(*hdr) + skb->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(skb);

		hdr = virtio_vsock_hdr(skb);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
		    le64_to_cpu(hdr->dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, skb,
						  vsock->net);
		else
			kfree_skb(skb);

		vhost_add_used(vq, head, 0);
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vhost_vq_get_backend(vq)) {
			vhost_vq_set_backend(vq, vsock);
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	/* Some packets may have been queued before the device was started,
	 * let's kick the send worker to send them.
	 */
	vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
	size_t i;
	int ret = 0;

	mutex_lock(&vsock->dev.mutex);

	if (check_owner) {
		ret = vhost_dev_check_owner(&vsock->dev);
		if (ret)
			goto err;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	struct net *net;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc_obj(*vsock, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_objs(*vqs, ARRAY_SIZE(vsock->vqs));
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	net = current->nsproxy->net_ns;
	vsock->net = get_net_track(net, &vsock->ns_tracker, GFP_KERNEL);

	vsock->guest_cid = 0; /* no CID assigned yet */
	vsock->seqpacket_allow = false;

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT, true, NULL);

	file->private_data = vsock;
	skb_queue_head_init(&vsock->send_pkt_queue);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	vhost_dev_flush(&vsock->dev);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	rcu_read_lock();

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid, sock_net(sk))) {
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	/* If the close timeout is pending, let it expire.  This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here.
	 */
	vsock_for_each_connected_socket(&vhost_transport.transport,
					vhost_vsock_reset_orphans);

	/* Don't check the owner, because we are in the release path, so we
	 * need to stop the vsock device in any case.
	 * vhost_vsock_stop() can not fail in this case, so we don't need to
	 * check the return code.
	 */
	vhost_vsock_stop(vsock, false);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	vhost_dev_cleanup(&vsock->dev);
	put_net_track(vsock->net, &vsock->ns_tracker);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is assigned to the guest->host transport (i.e. nested
	 * VM), to make the loopback work.
	 */
	if (vsock_find_cid(guest_cid))
		return -EADDRINUSE;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid, vsock->net);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		goto err;
	}

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&vsock->dev))
			goto err;
	}

	vsock->seqpacket_allow = features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;

err:
	mutex_unlock(&vsock->dev.mutex);
	return -EFAULT;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock, true);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VSOCK_BACKEND_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&vsock->dev, features);
		return 0;
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}
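
/* Minimal userspace bring-up sketch (illustrative only): error handling
 * and the vring/eventfd setup a real VMM performs are omitted, and the
 * guest CID is an arbitrary example value.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	int vhost_vsock_bring_up(void)
 *	{
 *		uint64_t features, cid = 3;
 *		int running = 1;
 *		int fd = open("/dev/vhost-vsock", O_RDWR);
 *
 *		ioctl(fd, VHOST_SET_OWNER, NULL);
 *		ioctl(fd, VHOST_GET_FEATURES, &features);
 *		ioctl(fd, VHOST_SET_FEATURES, &features);
 *		ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *		ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);
 *		return fd;
 *	}
 */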

static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
					  struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_vsock *vsock = file->private_data;
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek         = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.read_iter      = vhost_vsock_chr_read_iter,
	.write_iter     = vhost_vsock_chr_write_iter,
	.poll           = vhost_vsock_chr_poll,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_register(&vhost_transport.transport,
				  VSOCK_TRANSPORT_F_H2G);
	if (ret < 0)
		return ret;

	ret = misc_register(&vhost_vsock_misc);
	if (ret) {
		vsock_core_unregister(&vhost_transport.transport);
		return ret;
	}

	return 0;
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_unregister(&vhost_transport.transport);
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");