// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	struct sk_buff_head send_pkt_queue;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
	bool seqpacket_allow;

	/* These fields are used only in the TX path in function
	 * 'virtio_transport_send_pkt_work()', so to save
	 * stack space in it, place both of them here. Each
	 * pointer from 'out_sgs' points to the corresponding
	 * element in 'out_bufs' - this is initialized in
	 * 'virtio_vsock_probe()'. Both fields are protected
	 * by 'tx_lock'. +1 is needed for the packet header.
	 */
	struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
	struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
};

static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

/* Caller needs to hold vsock->tx_lock on vq */
static int virtio_transport_send_skb(struct sk_buff *skb, struct virtqueue *vq,
				     struct virtio_vsock *vsock, gfp_t gfp)
{
	int ret, in_sg = 0, out_sg = 0;
	struct scatterlist **sgs;

	sgs = vsock->out_sgs;
	sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
		    sizeof(*virtio_vsock_hdr(skb)));
	out_sg++;

	if (!skb_is_nonlinear(skb)) {
		if (skb->len > 0) {
			sg_init_one(sgs[out_sg], skb->data, skb->len);
			out_sg++;
		}
	} else {
		struct skb_shared_info *si;
		int i;

		/* If the skb is nonlinear, then its buffer must contain
		 * only the header and nothing more. Data is stored in
		 * the fragmented part.
		 */
		WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));

		si = skb_shinfo(skb);

		for (i = 0; i < si->nr_frags; i++) {
			skb_frag_t *skb_frag = &si->frags[i];
			void *va;

			/* We will use 'page_to_virt()' for the userspace page
			 * here, because virtio or dma-mapping layers will call
			 * 'virt_to_phys()' later to fill the buffer descriptor.
			 * We don't touch memory at the "virtual" address of this page.
			 */
			va = page_to_virt(skb_frag_page(skb_frag));
			sg_init_one(sgs[out_sg],
				    va + skb_frag_off(skb_frag),
				    skb_frag_size(skb_frag));
			out_sg++;
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, gfp);
	/* Usually this means that there is no more space available in
	 * the vq
	 */
	if (ret < 0)
		return ret;

	virtio_transport_deliver_tap_pkt(skb);
	return 0;
}

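/* Worker that drains vsock->send_pkt_queue onto the TX virtqueue. It runs
 * with tx_lock held, kicks the device if at least one packet was added and
 * requeues rx_work once pending replies drop back below the RX vring size.
 */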
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct sk_buff *skb;
		bool reply;
		int ret;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;

		reply = virtio_vsock_skb_reply(skb);

		ret = virtio_transport_send_skb(skb, vq, vsock, GFP_KERNEL);
		if (ret < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

/* Caller needs to hold the RCU read lock for vsock.
 * Returns 0 if the packet is successfully put on the vq.
 */
static int virtio_transport_send_skb_fast_path(struct virtio_vsock *vsock, struct sk_buff *skb)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
	int ret;

	/* Inside RCU, can't sleep! */
	ret = mutex_trylock(&vsock->tx_lock);
	if (unlikely(ret == 0))
		return -EBUSY;

	ret = virtio_transport_send_skb(skb, vq, vsock, GFP_ATOMIC);
	if (ret == 0)
		virtqueue_kick(vq);

	mutex_unlock(&vsock->tx_lock);

	return ret;
}

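/* Transport .send_pkt callback: hand an skb to the host, either directly on
 * the TX virtqueue (fast path) or via the intermediate send_pkt_queue that
 * is drained by the worker above. Returns skb->len on success, or -ENODEV
 * when no device is available or the destination CID is our own.
 */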
static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr;
	struct virtio_vsock *vsock;
	int len = skb->len;

	hdr = virtio_vsock_hdr(skb);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	/* If send_pkt_queue is empty, we can safely bypass this queue
	 * because packet order is maintained, and try to put the packet
	 * directly on the virtqueue using virtio_transport_send_skb_fast_path().
	 * If this fails, we simply put the packet on the intermediate
	 * queue and schedule the worker.
	 */
	if (!skb_queue_empty_lockless(&vsock->send_pkt_queue) ||
	    virtio_transport_send_skb_fast_path(vsock, skb)) {
		if (virtio_vsock_skb_reply(skb))
			atomic_inc(&vsock->queued_replies);

		virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
	}

out_rcu:
	rcu_read_unlock();
	return len;
}

static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	int cnt = 0, ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}

static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

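/* TX completion worker: reclaim skbs the device has finished transmitting
 * and, if anything was reclaimed, requeue the send worker in case more
 * packets are waiting on send_pkt_queue.
 */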
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct sk_buff *skb;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_consume_skb_sent(skb, true);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(&virtio_transport.transport,
						virtio_vsock_reset_sock);
		break;
	}
}

static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}

static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

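/* Decide whether MSG_ZEROCOPY transmission may be used for a message that
 * spans 'bufs_num' buffers (one per user page); see the comment below on
 * why the TX vring capacity is the limit.
 */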
static bool virtio_transport_can_msgzerocopy(int bufs_num)
{
	struct virtio_vsock *vsock;
	bool res = false;

	rcu_read_lock();

	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock) {
		struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];

		/* Check that the TX queue is large enough to hold all the
		 * data to send. This is needed because, when there is not
		 * enough free space in the queue, the current skb to send
		 * will be reinserted at the head of the socket's tx list to
		 * retry transmission later, so if the skb is bigger than the
		 * whole queue, it will be reinserted again and again,
		 * blocking other skbs from being sent. Each page of the
		 * user-provided buffer will be added as a single buffer to
		 * the tx virtqueue, so compare the number of pages against
		 * the maximum capacity of the queue.
		 */
		if (bufs_num <= vq->num_max)
			res = true;
	}

	rcu_read_unlock();

	return res;
}

static bool virtio_transport_msgzerocopy_allow(void)
{
	return true;
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module = THIS_MODULE,

		.get_local_cid = virtio_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = virtio_transport_cancel_pkt,

		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow = virtio_transport_seqpacket_allow,
		.seqpacket_has_data = virtio_transport_seqpacket_has_data,

		.msgzerocopy_allow = virtio_transport_msgzerocopy_allow,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size = virtio_transport_notify_buffer_size,
		.notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,

		.unsent_bytes = virtio_transport_unsent_bytes,

		.read_skb = virtio_transport_read_skb,
	},

	.send_pkt = virtio_transport_send_pkt,
	.can_msgzerocopy = virtio_transport_can_msgzerocopy,
};

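/* SEQPACKET sockets are allowed only when the device negotiated
 * VIRTIO_VSOCK_F_SEQPACKET; the result is cached in vsock->seqpacket_allow
 * at probe time.
 */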
static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}

static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct sk_buff *skb;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies. Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
				     len > virtio_vsock_skb_len(skb))) {
				kfree_skb(skb);
				continue;
			}

			virtio_vsock_skb_rx_put(skb);
			virtio_transport_deliver_tap_pkt(skb);
			virtio_transport_recv_pkt(&virtio_transport, skb);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct virtqueue_info vqs_info[] = {
		{ "rx", virtio_vsock_rx_done },
		{ "tx", virtio_vsock_tx_done },
		{ "event", virtio_vsock_event_done },
	};
	int ret;

	mutex_lock(&vsock->rx_lock);
	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	mutex_unlock(&vsock->rx_lock);

	atomic_set(&vsock->queued_replies, 0);

	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL);
	if (ret < 0)
		return ret;

	virtio_vsock_update_guest_cid(vsock);

	virtio_device_ready(vdev);

	return 0;
}

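/* Enable the worker handlers and prime the RX and event virtqueues with
 * buffers. Called after the_virtio_vsock has been published (on probe and
 * restore), so any packets queued meanwhile get sent via send_pkt_work.
 */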
static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
{
	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	/* virtio_transport_send_pkt() can queue packets once
	 * the_virtio_vsock is set, but they won't be processed until
	 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
	 * when initialization finishes to send those packets queued
	 * earlier.
	 * We don't need to queue the other workers (rx, event) because
	 * as long as we don't fill the queues with empty buffers, the
	 * host can't send us any notification.
	 */
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct sk_buff *skb;

	/* Reset all connected sockets when the VQs disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
					virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call virtio_reset_device().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	virtio_reset_device(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->tx_lock);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);
}

static int virtio_vsock_probe(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = NULL;
	int ret;
	int i;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	skb_queue_head_init(&vsock->send_pkt_queue);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
		vsock->out_sgs[i] = &vsock->out_bufs[i];

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

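/* Device removal: unpublish the_virtio_vsock, tear down the virtqueues and
 * flush every worker before freeing the vsock object.
 */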
static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	/* Other works can be queued before 'config->del_vqs()', so we flush
	 * all works before freeing the vsock object to avoid use after free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;
}

static int virtio_vsock_restore(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	int ret;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

out:
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_vsock_freeze,
	.restore = virtio_vsock_restore,
#endif
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);