// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
	bool seqpacket_allow;
};

static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct scatterlist hdr, buf, *sgs[2];
		int ret, in_sg = 0, out_sg = 0;
		bool reply;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		virtio_transport_deliver_tap_pkt(pkt);

		reply = pkt->reply;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[out_sg++] = &hdr;
		if (pkt->buf) {
			sg_init_one(&buf, pkt->buf, pkt->len);
			sgs[out_sg++] = &buf;
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq
		 */
		if (ret < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
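/* A worked example of the reply throttling above (a reading of this file,
 * not new behaviour): with a 256-descriptor rx vring, rx processing stops
 * once queued_replies reaches 256 (see virtio_transport_more_replies()
 * below). When the loop above transmits a reply and atomic_dec_return()
 * brings the counter from 256 back to 255, restart_rx is set and rx_work
 * is requeued so reception can resume.
 */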
static int
virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct virtio_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
		virtio_transport_free_pkt(pkt);
		len = -ENODEV;
		goto out_rcu;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
	rcu_read_unlock();
	return len;
}

static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0, ret;
	LIST_HEAD(freeme);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		/* Restart rx only if removing these replies made the counter
		 * cross from "throttled" (at or above the vring size) back
		 * below the limit.
		 */
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}

static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
	struct virtio_vsock_pkt *pkt;
	struct scatterlist hdr, buf, *sgs[2];
	struct virtqueue *vq;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
		if (!pkt)
			break;

		pkt->buf = kmalloc(buf_len, GFP_KERNEL);
		if (!pkt->buf) {
			virtio_transport_free_pkt(pkt);
			break;
		}

		pkt->buf_len = buf_len;
		pkt->len = buf_len;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[0] = &hdr;

		sg_init_one(&buf, pkt->buf, buf_len);
		sgs[1] = &buf;
		ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
		if (ret) {
			virtio_transport_free_pkt(pkt);
			break;
		}
		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct virtio_vsock_pkt *pkt;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_free_pkt(pkt);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
	release_sock(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(virtio_vsock_reset_sock);
		break;
	}
}

static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}
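/* Background on the event path above, summarized from the virtio-vsock
 * spec: the device sends VIRTIO_VSOCK_EVENT_TRANSPORT_RESET when existing
 * connections are no longer valid, typically after the guest has been
 * migrated to another host. The guest CID may have changed, so it is
 * re-read and all connected sockets are reset; applications must
 * reconnect.
 */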
static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = virtio_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = virtio_transport_cancel_pkt,

		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = virtio_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
	},

	.send_pkt = virtio_transport_send_pkt,
};
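/* For orientation, a minimal guest-side user of the ops above (a userspace
 * sketch, not part of this driver; the port number is an arbitrary
 * example):
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_HOST,	// CID 2, the host
 *		.svm_port = 1234,
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * The af_vsock core routes such guest-to-host connections to this
 * transport because it registers with VSOCK_TRANSPORT_F_G2H (see
 * virtio_vsock_init() below).
 */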
static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow = false;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	/* The device may have been removed; only dereference if present. */
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}

static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct virtio_vsock_pkt *pkt;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies. Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			pkt = virtqueue_get_buf(vq, &len);
			if (!pkt)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(pkt->hdr) ||
				     len > sizeof(pkt->hdr) + pkt->len)) {
				virtio_transport_free_pkt(pkt);
				continue;
			}

			pkt->len = len - sizeof(pkt->hdr);
			virtio_transport_deliver_tap_pkt(pkt);
			virtio_transport_recv_pkt(&virtio_transport, pkt);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

static int virtio_vsock_probe(struct virtio_device *vdev)
{
	vq_callback_t *callbacks[] = {
		virtio_vsock_rx_done,
		virtio_vsock_tx_done,
		virtio_vsock_event_done,
	};
	static const char * const names[] = {
		"rx",
		"tx",
		"event",
	};
	struct virtio_vsock *vsock = NULL;
	int ret;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX,
			      vsock->vqs, callbacks, names,
			      NULL);
	if (ret < 0)
		goto out;

	virtio_vsock_update_guest_cid(vsock);

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;
	rcu_assign_pointer(the_virtio_vsock, vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	struct virtio_vsock_pkt *pkt;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	/* Reset all connected sockets when the device disappears */
	vsock_for_each_connected_socket(virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call vdev->config->reset().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	vdev->config->reset(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->tx_lock);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);

	/* Other works can be queued before 'config->del_vqs()', so we flush
	 * all works before freeing the vsock object to avoid use after free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);