// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <linux/dma-mapping.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	struct sk_buff_head send_pkt_queue;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	u32 guest_cid;
	bool seqpacket_allow;

	/* These fields are used only in the tx path, in
	 * 'virtio_transport_send_pkt_work()'; to save stack space
	 * there, place both of them here. Each pointer in 'out_sgs'
	 * points to the corresponding element in 'out_bufs' - this is
	 * initialized in 'virtio_vsock_probe()'. Both fields are
	 * protected by 'tx_lock'. +1 is needed for the packet header.
	 */
	struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
	struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	__dma_from_device_group_begin();
	struct virtio_vsock_event event_list[8];
	__dma_from_device_group_end();
};

static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}
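/* Every transport callback that touches the device follows the pattern
 * above: take the RCU read lock, dereference the_virtio_vsock, and bail
 * out with a safe default (here VMADDR_CID_ANY) when the device is gone.
 * This pairs with virtio_vsock_remove() and virtio_vsock_freeze(), which
 * unpublish the pointer and wait for a grace period before tearing down
 * the virtqueues.
 */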
/* Caller needs to hold vsock->tx_lock on vq */
static int virtio_transport_send_skb(struct sk_buff *skb, struct virtqueue *vq,
				     struct virtio_vsock *vsock, gfp_t gfp)
{
	int ret, in_sg = 0, out_sg = 0;
	struct scatterlist **sgs;

	sgs = vsock->out_sgs;
	sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
		    sizeof(*virtio_vsock_hdr(skb)));
	out_sg++;

	if (!skb_is_nonlinear(skb)) {
		if (skb->len > 0) {
			sg_init_one(sgs[out_sg], skb->data, skb->len);
			out_sg++;
		}
	} else {
		struct skb_shared_info *si;
		int i;

		/* If the skb is nonlinear, then its buffer must contain
		 * only the header and nothing more. Data is stored in
		 * the fragmented part.
		 */
		WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));

		si = skb_shinfo(skb);

		for (i = 0; i < si->nr_frags; i++) {
			skb_frag_t *skb_frag = &si->frags[i];
			void *va;

			/* We use 'page_to_virt()' for the userspace page
			 * here because the virtio or dma-mapping layers
			 * will call 'virt_to_phys()' later to fill the
			 * buffer descriptor. We don't touch the memory at
			 * the "virtual" address of this page.
			 */
			va = page_to_virt(skb_frag_page(skb_frag));
			sg_init_one(sgs[out_sg],
				    va + skb_frag_off(skb_frag),
				    skb_frag_size(skb_frag));
			out_sg++;
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, gfp);
	/* Usually this means that there is no more space available in
	 * the vq
	 */
	if (ret < 0)
		return ret;

	virtio_transport_deliver_tap_pkt(skb);
	return 0;
}

static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct sk_buff *skb;
		bool reply;
		int ret;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;

		reply = virtio_vsock_skb_reply(skb);

		ret = virtio_transport_send_skb(skb, vq, vsock, GFP_KERNEL);
		if (ret < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
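/* Reply packets generated while processing received packets are accounted
 * in queued_replies; rx processing stalls once that count reaches the rx
 * vring size (see virtio_transport_more_replies() below). The
 * "val + 1 == vring size" test above therefore catches exactly the
 * transition back below the limit, which is when the rx worker needs to
 * be kicked again. For illustration, assuming a 256-entry rx ring: the
 * count sits at 256 while rx is stalled; transmitting one reply drops it
 * to 255, and 255 + 1 == 256 triggers the restart.
 */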
/* Caller needs to hold the RCU read lock on vsock.
 * Returns 0 if the packet is successfully put on the vq.
 */
static int virtio_transport_send_skb_fast_path(struct virtio_vsock *vsock, struct sk_buff *skb)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
	int ret;

	/* Inside RCU, can't sleep! */
	ret = mutex_trylock(&vsock->tx_lock);
	if (unlikely(ret == 0))
		return -EBUSY;

	ret = virtio_transport_send_skb(skb, vq, vsock, GFP_ATOMIC);
	if (ret == 0)
		virtqueue_kick(vq);

	mutex_unlock(&vsock->tx_lock);

	return ret;
}

static int
virtio_transport_send_pkt(struct sk_buff *skb, struct net *net)
{
	struct virtio_vsock_hdr *hdr;
	struct virtio_vsock *vsock;
	int len = skb->len;

	hdr = virtio_vsock_hdr(skb);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	/* If send_pkt_queue is empty, we can safely bypass this queue
	 * because packet order is maintained, and we try to put the packet
	 * on the virtqueue using virtio_transport_send_skb_fast_path().
	 * If this fails, we simply put the packet on the intermediate
	 * queue and schedule the worker.
	 */
	if (!skb_queue_empty_lockless(&vsock->send_pkt_queue) ||
	    virtio_transport_send_skb_fast_path(vsock, skb)) {
		if (virtio_vsock_skb_reply(skb))
			atomic_inc(&vsock->queued_replies);

		virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
	}

out_rcu:
	rcu_read_unlock();
	return len;
}

static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	int cnt = 0, ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}
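/* Worked example for the wakeup test above, assuming a 256-entry rx ring:
 * if queued_replies was 256 (rx stalled) and cancelling purged cnt = 10
 * reply skbs, then new_cnt = 246. Since new_cnt + cnt = 256 >= 256 and
 * new_cnt = 246 < 256, the counter has just crossed below the vring
 * size, so the rx worker is requeued. Any other combination means rx
 * either was never stalled or is still over the limit.
 */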
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		skb = virtio_vsock_alloc_linear_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct sk_buff *skb;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_consume_skb_sent(skb, true);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf_cache_clean(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(&virtio_transport.transport,
						virtio_vsock_reset_sock);
		break;
	}
}
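/* A transport reset event means the device has dropped all connection
 * state, typically because the VM was migrated to another host. The
 * guest CID may have changed, so it is re-read from the config space
 * and every connected socket is reset so that userspace can reconnect.
 */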
static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}

static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static bool virtio_transport_can_msgzerocopy(int bufs_num)
{
	struct virtio_vsock *vsock;
	bool res = false;

	rcu_read_lock();

	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock) {
		struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];

		/* Check that the tx queue is large enough to keep the
		 * whole data to send. This is needed because, when there
		 * is not enough free space in the queue, the current skb
		 * will be reinserted at the head of the socket's tx list
		 * to retry transmission later; so if the skb is bigger
		 * than the whole queue, it will be reinserted again and
		 * again, thus blocking other skbs from being sent. Each
		 * page of the user-provided buffer will be added as a
		 * single buffer to the tx virtqueue, so compare the
		 * number of pages against the maximum capacity of the
		 * queue.
		 */
		if (bufs_num <= vq->num_max)
			res = true;
	}

	rcu_read_unlock();

	return res;
}
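/* For illustration (the numbers are arbitrary): with 4 KiB pages, a
 * 64 KiB MSG_ZEROCOPY send pins 16 user pages, so bufs_num = 16. With a
 * tx ring allowing vq->num_max = 256 descriptors the check passes; a
 * buffer spanning more pages than the ring can ever hold is refused
 * here, and the core falls back to the copying path instead of
 * re-queueing the oversized skb forever.
 */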
static bool virtio_transport_msgzerocopy_allow(void)
{
	return true;
}

bool virtio_transport_stream_allow(struct vsock_sock *vsk, u32 cid, u32 port)
{
	return vsock_net_mode_global(vsk);
}

static bool virtio_transport_seqpacket_allow(struct vsock_sock *vsk,
					     u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module = THIS_MODULE,

		.get_local_cid = virtio_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = virtio_transport_cancel_pkt,

		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow = virtio_transport_seqpacket_allow,
		.seqpacket_has_data = virtio_transport_seqpacket_has_data,

		.msgzerocopy_allow = virtio_transport_msgzerocopy_allow,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size = virtio_transport_notify_buffer_size,
		.notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,

		.unsent_bytes = virtio_transport_unsent_bytes,

		.read_skb = virtio_transport_read_skb,
	},

	.send_pkt = virtio_transport_send_pkt,
	.can_msgzerocopy = virtio_transport_can_msgzerocopy,
};

static bool
virtio_transport_seqpacket_allow(struct vsock_sock *vsk, u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	if (!vsock_net_mode_global(vsk))
		return false;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}

static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			unsigned int len, payload_len;
			struct virtio_vsock_hdr *hdr;
			struct sk_buff *skb;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies. Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(*hdr) ||
				     len > virtio_vsock_skb_len(skb))) {
				kfree_skb(skb);
				continue;
			}

			hdr = virtio_vsock_hdr(skb);
			payload_len = le32_to_cpu(hdr->len);
			if (unlikely(payload_len > len - sizeof(*hdr))) {
				kfree_skb(skb);
				continue;
			}

			if (payload_len)
				virtio_vsock_skb_put(skb, payload_len);

			virtio_transport_deliver_tap_pkt(skb);

			/* Force virtio-transport into global mode since it
			 * does not yet support local-mode namespacing.
			 */
			virtio_transport_recv_pkt(&virtio_transport, skb, NULL);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}
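/* Rx buffers are deliberately not replenished one at a time: the refill
 * above only runs once the number of posted buffers drops below half of
 * the high-water mark (rx_buf_max_nr), so allocations and the virtqueue
 * kick are batched across many received packets.
 */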
static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct virtqueue_info vqs_info[] = {
		{ "rx", virtio_vsock_rx_done },
		{ "tx", virtio_vsock_tx_done },
		{ "event", virtio_vsock_event_done },
	};
	int ret;

	mutex_lock(&vsock->rx_lock);
	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	mutex_unlock(&vsock->rx_lock);

	atomic_set(&vsock->queued_replies, 0);

	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL);
	if (ret < 0)
		return ret;

	virtio_vsock_update_guest_cid(vsock);

	virtio_device_ready(vdev);

	return 0;
}

static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
{
	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	/* virtio_transport_send_pkt() can queue packets once
	 * the_virtio_vsock is set, but they won't be processed until
	 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
	 * when initialization finishes to send those packets queued
	 * earlier.
	 * We don't need to queue the other workers (rx, event) because
	 * as long as we don't fill the queues with empty buffers, the
	 * host can't send us any notification.
	 */
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct sk_buff *skb;

	/* Reset all connected sockets when the VQs disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
					virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call virtio_reset_device().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, the device will not use any
	 * more buffers.
	 */
	virtio_reset_device(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->tx_lock);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);
}
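/* Note the teardown order above: the workers are fenced off via the
 * *_run flags first, then virtio_reset_device() quiesces the device,
 * and only afterwards are the unused buffers detached and freed;
 * detaching them while the device could still be writing into rx
 * buffers would let it DMA into freed memory.
 */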
static int virtio_vsock_probe(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = NULL;
	int ret;
	int i;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc_obj(*vsock);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	skb_queue_head_init(&vsock->send_pkt_queue);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
		vsock->out_sgs[i] = &vsock->out_bufs[i];

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	/* Other works can be queued before 'config->del_vqs()', so we flush
	 * all works before freeing the vsock object to avoid use-after-free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;
}

static int virtio_vsock_restore(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	int ret;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

out:
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_vsock_freeze,
	.restore = virtio_vsock_restore,
#endif
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", WQ_PERCPU, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);