// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio vhost-user driver
 *
 * Copyright(c) 2019 Intel Corporation
 *
 * This driver allows virtio devices to be used over a vhost-user socket.
 *
 * Guest devices can be instantiated by kernel module or command line
 * parameters. One device will be created for each parameter. Syntax:
 *
 *		virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
 * where:
 *	<socket>	:= vhost-user socket path to connect
 *	<virtio_id>	:= virtio device id (as in virtio_ids.h)
 *	<platform_id>	:= (optional) platform device id
 *
 * example:
 *		virtio_uml.device=/var/uml.socket:1
 *
 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
#include <linux/virtio-uml.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
#include <os.h>
#include "mconsole_kern.h"
#include "vhost_user.h"

#define MAX_SUPPORTED_QUEUE_SIZE	256

#define to_virtio_uml_device(_vdev) \
	container_of(_vdev, struct virtio_uml_device, vdev)

/* Per-device configuration parsed from the module/command-line parameter */
struct virtio_uml_platform_data {
	u32 virtio_device_id;			/* device type, as in virtio_ids.h */
	const char *socket_path;		/* vhost-user socket to connect to */
	struct work_struct conn_broken_wk;	/* scheduled on -ECONNRESET */
	struct platform_device *pdev;
};

struct virtio_uml_device {
	struct virtio_device vdev;
	struct platform_device *pdev;
	struct virtio_uml_platform_data *pdata;

	raw_spinlock_t sock_lock;	/* serialises send+reply on sock */
	int sock, req_fd, irq;
	u64 features;
	u64 protocol_features;
	u64 max_vqs;
	u8 status;
	u8 registered:1;
	u8 suspended:1;
	u8 no_vq_suspend:1;

	u8 config_changed_irq:1;
	uint64_t vq_irq_vq_map;	/* bitmap of vq indexes with pending calls */
	int recv_rc;		/* result of the last request-fd drain */
};

/* Per-virtqueue bookkeeping, stored in vq->priv */
struct virtio_uml_vq_info {
	int kick_fd, call_fd;
	char name[32];
	bool suspended;
};

#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)

/* Vhost-user protocol */

/*
 * Send the whole buffer, retrying on short writes and -EINTR.  The file
 * descriptors (if any) are attached to the first chunk only.
 */
static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
			    const int *fds, unsigned int fds_num)
{
	int rc;

	do {
		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
		if (rc > 0) {
			buf += rc;
			len -= rc;
			fds = NULL;
			fds_num = 0;
		}
	} while (len && (rc >= 0 || rc == -EINTR));

	if (rc < 0)
		return rc;
	return 0;
}

/*
 * Read exactly @len bytes, retrying on short reads and -EINTR; also
 * retries on -EAGAIN unless @abortable.  Returns -ECONNRESET on EOF.
 */
static int full_read(int fd, void *buf, int len, bool abortable)
{
	int rc;

	if (!len)
		return 0;

	do {
		rc = os_read_file(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));

	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ECONNRESET;
	return 0;
}

static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
	return full_read(fd, msg, sizeof(msg->header), true);
}

/* Receive one vhost-user message (header, then bounded payload) from @fd */
static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size, bool wait)
{
	size_t size;
	int rc;

	/*
	 * In virtio time-travel mode, we're handling all the vhost-user
	 * FDs by polling them whenever appropriate. However, we may get
	 * into a situation where we're sending out an interrupt message
	 * to a device (e.g. a net device) and need to handle a simulation
	 * time message while doing so, e.g. one that tells us to update
	 * our idea of how long we can run without scheduling.
	 *
	 * Thus, we need to not just read() from the given fd, but need
	 * to also handle messages for the simulation time - this function
	 * does that for us while waiting for the given fd to be readable.
	 */
	if (wait)
		time_travel_wait_readable(fd);

	rc = vhost_user_recv_header(fd, msg);

	if (rc)
		return rc;
	size = msg->header.size;
	if (size > max_payload_size)
		return -EPROTO;
	return full_read(fd, &msg->payload, size, false);
}

/* On -ECONNRESET, mark the device unregistered and kick the broken-conn work */
static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
				   int rc)
{
	struct virtio_uml_platform_data *pdata = vu_dev->pdata;

	if (rc != -ECONNRESET)
		return;

	if (!vu_dev->registered)
		return;

	vu_dev->registered = 0;

	schedule_work(&pdata->conn_broken_wk);
}

/* Receive a reply on the main socket and validate the REPLY flags */
static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
				struct vhost_user_msg *msg,
				size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
				 max_payload_size, true);

	if (rc) {
		vhost_user_check_reset(vu_dev, rc);
		return rc;
	}

	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
		return -EPROTO;

	return 0;
}

static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
			       u64 *value)
{
	struct vhost_user_msg msg;
	int rc = vhost_user_recv_resp(vu_dev, &msg,
				      sizeof(msg.payload.integer));

	if (rc)
		return rc;
	if (msg.header.size != sizeof(msg.payload.integer))
		return -EPROTO;
	*value = msg.payload.integer;
	return 0;
}

/* Receive a slave-initiated request on the (non-blocking) request fd */
static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
			       struct vhost_user_msg *msg,
			       size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
				 max_payload_size, false);

	if (rc)
		return rc;

	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
			VHOST_USER_VERSION)
		return -EPROTO;

	return 0;
}

/*
 * Send a message (optionally with fds) on the main socket; when the
 * REPLY_ACK protocol feature was negotiated and no response is expected
 * anyway, request and consume an ACK under sock_lock.
 */
static int vhost_user_send(struct virtio_uml_device *vu_dev,
			   bool need_response, struct vhost_user_msg *msg,
			   int *fds, size_t num_fds)
{
	size_t size = sizeof(msg->header) + msg->header.size;
	unsigned long flags;
	bool request_ack;
	int rc;

	msg->header.flags |= VHOST_USER_VERSION;

	/*
	 * The need_response flag indicates that we already need a response,
	 * e.g. to read the features. In these cases, don't request an ACK as
	 * it is meaningless. Also request an ACK only if supported.
	 */
	request_ack = !need_response;
	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
		request_ack = false;

	if (request_ack)
		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;

	raw_spin_lock_irqsave(&vu_dev->sock_lock, flags);
	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
	if (rc < 0)
		goto out;

	if (request_ack) {
		uint64_t status;

		rc = vhost_user_recv_u64(vu_dev, &status);
		if (rc)
			goto out;

		if (status) {
			vu_err(vu_dev, "slave reports error: %llu\n", status);
			rc = -EIO;
			goto out;
		}
	}

out:
	raw_spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
	return rc;
}

static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
				      bool need_response, u32 request)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
}

static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
					 u32 request, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
			       u32 request, u64 value)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = value,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
{
	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
}

static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
				   u64 *features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_FEATURES);

	if (rc)
		return rc;
	return vhost_user_recv_u64(vu_dev, features);
}

static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
				   u64 features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 *protocol_features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_PROTOCOL_FEATURES);

	if (rc)
		return rc;
	return vhost_user_recv_u64(vu_dev, protocol_features);
}

static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 protocol_features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
				   protocol_features);
}

static int vhost_user_get_queue_num(struct virtio_uml_device *vu_dev,
				    u64 *queue_num)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_QUEUE_NUM);

	if (rc)
		return rc;
	return vhost_user_recv_u64(vu_dev, queue_num);
}

/* Send a u64 reply to a slave request on the request fd */
static void vhost_user_reply(struct virtio_uml_device *vu_dev,
			     struct vhost_user_msg *msg, int response)
{
	struct vhost_user_msg reply = {
		.payload.integer = response,
	};
	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
	int rc;

	reply.header = msg->header;
	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
	reply.header.flags |= VHOST_USER_FLAG_REPLY;
	reply.header.size = sizeof(reply.payload.integer);

	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);

	if (rc)
		vu_err(vu_dev,
		       "sending reply to slave request failed: %d (size %zu)\n",
		       rc, size);
}

/*
 * Drain all pending slave requests from the request fd, recording
 * vring-call and config-change events for later dispatch and replying
 * where the slave asked for a reply.
 */
static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
				       struct time_travel_event *ev)
{
	struct virtqueue *vq;
	int response = 1;
	struct {
		struct vhost_user_msg msg;
		u8 extra_payload[512];
	} msg;
	int rc;
	irqreturn_t irq_rc = IRQ_NONE;

	while (1) {
		rc = vhost_user_recv_req(vu_dev, &msg.msg,
					 sizeof(msg.msg.payload) +
					 sizeof(msg.extra_payload));
		if (rc)
			break;

		switch (msg.msg.header.request) {
		case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
			vu_dev->config_changed_irq = true;
			response = 0;
			break;
		case VHOST_USER_SLAVE_VRING_CALL:
			virtio_device_for_each_vq((&vu_dev->vdev), vq) {
				if (vq->index == msg.msg.payload.vring_state.index) {
					response = 0;
					vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
					break;
				}
			}
			break;
		case VHOST_USER_SLAVE_IOTLB_MSG:
			/* not supported - VIRTIO_F_ACCESS_PLATFORM */
		case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
			/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
		default:
			vu_err(vu_dev, "unexpected slave request %d\n",
			       msg.msg.header.request);
		}

		if (ev && !vu_dev->suspended)
			time_travel_add_irq_event(ev);

		if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
			vhost_user_reply(vu_dev, &msg.msg, response);
		irq_rc = IRQ_HANDLED;
	}
	/* mask EAGAIN as we try non-blocking read until socket is empty */
	vu_dev->recv_rc = (rc == -EAGAIN) ?
		0 : rc;
	return irq_rc;
}

/*
 * IRQ handler for the request fd: drain pending messages (unless the
 * time-travel handler already did), then dispatch the recorded vring
 * calls or config-change notification.
 */
static irqreturn_t vu_req_interrupt(int irq, void *data)
{
	struct virtio_uml_device *vu_dev = data;
	irqreturn_t ret = IRQ_HANDLED;

	if (!um_irq_timetravel_handler_used())
		ret = vu_req_read_message(vu_dev, NULL);

	if (vu_dev->recv_rc) {
		vhost_user_check_reset(vu_dev, vu_dev->recv_rc);
	} else if (vu_dev->vq_irq_vq_map) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
				vring_interrupt(0 /* ignored */, vq);
		}
		vu_dev->vq_irq_vq_map = 0;
	} else if (vu_dev->config_changed_irq) {
		virtio_config_changed(&vu_dev->vdev);
		vu_dev->config_changed_irq = false;
	}

	return ret;
}

/* Time-travel variant: read messages in the communication handler context */
static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
					  struct time_travel_event *ev)
{
	vu_req_read_message(data, ev);
}

/*
 * Create the slave-request channel: a pipe whose read end we poll via an
 * IRQ and whose write end is handed to the device, then closed locally.
 */
static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
	int rc, req_fds[2];

	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
	rc = os_pipe(req_fds, true, true);
	if (rc < 0)
		return rc;
	vu_dev->req_fd = req_fds[0];

	rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
			       vu_req_interrupt, IRQF_SHARED,
			       vu_dev->pdev->name, vu_dev,
			       vu_req_interrupt_comm_handler);
	if (rc < 0)
		goto err_close;

	vu_dev->irq = rc;

	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
					   req_fds[1]);
	if (rc)
		goto err_free_irq;

	goto out;

err_free_irq:
	um_free_irq(vu_dev->irq, vu_dev);
err_close:
	os_close_file(req_fds[0]);
out:
	/* Close unused write end of request fds */
	os_close_file(req_fds[1]);
	return rc;
}

/*
 * Initial vhost-user handshake: claim ownership, negotiate device and
 * protocol features, optionally set up the slave-request channel and
 * query the queue limit (MQ).
 */
static int vhost_user_init(struct virtio_uml_device *vu_dev)
{
	int rc = vhost_user_set_owner(vu_dev);

	if (rc)
		return rc;
	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
	if (rc)
		return rc;

	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
		rc = vhost_user_get_protocol_features(vu_dev,
				&vu_dev->protocol_features);
		if (rc)
			return rc;
		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
		rc = vhost_user_set_protocol_features(vu_dev,
				vu_dev->protocol_features);
		if (rc)
			return rc;
	}

	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		rc = vhost_user_init_slave_req(vu_dev);
		if (rc)
			return rc;
	}

	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_MQ)) {
		rc = vhost_user_get_queue_num(vu_dev, &vu_dev->max_vqs);
		if (rc)
			return rc;
	} else {
		vu_dev->max_vqs = U64_MAX;
	}

	return 0;
}

/*
 * Read @len config bytes at @offset; always requests config space from
 * 0 up to offset+len and copies the tail out.  Silently does nothing if
 * the CONFIG protocol feature was not negotiated or allocation fails.
 */
static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
				  u32 offset, void *buf, u32 len)
{
	u32 cfg_size = offset + len;
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_GET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = 0;
	msg->payload.config.size = cfg_size;

	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
	if (rc) {
		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
		       rc);
		goto free;
	}

	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
	if (rc) {
		vu_err(vu_dev,
		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
		       rc);
		goto free;
	}

	if (msg->header.size != payload_size ||
	    msg->payload.config.size != cfg_size) {
		rc = -EPROTO;
		vu_err(vu_dev,
		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
		       msg->header.size, payload_size,
		       msg->payload.config.size, cfg_size);
		goto free;
	}
	memcpy(buf, msg->payload.config.payload + offset, len);

free:
	kfree(msg);
}

/* Write @len config bytes at @offset; no-op without the CONFIG feature */
static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
				  u32 offset, const void *buf, u32 len)
{
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + len;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_SET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = offset;
	msg->payload.config.size = len;
	memcpy(msg->payload.config.payload, buf, len);

	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
	if (rc)
		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
		       rc);

	kfree(msg);
}

/*
 * Describe one guest memory region to the slave; phys_mapping() returns
 * the backing fd, which must be the same at both ends of the region.
 */
static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
				      struct vhost_user_mem_region *region_out)
{
	unsigned long long mem_offset;
	int rc = phys_mapping(addr, &mem_offset);

	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
		return -EFAULT;
	*fd_out = rc;
	region_out->guest_addr = addr;
	region_out->user_addr = addr;
	region_out->size = size;
	region_out->mmap_offset = mem_offset;

	/* Ensure mapping is valid for the entire region */
	rc = phys_mapping(addr + size - 1, &mem_offset);
	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
		 addr + size - 1, rc, *fd_out))
		return -EFAULT;
	return 0;
}

static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
{
	struct vhost_user_msg msg = {
		.header.request =
			VHOST_USER_SET_MEM_TABLE,
		.header.size = offsetof(typeof(msg.payload.mem_regions), regions[1]),
		.payload.mem_regions.num = 1,
	};
	unsigned long reserved = uml_reserved - uml_physmem;
	int fds[2];
	int rc;

	/*
	 * This is a bit tricky, see also the comment with setup_physmem().
	 *
	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
	 * but the code and data we *already* have is omitted. To us, this
	 * is no difference, since they both become part of our address
	 * space and memory consumption. To somebody looking in from the
	 * outside, however, it is different because the part of our memory
	 * consumption that's already part of the binary (code/data) is not
	 * mapped from the file, so it's not visible to another mmap from
	 * the file descriptor.
	 *
	 * Thus, don't advertise this space to the vhost-user slave. This
	 * means that the slave will likely abort or similar when we give
	 * it an address from the hidden range, since it's not marked as
	 * a valid address, but at least that way we detect the issue and
	 * don't just have the slave read an all-zeroes buffer from the
	 * shared memory file, or write something there that we can never
	 * see (depending on the direction of the virtqueue traffic.)
	 *
	 * Since we usually don't want to use .text for virtio buffers,
	 * this effectively means that you cannot use
	 *  1) global variables, which are in the .bss and not in the shm
	 *     file-backed memory
	 *  2) the stack in some processes, depending on where they have
	 *     their stack (or maybe only no interrupt stack?)
	 *
	 * The stack is already not typically valid for DMA, so this isn't
	 * much of a restriction, but global variables might be encountered.
	 *
	 * It might be possible to fix it by copying around the data that's
	 * between bss_start and where we map the file now, but it's not
	 * something that you typically encounter with virtio drivers, so
	 * it didn't seem worthwhile.
	 */
	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
					&fds[0],
					&msg.payload.mem_regions.regions[0]);

	if (rc < 0)
		return rc;

	return vhost_user_send(vu_dev, false, &msg, fds,
			       msg.payload.mem_regions.num);
}

static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
				      u32 request, u32 index, u32 num)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.vring_state),
		.payload.vring_state.index = index,
		.payload.vring_state.num = num,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
				    u32 index, u32 num)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
					  index, num);
}

static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
				     u32 index, u32 offset)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
					  index, offset);
}

static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
				     u32 index, u64 desc, u64 used, u64 avail,
				     u64 log)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_VRING_ADDR,
		.header.size = sizeof(msg.payload.vring_addr),
		.payload.vring_addr.index = index,
		.payload.vring_addr.desc = desc,
		.payload.vring_addr.used = used,
		.payload.vring_addr.avail = avail,
		.payload.vring_addr.log = log,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

/*
 * Attach a kick/call fd to a vring; a negative fd sets the POLL flag,
 * i.e. tells the slave there is no eventfd for this vring.
 */
static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
				   u32 request, int index, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = index,
	};

	if (index & ~VHOST_USER_VRING_INDEX_MASK)
		return -EINVAL;
	if (fd < 0) {
		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
	}
	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
				       index, fd);
}

static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
				       index, fd);
}

static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
				       u32 index, bool enable)
{
	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
		return 0;

	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
					  index, enable);
}


/* Virtio interface */

/*
 * Kick the device: either via the eventfd, or - with in-band
 * notifications - via a VHOST_USER_VRING_KICK message.
 */
static bool vu_notify(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	const uint64_t n = 1;
	int rc;

	if (info->suspended)
		return true;

	time_travel_propagate_time();

	if (info->kick_fd < 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
						  vq->index, 0) == 0;
	}

	do {
		rc = os_write_file(info->kick_fd, &n, sizeof(n));
	} while (rc == -EINTR);
	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
}

/* Per-vq IRQ handler: drain the call fd and run the vring interrupt */
static irqreturn_t vu_interrupt(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;
	struct virtio_uml_vq_info *info = vq->priv;
	uint64_t n;
	int rc;
	irqreturn_t ret = IRQ_NONE;

	do {
		rc =
		os_read_file(info->call_fd, &n, sizeof(n));
		if (rc == sizeof(n))
			ret |= vring_interrupt(irq, vq);
	} while (rc == sizeof(n) || rc == -EINTR);
	WARN(rc != -EAGAIN, "read returned %d\n", rc);
	return ret;
}


static void vu_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_get_config(vu_dev, offset, buf, len);
}

static void vu_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_set_config(vu_dev, offset, buf, len);
}

static u8 vu_get_status(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->status;
}

static void vu_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = status;
}

static void vu_reset(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = 0;
}

/* Tear down one virtqueue: free its IRQ/fds, the vring and the info */
static void vu_del_vq(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;

	if (info->call_fd >= 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}

	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);

	vring_del_virtqueue(vq);
	kfree(info);
}

static void vu_del_vqs(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct virtqueue *vq, *n;
	u64 features;

	/* Note: reverse order as a workaround to a decoding bug in snabb */
	list_for_each_entry_reverse(vq, &vdev->vqs, list)
		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));

	/* Ensure previous messages have been processed */
	WARN_ON(vhost_user_get_features(vu_dev, &features));

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vu_del_vq(vq);
}

/*
 * Set up the call fd (a pipe) and its IRQ for one virtqueue, unless
 * in-band notifications via the slave-request channel are in use.
 */
static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
			       struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	int call_fds[2];
	int rc, irq;

	/* no call FD needed/desired in this case */
	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
	    vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		info->call_fd = -1;
		return 0;
	}

	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
	rc = os_pipe(call_fds, true, true);
	if (rc < 0)
		return rc;

	info->call_fd = call_fds[0];
	irq = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
			     vu_interrupt, IRQF_SHARED, info->name, vq);
	if (irq < 0) {
		rc = irq;
		goto close_both;
	}

	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
	if (rc)
		goto release_irq;

	vu_dev->irq = irq;

	goto out;

release_irq:
	um_free_irq(irq, vq);
close_both:
	os_close_file(call_fds[0]);
out:
	/* Close (unused) write end of call fds */
	os_close_file(call_fds[1]);

	return rc;
}

/*
 * Create one virtqueue and configure it at the vhost-user slave:
 * vring, kick/call fds, size, base and addresses.
 */
static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
				     unsigned index, vq_callback_t *callback,
				     const char *name, bool ctx)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct platform_device *pdev = vu_dev->pdev;
	struct virtio_uml_vq_info *info;
	struct virtqueue *vq;
	int num = MAX_SUPPORTED_QUEUE_SIZE;
	int rc;

	info = kzalloc_obj(*info);
	if (!info) {
		rc = -ENOMEM;
		goto error_kzalloc;
	}
	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
		 pdev->id, name);

	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
				    ctx, vu_notify, callback, info->name);
	if (!vq) {
		rc = -ENOMEM;
		goto error_create;
	}
	vq->priv = info;
	vq->num_max = num;
	num = virtqueue_get_vring_size(vq);

	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
		info->kick_fd = -1;
	} else {
		rc = os_eventfd(0, 0);
		if (rc < 0)
			goto error_kick;
		info->kick_fd = rc;
	}

	rc = vu_setup_vq_call_fd(vu_dev, vq);
	if (rc)
		goto error_call;

	rc = vhost_user_set_vring_num(vu_dev, index, num);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_base(vu_dev, index, 0);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_addr(vu_dev, index,
				       virtqueue_get_desc_addr(vq),
				       virtqueue_get_used_addr(vq),
				       virtqueue_get_avail_addr(vq),
				       (u64) -1);
	if (rc)
		goto error_setup;

	return vq;

error_setup:
	if (info->call_fd >= 0) {
		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}
error_call:
	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);
error_kick:
	vring_del_virtqueue(vq);
error_create:
	kfree(info);
error_kzalloc:
	return ERR_PTR(rc);
}

static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[],
		       struct virtqueue_info vqs_info[],
		       struct irq_affinity *desc)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	int i, queue_idx = 0, rc;
	struct virtqueue *vq;

	/* not supported for now */
	if (WARN(nvqs > 64 || nvqs > vu_dev->max_vqs,
		 "%d VQs requested, only up to 64 or %lld supported\n",
		 nvqs, vu_dev->max_vqs))
		return -EINVAL;

	rc = vhost_user_set_mem_table(vu_dev);
	if (rc)
		return rc;

	for (i = 0; i < nvqs; ++i) {
struct virtqueue_info *vqi = &vqs_info[i]; 1055 1056 if (!vqi->name) { 1057 vqs[i] = NULL; 1058 continue; 1059 } 1060 1061 vqs[i] = vu_setup_vq(vdev, queue_idx++, vqi->callback, 1062 vqi->name, vqi->ctx); 1063 if (IS_ERR(vqs[i])) { 1064 rc = PTR_ERR(vqs[i]); 1065 goto error_setup; 1066 } 1067 } 1068 1069 list_for_each_entry(vq, &vdev->vqs, list) { 1070 struct virtio_uml_vq_info *info = vq->priv; 1071 1072 if (info->kick_fd >= 0) { 1073 rc = vhost_user_set_vring_kick(vu_dev, vq->index, 1074 info->kick_fd); 1075 if (rc) 1076 goto error_setup; 1077 } 1078 1079 rc = vhost_user_set_vring_enable(vu_dev, vq->index, true); 1080 if (rc) 1081 goto error_setup; 1082 } 1083 1084 return 0; 1085 1086 error_setup: 1087 vu_del_vqs(vdev); 1088 return rc; 1089 } 1090 1091 static u64 vu_get_features(struct virtio_device *vdev) 1092 { 1093 struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); 1094 1095 return vu_dev->features; 1096 } 1097 1098 static int vu_finalize_features(struct virtio_device *vdev) 1099 { 1100 struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); 1101 u64 supported = vdev->features & VHOST_USER_SUPPORTED_F; 1102 1103 vring_transport_features(vdev); 1104 vu_dev->features = vdev->features | supported; 1105 1106 return vhost_user_set_features(vu_dev, vu_dev->features); 1107 } 1108 1109 static const char *vu_bus_name(struct virtio_device *vdev) 1110 { 1111 struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); 1112 1113 return vu_dev->pdev->name; 1114 } 1115 1116 static const struct virtio_config_ops virtio_uml_config_ops = { 1117 .get = vu_get, 1118 .set = vu_set, 1119 .get_status = vu_get_status, 1120 .set_status = vu_set_status, 1121 .reset = vu_reset, 1122 .find_vqs = vu_find_vqs, 1123 .del_vqs = vu_del_vqs, 1124 .get_features = vu_get_features, 1125 .finalize_features = vu_finalize_features, 1126 .bus_name = vu_bus_name, 1127 }; 1128 1129 static void virtio_uml_release_dev(struct device *d) 1130 { 1131 struct virtio_device *vdev = 1132 
		container_of(d, struct virtio_device, dev);
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	time_travel_propagate_time();

	/* might not have been opened due to not negotiating the feature */
	if (vu_dev->req_fd >= 0) {
		um_free_irq(vu_dev->irq, vu_dev);
		os_close_file(vu_dev->req_fd);
	}

	os_close_file(vu_dev->sock);
	kfree(vu_dev);
}

/*
 * Opt a device in or out of per-VQ disable/enable across suspend/resume
 * (consumed by virtio_uml_suspend()/virtio_uml_resume()).
 */
void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
				  bool no_vq_suspend)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	/* only valid for devices driven by this transport */
	if (WARN_ON(vdev->config != &virtio_uml_config_ops))
		return;

	vu_dev->no_vq_suspend = no_vq_suspend;
	dev_info(&vdev->dev, "%s VQ suspend\n", str_disabled_enabled(no_vq_suspend));
}

/* connection-loss worker for devicetree-instantiated devices */
static void vu_of_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;
	struct virtio_uml_device *vu_dev;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);

	vu_dev = platform_get_drvdata(pdata->pdev);

	/* mark all vqs broken so pending and new requests fail fast */
	virtio_break_device(&vu_dev->vdev);

	/*
	 * We can't remove the device from the devicetree so the only thing we
	 * can do is warn.
	 */
	WARN_ON(1);
}

/* Platform device */

/*
 * Build platform data from the devicetree node properties
 * "socket-path" and "virtio-device-id".
 * Returns an ERR_PTR() on failure; the allocation is devm-managed.
 */
static struct virtio_uml_platform_data *
virtio_uml_create_pdata(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct virtio_uml_platform_data *pdata;
	int ret;

	if (!np)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken);
	pdata->pdev = pdev;

	ret = of_property_read_string(np, "socket-path", &pdata->socket_path);
	if (ret)
		return ERR_PTR(ret);

	ret = of_property_read_u32(np, "virtio-device-id",
				   &pdata->virtio_device_id);
	if (ret)
		return ERR_PTR(ret);

	return pdata;
}

/*
 * Probe: connect to the vhost-user socket, perform the initial protocol
 * handshake and register the virtio device with the core.
 */
static int virtio_uml_probe(struct platform_device *pdev)
{
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	struct virtio_uml_device *vu_dev;
	int rc;

	/* no command-line pdata means the device came from the devicetree */
	if (!pdata) {
		pdata = virtio_uml_create_pdata(pdev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	}

	vu_dev = kzalloc_obj(*vu_dev);
	if (!vu_dev)
		return -ENOMEM;

	vu_dev->pdata = pdata;
	vu_dev->vdev.dev.parent = &pdev->dev;
	vu_dev->vdev.dev.release = virtio_uml_release_dev;
	vu_dev->vdev.config = &virtio_uml_config_ops;
	vu_dev->vdev.id.device = pdata->virtio_device_id;
	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
	vu_dev->pdev = pdev;
	/* opened later only if the request-fd feature is negotiated */
	vu_dev->req_fd = -1;
	vu_dev->irq = UM_IRQ_ALLOC;

	time_travel_propagate_time();

	/* retry on signal interruption */
	do {
		rc = os_connect_socket(pdata->socket_path);
	} while (rc == -EINTR);
	if (rc < 0)
		goto error_free;
	vu_dev->sock = rc;

	raw_spin_lock_init(&vu_dev->sock_lock);

	rc = vhost_user_init(vu_dev);
	if (rc)
		goto error_init;

	platform_set_drvdata(pdev, vu_dev);

	device_set_wakeup_capable(&vu_dev->vdev.dev, true);

	rc = register_virtio_device(&vu_dev->vdev);
	if (rc) {
		/*
		 * After device_register() inside register_virtio_device(),
		 * cleanup must go through put_device() so that
		 * virtio_uml_release_dev() closes the fds and frees vu_dev.
		 */
		put_device(&vu_dev->vdev.dev);
		return rc;
	}
	vu_dev->registered = 1;
	return 0;

error_init:
	os_close_file(vu_dev->sock);
error_free:
	kfree(vu_dev);
	return rc;
}

static void virtio_uml_remove(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vu_dev->vdev);
}

/* Command line device list */

/* nothing to free: the parent device below is static */
static void vu_cmdline_release_dev(struct device *d)
{
}

/* synthetic parent for all command-line-created platform devices */
static struct device vu_cmdline_parent = {
	.init_name = "virtio-uml-cmdline",
	.release = vu_cmdline_release_dev,
};

/* protects the three cmdline globals below */
static DEFINE_MUTEX(vu_cmdline_lock);
static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;

/* device_for_each_child() callback: free pdata path, drop the pdev */
static int vu_unregister_cmdline_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;

	kfree(pdata->socket_path);
	platform_device_unregister(pdev);
	return 0;
}

/* connection-loss worker for command-line-instantiated devices */
static void vu_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;
	struct virtio_uml_device *vu_dev;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);

	vu_dev = platform_get_drvdata(pdata->pdev);

	virtio_break_device(&vu_dev->vdev);

	/* unlike the devicetree case, these devices can simply be removed */
	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}

/*
 * Parse "<socket>:<virtio_id>[:<platform_id>]" and register a matching
 * platform device under vu_cmdline_parent.
 */
static int vu_cmdline_set_device(const char *device)
{
	const char *ids = strchr(device, ':');
	unsigned int virtio_device_id;
	int processed, consumed, err;
	char *socket_path;
	struct virtio_uml_platform_data pdata, *ppdata;
	struct platform_device *pdev;

	/* need a non-empty socket path followed by ':' */
	if (!ids || ids == device)
		return -EINVAL;

	guard(mutex)(&vu_cmdline_lock);

	/*
	 * ":<virtio_id>" is mandatory; an optional ":<platform_id>" suffix
	 * overrides the id used for this (and hence subsequent) devices.
	 * %n records how much of the string was consumed.
	 */
	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_device_id, &consumed,
			   &vu_cmdline_id, &consumed);

	/* reject unparsable input and trailing garbage */
	if (processed < 1 || ids[consumed])
		return -EINVAL;

	/* lazily register the parent on the first device */
	if (!vu_cmdline_parent_registered) {
		err = device_register(&vu_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			put_device(&vu_cmdline_parent);
			return err;
		}
		vu_cmdline_parent_registered = true;
	}

	/* owned copy of the path; freed in vu_unregister_cmdline_device() */
	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
	if (!socket_path)
		return -ENOMEM;

	pdata.virtio_device_id = (u32) virtio_device_id;
	pdata.socket_path = socket_path;

	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
		vu_cmdline_id, virtio_device_id, socket_path);

	/* pdata is copied by the platform core, so stack storage is fine */
	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
					     vu_cmdline_id++, &pdata,
					     sizeof(pdata));
	err = PTR_ERR_OR_ZERO(pdev);
	if (err)
		goto free;

	/* fix up the copied pdata to point at its own pdev/work item */
	ppdata = pdev->dev.platform_data;
	ppdata->pdev = pdev;
	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);

	return 0;

free:
	kfree(socket_path);
	return err;
}

/* module parameter "device" setter */
static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
	return vu_cmdline_set_device(device);
}

/* append one child's "<socket>:<id>:<pdev-id>" line to the output buffer */
static int vu_cmdline_get_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	char *buffer = data;
	unsigned int len = strlen(buffer);

	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
	return 0;
}

/* module parameter "device" getter: list every registered device */
static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	guard(mutex)(&vu_cmdline_lock);

	buffer[0] = '\0';
	if (vu_cmdline_parent_registered)
		device_for_each_child(&vu_cmdline_parent, buffer,
				      vu_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vu_cmdline_param_ops = {
	.set = vu_cmdline_set,
	.get = vu_cmdline_get,
};

device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
"    Configure a virtio device over a vhost-user socket.\n"
"    See virtio_ids.h for a list of possible virtio device id values.\n"
"    Optionally use a specific platform_device id.\n\n"
);


/* tear down all command-line devices and the synthetic parent */
static void vu_unregister_cmdline_devices(void)
{
	guard(mutex)(&vu_cmdline_lock);

	if (vu_cmdline_parent_registered) {
		device_for_each_child(&vu_cmdline_parent, NULL,
				      vu_unregister_cmdline_device);
		device_unregister(&vu_cmdline_parent);
		vu_cmdline_parent_registered = false;
	}
}

/* mconsole "config virtio_uml.device=<...>" handler */
static int vu_mc_config(char *str, char **error_out)
{
	if (*str != '=') {
		*error_out = "Invalid config";
		return -EINVAL;
	}
	str += 1;
	return vu_cmdline_set_device(str);
}

/* mconsole id lookup: not supported for this device class */
static int vu_mc_id(char **str, int *start_out, int *end_out)
{
	return -EOPNOTSUPP;
}

/* mconsole removal: not supported for this device class */
static int vu_mc_remove(int n, char **error_out)
{
	return -EOPNOTSUPP;
}

static struct mc_device virtio_uml_mc = {
	.list = LIST_HEAD_INIT(virtio_uml_mc.list),
	.name = "virtio_uml.device",
	.config = vu_mc_config,
	.get_config = NULL,
	.id = vu_mc_id,
	.remove = vu_mc_remove,
};

static int __init virtio_uml_mc_init(void)
{
	mconsole_register_dev(&virtio_uml_mc);
	return 0;
}
late_initcall(virtio_uml_mc_init);

/* Platform driver */

static const struct of_device_id virtio_uml_match[] = {
	{ .compatible = "virtio,uml", },
	{ }
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);

/*
 * Suspend: unless the device opted out, disable every ring at the
 * back-end and mark it suspended; arm the wake IRQ only if the device
 * may wake the system.
 */
static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = true;
			vhost_user_set_vring_enable(vu_dev, vq->index, false);
		}
	}

	if (!device_may_wakeup(&vu_dev->vdev.dev)) {
		vu_dev->suspended = true;
		return 0;
	}

	return irq_set_irq_wake(vu_dev->irq, 1);
}

/* Resume: exact mirror of virtio_uml_suspend() */
static int virtio_uml_resume(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = false;
			vhost_user_set_vring_enable(vu_dev, vq->index, true);
		}
	}

	vu_dev->suspended = false;

	if (!device_may_wakeup(&vu_dev->vdev.dev))
		return 0;

	return irq_set_irq_wake(vu_dev->irq, 0);
}

static struct platform_driver virtio_uml_driver = {
	.probe = virtio_uml_probe,
	.remove = virtio_uml_remove,
	.driver = {
		.name = "virtio-uml",
		.of_match_table = virtio_uml_match,
	},
	.suspend = virtio_uml_suspend,
	.resume = virtio_uml_resume,
};

static int __init virtio_uml_init(void)
{
	return platform_driver_register(&virtio_uml_driver);
}

static void __exit virtio_uml_exit(void)
{
	platform_driver_unregister(&virtio_uml_driver);
	vu_unregister_cmdline_devices();
}

module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
/* also run on UML shutdown when built in */
__uml_exitcall(virtio_uml_exit);

MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");