// SPDX-License-Identifier: GPL-2.0-only
#include <linux/virtio.h>
#include <linux/spinlock.h>
#include <linux/virtio_config.h>
#include <linux/virtio_anchor.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <uapi/linux/virtio_ids.h>

/* Unique numbering for virtio devices. */
static DEFINE_IDA(virtio_index_ida);

static ssize_t device_show(struct device *_d,
			   struct device_attribute *attr, char *buf)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	return sysfs_emit(buf, "0x%04x\n", dev->id.device);
}
static DEVICE_ATTR_RO(device);

static ssize_t vendor_show(struct device *_d,
			   struct device_attribute *attr, char *buf)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	return sysfs_emit(buf, "0x%04x\n", dev->id.vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t status_show(struct device *_d,
			   struct device_attribute *attr, char *buf)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	return sysfs_emit(buf, "0x%08x\n", dev->config->get_status(dev));
}
static DEVICE_ATTR_RO(status);

static ssize_t modalias_show(struct device *_d,
			     struct device_attribute *attr, char *buf)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	return sysfs_emit(buf, "virtio:d%08Xv%08X\n",
			  dev->id.device, dev->id.vendor);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t features_show(struct device *_d,
			     struct device_attribute *attr, char *buf)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	unsigned int i;
	ssize_t len = 0;

	/* We actually represent this as a bitstring, as it could be
	 * arbitrary length in future. */
	for (i = 0; i < sizeof(dev->features)*8; i++)
		len += sysfs_emit_at(buf, len, "%c",
				     __virtio_test_bit(dev, i) ? '1' : '0');
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR_RO(features);

static struct attribute *virtio_dev_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_vendor.attr,
	&dev_attr_status.attr,
	&dev_attr_modalias.attr,
	&dev_attr_features.attr,
	NULL,
};
ATTRIBUTE_GROUPS(virtio_dev);
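/*
 * The attributes above appear under /sys/bus/virtio/devices/virtioN/.
 * As an illustration only (device/vendor values are hypothetical), a device
 * with id.device == 1 and id.vendor == 0 would report:
 *
 *	device:   0x0001
 *	modalias: virtio:d00000001v00000000
 *	features: a string of 64 '0'/'1' characters, one per feature bit
 */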
static inline int virtio_id_match(const struct virtio_device *dev,
				  const struct virtio_device_id *id)
{
	if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID)
		return 0;

	return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
}

/* This looks through all the IDs a driver claims to support.  If any of them
 * match, we return 1 and the kernel will call virtio_dev_probe(). */
static int virtio_dev_match(struct device *_dv, const struct device_driver *_dr)
{
	unsigned int i;
	struct virtio_device *dev = dev_to_virtio(_dv);
	const struct virtio_device_id *ids;

	ids = drv_to_virtio(_dr)->id_table;
	for (i = 0; ids[i].device; i++)
		if (virtio_id_match(dev, &ids[i]))
			return 1;
	return 0;
}

static int virtio_uevent(const struct device *_dv, struct kobj_uevent_env *env)
{
	const struct virtio_device *dev = dev_to_virtio(_dv);

	return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
			      dev->id.device, dev->id.vendor);
}

void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
					 unsigned int fbit)
{
	unsigned int i;
	struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);

	for (i = 0; i < drv->feature_table_size; i++)
		if (drv->feature_table[i] == fbit)
			return;

	if (drv->feature_table_legacy) {
		for (i = 0; i < drv->feature_table_size_legacy; i++)
			if (drv->feature_table_legacy[i] == fbit)
				return;
	}

	BUG();
}
EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);

static void __virtio_config_changed(struct virtio_device *dev)
{
	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);

	if (!dev->config_core_enabled || dev->config_driver_disabled)
		dev->config_change_pending = true;
	else if (drv && drv->config_changed) {
		drv->config_changed(dev);
		dev->config_change_pending = false;
	}
}

void virtio_config_changed(struct virtio_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->config_lock, flags);
	__virtio_config_changed(dev);
	spin_unlock_irqrestore(&dev->config_lock, flags);
}
EXPORT_SYMBOL_GPL(virtio_config_changed);
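/*
 * Transports report configuration-space changes by calling
 * virtio_config_changed() from their interrupt handler.  A minimal sketch,
 * assuming a hypothetical transport interrupt handler (names are
 * illustrative, not part of this file):
 *
 *	static irqreturn_t example_config_irq(int irq, void *opaque)
 *	{
 *		struct virtio_device *vdev = opaque;
 *
 *		virtio_config_changed(vdev);
 *		return IRQ_HANDLED;
 *	}
 *
 * The core then either invokes the driver's ->config_changed() callback or
 * latches the event in config_change_pending until reporting is re-enabled.
 */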
/**
 * virtio_config_driver_disable - disable config change reporting by drivers
 * @dev: the virtio device
 *
 * This is only allowed to be called by a driver and disabling can't
 * be nested.
 */
void virtio_config_driver_disable(struct virtio_device *dev)
{
	spin_lock_irq(&dev->config_lock);
	dev->config_driver_disabled = true;
	spin_unlock_irq(&dev->config_lock);
}
EXPORT_SYMBOL_GPL(virtio_config_driver_disable);

/**
 * virtio_config_driver_enable - enable config change reporting by drivers
 * @dev: the virtio device
 *
 * This is only allowed to be called by a driver and enabling can't
 * be nested.
 */
void virtio_config_driver_enable(struct virtio_device *dev)
{
	spin_lock_irq(&dev->config_lock);
	dev->config_driver_disabled = false;
	if (dev->config_change_pending)
		__virtio_config_changed(dev);
	spin_unlock_irq(&dev->config_lock);
}
EXPORT_SYMBOL_GPL(virtio_config_driver_enable);

static void virtio_config_core_disable(struct virtio_device *dev)
{
	spin_lock_irq(&dev->config_lock);
	dev->config_core_enabled = false;
	spin_unlock_irq(&dev->config_lock);
}

static void virtio_config_core_enable(struct virtio_device *dev)
{
	spin_lock_irq(&dev->config_lock);
	dev->config_core_enabled = true;
	if (dev->config_change_pending)
		__virtio_config_changed(dev);
	spin_unlock_irq(&dev->config_lock);
}

void virtio_add_status(struct virtio_device *dev, unsigned int status)
{
	might_sleep();
	dev->config->set_status(dev, dev->config->get_status(dev) | status);
}
EXPORT_SYMBOL_GPL(virtio_add_status);

/* Do some validation, then set FEATURES_OK */
static int virtio_features_ok(struct virtio_device *dev)
{
	unsigned int status;

	might_sleep();

	if (virtio_check_mem_acc_cb(dev)) {
		if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
			dev_warn(&dev->dev,
				 "device must provide VIRTIO_F_VERSION_1\n");
			return -ENODEV;
		}

		if (!virtio_has_feature(dev, VIRTIO_F_ACCESS_PLATFORM)) {
			dev_warn(&dev->dev,
				 "device must provide VIRTIO_F_ACCESS_PLATFORM\n");
			return -ENODEV;
		}
	}

	if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1))
		return 0;

	virtio_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
	status = dev->config->get_status(dev);
	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
		dev_err(&dev->dev, "virtio: device refuses features: %x\n",
			status);
		return -ENODEV;
	}
	return 0;
}

/**
 * virtio_reset_device - quiesce device for removal
 * @dev: the device to reset
 *
 * Prevents device from sending interrupts and accessing memory.
 *
 * Generally used for cleanup during driver / device removal.
 *
 * Once this has been invoked, caller must ensure that
 * virtqueue_notify / virtqueue_kick are not in progress.
 *
 * Note: this guarantees that vq callbacks are not in progress, however caller
 * is responsible for preventing access from other contexts, such as a system
 * call/workqueue/bh. Invoking virtio_break_device then flushing any such
 * contexts is one way to handle that.
 */
void virtio_reset_device(struct virtio_device *dev)
{
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	/*
	 * The below virtio_synchronize_cbs() guarantees that any
	 * interrupt for this line arriving after
	 * virtio_synchronize_cbs() has completed is guaranteed to see
	 * vq->broken as true.
	 */
	virtio_break_device(dev);
	virtio_synchronize_cbs(dev);
#endif

	dev->config->reset(dev);
}
EXPORT_SYMBOL_GPL(virtio_reset_device);
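/*
 * A typical use of virtio_reset_device() is in a driver's remove path, where
 * the device must be quiesced before its virtqueues are torn down.  A minimal
 * sketch (example_remove is a hypothetical driver callback):
 *
 *	static void example_remove(struct virtio_device *vdev)
 *	{
 *		virtio_reset_device(vdev);	// stop interrupts and DMA
 *		// ... flush/cancel any workqueues or other contexts ...
 *		vdev->config->del_vqs(vdev);	// now safe to tear down the vqs
 *	}
 */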
static int virtio_dev_probe(struct device *_d)
{
	int err, i;
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
	u64 device_features;
	u64 driver_features;
	u64 driver_features_legacy;

	/* We have a driver! */
	virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);

	/* Figure out what features the device supports. */
	device_features = dev->config->get_features(dev);

	/* Figure out what features the driver supports. */
	driver_features = 0;
	for (i = 0; i < drv->feature_table_size; i++) {
		unsigned int f = drv->feature_table[i];
		BUG_ON(f >= 64);
		driver_features |= (1ULL << f);
	}

	/* Some drivers have a separate feature table for legacy
	 * (pre-virtio-1.0) devices. */
	if (drv->feature_table_legacy) {
		driver_features_legacy = 0;
		for (i = 0; i < drv->feature_table_size_legacy; i++) {
			unsigned int f = drv->feature_table_legacy[i];
			BUG_ON(f >= 64);
			driver_features_legacy |= (1ULL << f);
		}
	} else {
		driver_features_legacy = driver_features;
	}

	if (device_features & (1ULL << VIRTIO_F_VERSION_1))
		dev->features = driver_features & device_features;
	else
		dev->features = driver_features_legacy & device_features;

	/* When debugging, user may filter some features by hand. */
	virtio_debug_device_filter_features(dev);

	/* Transport features always preserved to pass to finalize_features. */
	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
		if (device_features & (1ULL << i))
			__virtio_set_bit(dev, i);

	err = dev->config->finalize_features(dev);
	if (err)
		goto err;

	if (drv->validate) {
		u64 features = dev->features;

		err = drv->validate(dev);
		if (err)
			goto err;

		/* Did validation change any features? Then write them again. */
		if (features != dev->features) {
			err = dev->config->finalize_features(dev);
			if (err)
				goto err;
		}
	}

	err = virtio_features_ok(dev);
	if (err)
		goto err;

	err = drv->probe(dev);
	if (err)
		goto err;

	/* If probe didn't do it, mark device DRIVER_OK ourselves. */
	if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
		virtio_device_ready(dev);

	if (drv->scan)
		drv->scan(dev);

	virtio_config_core_enable(dev);

	return 0;

err:
	virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
	return err;
}

static void virtio_dev_remove(struct device *_d)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);

	virtio_config_core_disable(dev);

	drv->remove(dev);

	/* Driver should have reset device. */
	WARN_ON_ONCE(dev->config->get_status(dev));

	/* Acknowledge the device's existence again. */
	virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

	of_node_put(dev->dev.of_node);
}

/*
 * virtio_irq_get_affinity - get IRQ affinity mask for device
 * @_d: pointer to the device structure
 * @irq_vec: interrupt vector number
 *
 * Return the CPU affinity mask for @_d and @irq_vec.
 */
static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
						     unsigned int irq_vec)
{
	struct virtio_device *dev = dev_to_virtio(_d);

	if (!dev->config->get_vq_affinity)
		return NULL;

	return dev->config->get_vq_affinity(dev, irq_vec);
}
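/*
 * After the negotiation performed in virtio_dev_probe(), a driver's probe()
 * usually checks the accepted bits with virtio_has_feature() before touching
 * optional config fields.  A minimal sketch, assuming a hypothetical feature
 * bit EXAMPLE_F_FOO defined by the driver (not part of this file):
 *
 *	static int example_probe(struct virtio_device *vdev)
 *	{
 *		bool foo = virtio_has_feature(vdev, EXAMPLE_F_FOO);
 *
 *		// ... set up virtqueues, gate optional setup on "foo" ...
 *		return 0;
 *	}
 *
 * Checking a non-transport feature bit that the driver never listed in its
 * feature table lands in virtio_check_driver_offered_feature() above and
 * hits its BUG().
 */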
static void virtio_dev_shutdown(struct device *_d)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);

	/*
	 * Stop accesses to or from the device.
	 * We only need to do it if there's a driver - no accesses otherwise.
	 */
	if (!drv)
		return;

	/* If the driver has its own shutdown method, use that. */
	if (drv->shutdown) {
		drv->shutdown(dev);
		return;
	}

	/*
	 * Some devices get wedged if you kick them after they are
	 * reset. Mark all vqs as broken to make sure we don't.
	 */
	virtio_break_device(dev);
	/*
	 * Guarantee that any callback will see vq->broken as true.
	 */
	virtio_synchronize_cbs(dev);
	/*
	 * As IOMMUs are reset on shutdown, this will block device access to
	 * memory. Some devices get wedged if this happens, so reset to make
	 * sure it does not.
	 */
	dev->config->reset(dev);
}

static const struct bus_type virtio_bus = {
	.name  = "virtio",
	.match = virtio_dev_match,
	.dev_groups = virtio_dev_groups,
	.uevent = virtio_uevent,
	.probe = virtio_dev_probe,
	.remove = virtio_dev_remove,
	.irq_get_affinity = virtio_irq_get_affinity,
	.shutdown = virtio_dev_shutdown,
};

int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
{
	/* Catch this early. */
	BUG_ON(driver->feature_table_size && !driver->feature_table);
	driver->driver.bus = &virtio_bus;
	driver->driver.owner = owner;

	return driver_register(&driver->driver);
}
EXPORT_SYMBOL_GPL(__register_virtio_driver);

void unregister_virtio_driver(struct virtio_driver *driver)
{
	driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(unregister_virtio_driver);
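/*
 * Drivers normally do not call __register_virtio_driver() directly; they use
 * the register_virtio_driver()/module_virtio_driver() helpers from
 * <linux/virtio.h>.  A minimal sketch of a driver skeleton (all "example_*"
 * names are hypothetical):
 *
 *	static const struct virtio_device_id example_id_table[] = {
 *		{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *		{ 0 },
 *	};
 *
 *	static struct virtio_driver example_driver = {
 *		.driver.name	= "example-virtio",
 *		.id_table	= example_id_table,
 *		.probe		= example_probe,
 *		.remove		= example_remove,
 *	};
 *	module_virtio_driver(example_driver);
 *
 * Drivers that negotiate features also fill in .feature_table and
 * .feature_table_size (see virtio_dev_probe() above).  The bus matches
 * example_id_table against each device's id via virtio_dev_match().
 */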
static int virtio_device_of_init(struct virtio_device *dev)
{
	struct device_node *np, *pnode = dev_of_node(dev->dev.parent);
	char compat[] = "virtio,deviceXXXXXXXX";
	int ret, count;

	if (!pnode)
		return 0;

	count = of_get_available_child_count(pnode);
	if (!count)
		return 0;

	/* There can be only 1 child node */
	if (WARN_ON(count > 1))
		return -EINVAL;

	np = of_get_next_available_child(pnode, NULL);
	if (WARN_ON(!np))
		return -ENODEV;

	ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);
	BUG_ON(ret >= sizeof(compat));

	/*
	 * On powerpc/pseries virtio devices are PCI devices so PCI
	 * vendor/device ids play the role of the "compatible" property.
	 * Simply don't init of_node in this case.
	 */
	if (!of_device_is_compatible(np, compat)) {
		ret = 0;
		goto out;
	}

	dev->dev.of_node = np;
	return 0;

out:
	of_node_put(np);
	return ret;
}

/**
 * register_virtio_device - register virtio device
 * @dev: virtio device to be registered
 *
 * On error, the caller must call put_device on &@dev->dev (and not kfree),
 * as another code path may have obtained a reference to @dev.
 *
 * Returns: 0 on success, -error on failure
 */
int register_virtio_device(struct virtio_device *dev)
{
	int err;

	dev->dev.bus = &virtio_bus;
	device_initialize(&dev->dev);

	/* Assign a unique device index and hence name. */
	err = ida_alloc(&virtio_index_ida, GFP_KERNEL);
	if (err < 0)
		goto out;

	dev->index = err;
	err = dev_set_name(&dev->dev, "virtio%u", dev->index);
	if (err)
		goto out_ida_remove;

	err = virtio_device_of_init(dev);
	if (err)
		goto out_ida_remove;

	spin_lock_init(&dev->config_lock);
	dev->config_core_enabled = false;
	dev->config_change_pending = false;

	INIT_LIST_HEAD(&dev->vqs);
	spin_lock_init(&dev->vqs_list_lock);

	/* We always start by resetting the device, in case a previous
	 * driver messed it up.  This also tests that code path a little. */
	virtio_reset_device(dev);

	/* Acknowledge that we've seen the device. */
	virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

	virtio_debug_device_init(dev);

	/*
	 * device_add() causes the bus infrastructure to look for a matching
	 * driver.
	 */
	err = device_add(&dev->dev);
	if (err)
		goto out_of_node_put;

	return 0;

out_of_node_put:
	of_node_put(dev->dev.of_node);
out_ida_remove:
	ida_free(&virtio_index_ida, dev->index);
out:
	virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
	return err;
}
EXPORT_SYMBOL_GPL(register_virtio_device);

bool is_virtio_device(struct device *dev)
{
	return dev->bus == &virtio_bus;
}
EXPORT_SYMBOL_GPL(is_virtio_device);

void unregister_virtio_device(struct virtio_device *dev)
{
	int index = dev->index; /* save for after device release */

	device_unregister(&dev->dev);
	virtio_debug_device_exit(dev);
	ida_free(&virtio_index_ida, index);
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);
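/*
 * register_virtio_device() is called by transport drivers (virtio-pci,
 * virtio-mmio, ...) once they have filled in the device's identity and
 * config ops.  A rough sketch of the transport side (example_config_ops and
 * the local variables are hypothetical):
 *
 *	vdev->dev.parent = parent_dev;		// e.g. the underlying PCI device
 *	vdev->id.device  = device_id;
 *	vdev->id.vendor  = vendor_id;
 *	vdev->config     = &example_config_ops;	// a struct virtio_config_ops
 *	err = register_virtio_device(vdev);
 *	if (err)
 *		put_device(&vdev->dev);		// never kfree() here, see above
 *
 * The matching teardown path is unregister_virtio_device().
 */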
static int virtio_device_restore_priv(struct virtio_device *dev, bool restore)
{
	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
	int ret;

	/* We always start by resetting the device, in case a previous
	 * driver messed it up. */
	virtio_reset_device(dev);

	/* Acknowledge that we've seen the device. */
	virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

	/* Maybe driver failed before freeze.
	 * Restore the failed status, for debugging. */
	if (dev->failed)
		virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);

	if (!drv)
		return 0;

	/* We have a driver! */
	virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);

	ret = dev->config->finalize_features(dev);
	if (ret)
		goto err;

	ret = virtio_features_ok(dev);
	if (ret)
		goto err;

	if (restore) {
		if (drv->restore) {
			ret = drv->restore(dev);
			if (ret)
				goto err;
		}
	} else {
		ret = drv->reset_done(dev);
		if (ret)
			goto err;
	}

	/* If restore didn't do it, mark device DRIVER_OK ourselves. */
	if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
		virtio_device_ready(dev);

	virtio_config_core_enable(dev);

	return 0;

err:
	virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
	return ret;
}

#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev)
{
	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
	int ret;

	virtio_config_core_disable(dev);

	dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;

	if (drv && drv->freeze) {
		ret = drv->freeze(dev);
		if (ret) {
			virtio_config_core_enable(dev);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_device_freeze);

int virtio_device_restore(struct virtio_device *dev)
{
	return virtio_device_restore_priv(dev, true);
}
EXPORT_SYMBOL_GPL(virtio_device_restore);
#endif

int virtio_device_reset_prepare(struct virtio_device *dev)
{
	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
	int ret;

	if (!drv || !drv->reset_prepare)
		return -EOPNOTSUPP;

	virtio_config_core_disable(dev);

	dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;

	ret = drv->reset_prepare(dev);
	if (ret) {
		virtio_config_core_enable(dev);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(virtio_device_reset_prepare);

int virtio_device_reset_done(struct virtio_device *dev)
{
	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);

	if (!drv || !drv->reset_done)
		return -EOPNOTSUPP;

	return virtio_device_restore_priv(dev, false);
}
EXPORT_SYMBOL_GPL(virtio_device_reset_done);

static int virtio_init(void)
{
	if (bus_register(&virtio_bus) != 0)
		panic("virtio bus registration failed");
	virtio_debug_init();
	return 0;
}

static void __exit virtio_exit(void)
{
	virtio_debug_exit();
	bus_unregister(&virtio_bus);
	ida_destroy(&virtio_index_ida);
}
core_initcall(virtio_init);
module_exit(virtio_exit);

MODULE_DESCRIPTION("Virtio core interface");
MODULE_LICENSE("GPL");