// SPDX-License-Identifier: GPL-2.0-only
#include <linux/virtio.h>
#include <linux/spinlock.h>
#include <linux/virtio_config.h>
#include <linux/virtio_anchor.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <uapi/linux/virtio_ids.h>

/* Unique numbering for virtio devices. */
static DEFINE_IDA(virtio_index_ida);

static ssize_t device_show(struct device *_d,
                           struct device_attribute *attr, char *buf)
{
        struct virtio_device *dev = dev_to_virtio(_d);
        return sysfs_emit(buf, "0x%04x\n", dev->id.device);
}
static DEVICE_ATTR_RO(device);

static ssize_t vendor_show(struct device *_d,
                           struct device_attribute *attr, char *buf)
{
        struct virtio_device *dev = dev_to_virtio(_d);
        return sysfs_emit(buf, "0x%04x\n", dev->id.vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t status_show(struct device *_d,
                           struct device_attribute *attr, char *buf)
{
        struct virtio_device *dev = dev_to_virtio(_d);
        return sysfs_emit(buf, "0x%08x\n", dev->config->get_status(dev));
}
static DEVICE_ATTR_RO(status);

static ssize_t modalias_show(struct device *_d,
                             struct device_attribute *attr, char *buf)
{
        struct virtio_device *dev = dev_to_virtio(_d);
        return sysfs_emit(buf, "virtio:d%08Xv%08X\n",
                          dev->id.device, dev->id.vendor);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t features_show(struct device *_d,
                             struct device_attribute *attr, char *buf)
{
        struct virtio_device *dev = dev_to_virtio(_d);
        unsigned int i;
        ssize_t len = 0;

        /* We actually represent this as a bitstring, as it could be
         * arbitrary length in future. */
        for (i = 0; i < sizeof(dev->features)*8; i++)
                len += sysfs_emit_at(buf, len, "%c",
                                     __virtio_test_bit(dev, i) ? '1' : '0');
        len += sysfs_emit_at(buf, len, "\n");
        return len;
}
static DEVICE_ATTR_RO(features);

static struct attribute *virtio_dev_attrs[] = {
        &dev_attr_device.attr,
        &dev_attr_vendor.attr,
        &dev_attr_status.attr,
        &dev_attr_modalias.attr,
        &dev_attr_features.attr,
        NULL,
};
ATTRIBUTE_GROUPS(virtio_dev);

static inline int virtio_id_match(const struct virtio_device *dev,
                                  const struct virtio_device_id *id)
{
        if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID)
                return 0;

        return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
}

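/*
 * Drivers describe the devices they bind to with a zero-terminated array of
 * struct virtio_device_id, matched by virtio_dev_match() below. For
 * illustration only (this table is not part of this file), a network driver
 * typically declares:
 *
 *      static const struct virtio_device_id id_table[] = {
 *              { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *              { 0 },
 *      };
 */
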
/* This looks through all the IDs a driver claims to support.  If any of them
 * match, we return 1 and the kernel will call virtio_dev_probe(). */
static int virtio_dev_match(struct device *_dv, const struct device_driver *_dr)
{
        unsigned int i;
        struct virtio_device *dev = dev_to_virtio(_dv);
        const struct virtio_device_id *ids;

        ids = drv_to_virtio(_dr)->id_table;
        for (i = 0; ids[i].device; i++)
                if (virtio_id_match(dev, &ids[i]))
                        return 1;
        return 0;
}

static int virtio_uevent(const struct device *_dv, struct kobj_uevent_env *env)
{
        const struct virtio_device *dev = dev_to_virtio(_dv);

        return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
                              dev->id.device, dev->id.vendor);
}

void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
                                         unsigned int fbit)
{
        unsigned int i;
        struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);

        for (i = 0; i < drv->feature_table_size; i++)
                if (drv->feature_table[i] == fbit)
                        return;

        if (drv->feature_table_legacy) {
                for (i = 0; i < drv->feature_table_size_legacy; i++)
                        if (drv->feature_table_legacy[i] == fbit)
                                return;
        }

        BUG();
}
EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature);

static void __virtio_config_changed(struct virtio_device *dev)
{
        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);

        if (!dev->config_core_enabled || dev->config_driver_disabled)
                dev->config_change_pending = true;
        else if (drv && drv->config_changed) {
                drv->config_changed(dev);
                dev->config_change_pending = false;
        }
}

void virtio_config_changed(struct virtio_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->config_lock, flags);
        __virtio_config_changed(dev);
        spin_unlock_irqrestore(&dev->config_lock, flags);
}
EXPORT_SYMBOL_GPL(virtio_config_changed);

/**
 * virtio_config_driver_disable - disable config change reporting by drivers
 * @dev: the virtio device
 *
 * This is only allowed to be called by a driver and disabling can't
 * be nested.
 */
void virtio_config_driver_disable(struct virtio_device *dev)
{
        spin_lock_irq(&dev->config_lock);
        dev->config_driver_disabled = true;
        spin_unlock_irq(&dev->config_lock);
}
EXPORT_SYMBOL_GPL(virtio_config_driver_disable);

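/*
 * A driver that temporarily cannot service config_changed callbacks can
 * bracket that window with virtio_config_driver_disable()/_enable(). As an
 * illustrative sketch (vdev stands for the driver's device pointer):
 *
 *      virtio_config_driver_disable(vdev);
 *      ... tear down / rebuild driver state ...
 *      virtio_config_driver_enable(vdev);
 *
 * A config change arriving in between is latched in config_change_pending
 * and replayed by virtio_config_driver_enable() below.
 */
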
/**
 * virtio_config_driver_enable - enable config change reporting by drivers
 * @dev: the virtio device
 *
 * This is only allowed to be called by a driver and enabling can't
 * be nested.
 */
void virtio_config_driver_enable(struct virtio_device *dev)
{
        spin_lock_irq(&dev->config_lock);
        dev->config_driver_disabled = false;
        if (dev->config_change_pending)
                __virtio_config_changed(dev);
        spin_unlock_irq(&dev->config_lock);
}
EXPORT_SYMBOL_GPL(virtio_config_driver_enable);

static void virtio_config_core_disable(struct virtio_device *dev)
{
        spin_lock_irq(&dev->config_lock);
        dev->config_core_enabled = false;
        spin_unlock_irq(&dev->config_lock);
}

static void virtio_config_core_enable(struct virtio_device *dev)
{
        spin_lock_irq(&dev->config_lock);
        dev->config_core_enabled = true;
        if (dev->config_change_pending)
                __virtio_config_changed(dev);
        spin_unlock_irq(&dev->config_lock);
}

void virtio_add_status(struct virtio_device *dev, unsigned int status)
{
        might_sleep();
        dev->config->set_status(dev, dev->config->get_status(dev) | status);
}
EXPORT_SYMBOL_GPL(virtio_add_status);

/* Do some validation, then set FEATURES_OK */
static int virtio_features_ok(struct virtio_device *dev)
{
        unsigned int status;

        might_sleep();

        if (virtio_check_mem_acc_cb(dev)) {
                if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
                        dev_warn(&dev->dev,
                                 "device must provide VIRTIO_F_VERSION_1\n");
                        return -ENODEV;
                }

                if (!virtio_has_feature(dev, VIRTIO_F_ACCESS_PLATFORM)) {
                        dev_warn(&dev->dev,
                                 "device must provide VIRTIO_F_ACCESS_PLATFORM\n");
                        return -ENODEV;
                }
        }

        if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1))
                return 0;

        virtio_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
        status = dev->config->get_status(dev);
        if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
                dev_err(&dev->dev, "virtio: device refuses features: %x\n",
                        status);
                return -ENODEV;
        }
        return 0;
}

/**
 * virtio_reset_device - quiesce device for removal
 * @dev: the device to reset
 *
 * Prevents device from sending interrupts and accessing memory.
 *
 * Generally used for cleanup during driver / device removal.
 *
 * Once this has been invoked, caller must ensure that
 * virtqueue_notify / virtqueue_kick are not in progress.
 *
 * Note: this guarantees that vq callbacks are not in progress, however caller
 * is responsible for preventing access from other contexts, such as a system
 * call/workqueue/bh. Invoking virtio_break_device then flushing any such
 * contexts is one way to handle that.
 */
void virtio_reset_device(struct virtio_device *dev)
{
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
        /*
         * The below virtio_synchronize_cbs() guarantees that any
         * interrupt for this line arriving after
         * virtio_synchronize_cbs() has completed is guaranteed to see
         * vq->broken as true.
         */
        virtio_break_device(dev);
        virtio_synchronize_cbs(dev);
#endif

        dev->config->reset(dev);
}
EXPORT_SYMBOL_GPL(virtio_reset_device);

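/*
 * Binding a driver below follows the virtio status protocol: set DRIVER,
 * read the feature bits the device offers, intersect them with the bits the
 * driver advertises in its feature table, write the result back and verify
 * FEATURES_OK, call the driver's probe(), and finally set DRIVER_OK (either
 * here or in the driver via virtio_device_ready()). For example, if the
 * device offers feature bits {A, B, C} and the driver's table lists only
 * {A, C}, the negotiated set is {A, C}; transport features are preserved
 * separately. On any failure the FAILED status bit is set instead.
 */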
static int virtio_dev_probe(struct device *_d)
{
        int err, i;
        struct virtio_device *dev = dev_to_virtio(_d);
        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
        u64 device_features;
        u64 driver_features;
        u64 driver_features_legacy;

        /* We have a driver! */
        virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);

        /* Figure out what features the device supports. */
        device_features = dev->config->get_features(dev);

        /* Figure out what features the driver supports. */
        driver_features = 0;
        for (i = 0; i < drv->feature_table_size; i++) {
                unsigned int f = drv->feature_table[i];
                BUG_ON(f >= 64);
                driver_features |= (1ULL << f);
        }

        /* Some drivers have a separate feature table for legacy (pre-1.0) devices */
        if (drv->feature_table_legacy) {
                driver_features_legacy = 0;
                for (i = 0; i < drv->feature_table_size_legacy; i++) {
                        unsigned int f = drv->feature_table_legacy[i];
                        BUG_ON(f >= 64);
                        driver_features_legacy |= (1ULL << f);
                }
        } else {
                driver_features_legacy = driver_features;
        }

        if (device_features & (1ULL << VIRTIO_F_VERSION_1))
                dev->features = driver_features & device_features;
        else
                dev->features = driver_features_legacy & device_features;

        /* When debugging, user may filter some features by hand. */
        virtio_debug_device_filter_features(dev);

        /* Transport features always preserved to pass to finalize_features. */
        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++)
                if (device_features & (1ULL << i))
                        __virtio_set_bit(dev, i);

        err = dev->config->finalize_features(dev);
        if (err)
                goto err;

        if (drv->validate) {
                u64 features = dev->features;

                err = drv->validate(dev);
                if (err)
                        goto err;

                /* Did validation change any features? Then write them again. */
                if (features != dev->features) {
                        err = dev->config->finalize_features(dev);
                        if (err)
                                goto err;
                }
        }

        err = virtio_features_ok(dev);
        if (err)
                goto err;

        err = drv->probe(dev);
        if (err)
                goto err;

        /* If probe didn't do it, mark device DRIVER_OK ourselves. */
        if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
                virtio_device_ready(dev);

        if (drv->scan)
                drv->scan(dev);

        virtio_config_core_enable(dev);

        return 0;

err:
        virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
        return err;
}

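/*
 * From the driver's side, the probe() called above typically sets up its
 * virtqueues and may mark the device ready itself. An illustrative sketch
 * with hypothetical names (error handling trimmed):
 *
 *      static int hypothetical_probe(struct virtio_device *vdev)
 *      {
 *              struct virtqueue *vq;
 *
 *              vq = virtio_find_single_vq(vdev, hypothetical_done, "requests");
 *              if (IS_ERR(vq))
 *                      return PTR_ERR(vq);
 *
 *              virtio_device_ready(vdev);
 *              return 0;
 *      }
 */
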
static void virtio_dev_remove(struct device *_d)
{
        struct virtio_device *dev = dev_to_virtio(_d);
        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);

        virtio_config_core_disable(dev);

        drv->remove(dev);

        /* Driver should have reset device. */
        WARN_ON_ONCE(dev->config->get_status(dev));

        /* Acknowledge the device's existence again. */
        virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

        of_node_put(dev->dev.of_node);
}

/*
 * virtio_irq_get_affinity - get IRQ affinity mask for device
 * @_d: ptr to dev structure
 * @irq_vec: interrupt vector number
 *
 * Return the CPU affinity mask for @_d and @irq_vec.
 */
static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
                                                     unsigned int irq_vec)
{
        struct virtio_device *dev = dev_to_virtio(_d);

        if (!dev->config->get_vq_affinity)
                return NULL;

        return dev->config->get_vq_affinity(dev, irq_vec);
}

static void virtio_dev_shutdown(struct device *_d)
{
        struct virtio_device *dev = dev_to_virtio(_d);
        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);

        /*
         * Stop accesses to or from the device.
         * We only need to do it if there's a driver - no accesses otherwise.
         */
        if (!drv)
                return;

        /*
         * Some devices get wedged if you kick them after they are
         * reset. Mark all vqs as broken to make sure we don't.
         */
        virtio_break_device(dev);
        /*
         * Guarantee that any callback will see vq->broken as true.
         */
        virtio_synchronize_cbs(dev);
        /*
         * As IOMMUs are reset on shutdown, this will block device access to
         * memory. Some devices get wedged if this happens, so reset to make
         * sure it does not.
         */
        dev->config->reset(dev);
}

static const struct bus_type virtio_bus = {
        .name  = "virtio",
        .match = virtio_dev_match,
        .dev_groups = virtio_dev_groups,
        .uevent = virtio_uevent,
        .probe = virtio_dev_probe,
        .remove = virtio_dev_remove,
        .irq_get_affinity = virtio_irq_get_affinity,
        .shutdown = virtio_dev_shutdown,
};

int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
{
        /* Catch this early. */
        BUG_ON(driver->feature_table_size && !driver->feature_table);
        driver->driver.bus = &virtio_bus;
        driver->driver.owner = owner;

        return driver_register(&driver->driver);
}
EXPORT_SYMBOL_GPL(__register_virtio_driver);

void unregister_virtio_driver(struct virtio_driver *driver)
{
        driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(unregister_virtio_driver);

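/*
 * Drivers normally use the register_virtio_driver()/module_virtio_driver()
 * helpers from <linux/virtio.h> rather than calling
 * __register_virtio_driver() directly. An illustrative sketch with
 * hypothetical names:
 *
 *      static const unsigned int features[] = {
 *              HYPOTHETICAL_F_SOME_FEATURE,
 *      };
 *
 *      static struct virtio_driver hypothetical_driver = {
 *              .driver.name            = KBUILD_MODNAME,
 *              .id_table               = id_table,
 *              .feature_table          = features,
 *              .feature_table_size     = ARRAY_SIZE(features),
 *              .probe                  = hypothetical_probe,
 *              .remove                 = hypothetical_remove,
 *      };
 *      module_virtio_driver(hypothetical_driver);
 */
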
static int virtio_device_of_init(struct virtio_device *dev)
{
        struct device_node *np, *pnode = dev_of_node(dev->dev.parent);
        char compat[] = "virtio,deviceXXXXXXXX";
        int ret, count;

        if (!pnode)
                return 0;

        count = of_get_available_child_count(pnode);
        if (!count)
                return 0;

        /* There can be only 1 child node */
        if (WARN_ON(count > 1))
                return -EINVAL;

        np = of_get_next_available_child(pnode, NULL);
        if (WARN_ON(!np))
                return -ENODEV;

        ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);
        BUG_ON(ret >= sizeof(compat));

        /*
         * On powerpc/pseries virtio devices are PCI devices so PCI
         * vendor/device ids play the role of the "compatible" property.
         * Simply don't init of_node in this case.
         */
        if (!of_device_is_compatible(np, compat)) {
                ret = 0;
                goto out;
        }

        dev->dev.of_node = np;
        return 0;

out:
        of_node_put(np);
        return ret;
}

/**
 * register_virtio_device - register virtio device
 * @dev: virtio device to be registered
 *
 * On error, the caller must call put_device on &@dev->dev (and not kfree),
 * as another code path may have obtained a reference to @dev.
 *
 * Returns: 0 on success, -error on failure
 */
int register_virtio_device(struct virtio_device *dev)
{
        int err;

        dev->dev.bus = &virtio_bus;
        device_initialize(&dev->dev);

        /* Assign a unique device index and hence name. */
        err = ida_alloc(&virtio_index_ida, GFP_KERNEL);
        if (err < 0)
                goto out;

        dev->index = err;
        err = dev_set_name(&dev->dev, "virtio%u", dev->index);
        if (err)
                goto out_ida_remove;

        err = virtio_device_of_init(dev);
        if (err)
                goto out_ida_remove;

        spin_lock_init(&dev->config_lock);
        dev->config_core_enabled = false;
        dev->config_change_pending = false;

        INIT_LIST_HEAD(&dev->vqs);
        spin_lock_init(&dev->vqs_list_lock);

        /* We always start by resetting the device, in case a previous
         * driver messed it up.  This also tests that code path a little. */
        virtio_reset_device(dev);

        /* Acknowledge that we've seen the device. */
        virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

        virtio_debug_device_init(dev);

        /*
         * device_add() causes the bus infrastructure to look for a matching
         * driver.
         */
        err = device_add(&dev->dev);
        if (err)
                goto out_of_node_put;

        return 0;

out_of_node_put:
        of_node_put(dev->dev.of_node);
out_ida_remove:
        ida_free(&virtio_index_ida, dev->index);
out:
        virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
        return err;
}
EXPORT_SYMBOL_GPL(register_virtio_device);

bool is_virtio_device(struct device *dev)
{
        return dev->bus == &virtio_bus;
}
EXPORT_SYMBOL_GPL(is_virtio_device);

void unregister_virtio_device(struct virtio_device *dev)
{
        int index = dev->index; /* save for after device release */

        device_unregister(&dev->dev);
        virtio_debug_device_exit(dev);
        ida_free(&virtio_index_ida, index);
}
EXPORT_SYMBOL_GPL(unregister_virtio_device);

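/*
 * Transports (virtio-pci, virtio-mmio, ...) allocate the struct
 * virtio_device, fill in the parent device, the id and the config ops, and
 * hand it to register_virtio_device(). An illustrative sketch with
 * hypothetical names:
 *
 *      vdev->dev.parent = parent;
 *      vdev->id.device = device_id;
 *      vdev->id.vendor = vendor_id;
 *      vdev->config = &hypothetical_config_ops;
 *
 *      err = register_virtio_device(vdev);
 *      if (err)
 *              put_device(&vdev->dev);
 *
 * On failure the device must be released with put_device(), never kfree(),
 * as documented above register_virtio_device().
 */
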
static int virtio_device_restore_priv(struct virtio_device *dev, bool restore)
{
        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
        int ret;

        /* We always start by resetting the device, in case a previous
         * driver messed it up. */
        virtio_reset_device(dev);

        /* Acknowledge that we've seen the device. */
        virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

        /* Maybe driver failed before freeze.
         * Restore the failed status, for debugging. */
        if (dev->failed)
                virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);

        if (!drv)
                return 0;

        /* We have a driver! */
        virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);

        ret = dev->config->finalize_features(dev);
        if (ret)
                goto err;

        ret = virtio_features_ok(dev);
        if (ret)
                goto err;

        if (restore) {
                if (drv->restore) {
                        ret = drv->restore(dev);
                        if (ret)
                                goto err;
                }
        } else {
                ret = drv->reset_done(dev);
                if (ret)
                        goto err;
        }

        /* If restore didn't do it, mark device DRIVER_OK ourselves. */
        if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
                virtio_device_ready(dev);

        virtio_config_core_enable(dev);

        return 0;

err:
        virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
        return ret;
}

#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev)
{
        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
        int ret;

        virtio_config_core_disable(dev);

        dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;

        if (drv && drv->freeze) {
                ret = drv->freeze(dev);
                if (ret) {
                        virtio_config_core_enable(dev);
                        return ret;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(virtio_device_freeze);

int virtio_device_restore(struct virtio_device *dev)
{
        return virtio_device_restore_priv(dev, true);
}
EXPORT_SYMBOL_GPL(virtio_device_restore);
#endif

int virtio_device_reset_prepare(struct virtio_device *dev)
{
        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
        int ret;

        if (!drv || !drv->reset_prepare)
                return -EOPNOTSUPP;

        virtio_config_core_disable(dev);

        dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;

        ret = drv->reset_prepare(dev);
        if (ret) {
                virtio_config_core_enable(dev);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(virtio_device_reset_prepare);

int virtio_device_reset_done(struct virtio_device *dev)
{
        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);

        if (!drv || !drv->reset_done)
                return -EOPNOTSUPP;

        return virtio_device_restore_priv(dev, false);
}
EXPORT_SYMBOL_GPL(virtio_device_reset_done);

static int virtio_init(void)
{
        if (bus_register(&virtio_bus) != 0)
                panic("virtio bus registration failed");
        virtio_debug_init();
        return 0;
}

static void __exit virtio_exit(void)
{
        virtio_debug_exit();
        bus_unregister(&virtio_bus);
        ida_destroy(&virtio_index_ida);
}
core_initcall(virtio_init);
module_exit(virtio_exit);

MODULE_DESCRIPTION("Virtio core interface");
MODULE_LICENSE("GPL");