// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/core.c - core driver model code (device registration, etc)
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2006 Novell, Inc.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/genhd.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/sysfs.h>

#include "base.h"
#include "power/power.h"

#ifdef CONFIG_SYSFS_DEPRECATED
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif

/* Device links support. */

#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx)
{
	srcu_read_unlock(&device_links_srcu, idx);
}
#else /* !CONFIG_SRCU */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}
#endif /* !CONFIG_SRCU */
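/*
 * Example (illustrative sketch, not part of the driver core): the read-side
 * locking pattern for walking the device-links lists.  device_links_read_lock()
 * returns a token (the SRCU read-side index when CONFIG_SRCU is enabled) that
 * must be passed back to device_links_read_unlock().  The hypothetical helper
 * below mirrors what device_pm_move_to_tail() does later in this file:
 *
 *	static void example_walk_consumers(struct device *dev)
 *	{
 *		struct device_link *link;
 *		int idx;
 *
 *		idx = device_links_read_lock();
 *		list_for_each_entry(link, &dev->links.consumers, s_node)
 *			dev_dbg(dev, "consumer: %s\n", dev_name(link->consumer));
 *		device_links_read_unlock(idx);
 *	}
 */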
/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child or
 * its consumer etc).  Return 1 if that is the case or 0 otherwise.
 */
static int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	if (dev == target)
		return 1;

	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}

static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node)
		device_reorder_to_tail(link->consumer, NULL);

	return 0;
}

/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the device_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}
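/*
 * Example (illustrative sketch, hypothetical "foo" consumer driver): typical
 * use of device_link_add(), which is documented below.  A managed link created
 * with DL_FLAG_AUTOREMOVE_CONSUMER is dropped by the driver core when the
 * consumer driver unbinds, so the caller only checks the return value for NULL
 * and does not keep it; DL_FLAG_PM_RUNTIME additionally ties the consumer's
 * runtime PM to the supplier.  How the supplier device is obtained is left
 * open here.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct device *supplier = ...;	// e.g. resolved from firmware
 *
 *		if (!device_link_add(&pdev->dev, supplier,
 *				     DL_FLAG_PM_RUNTIME |
 *				     DL_FLAG_AUTOREMOVE_CONSUMER))
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 */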
/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * The caller is responsible for the proper synchronization of the link creation
 * with runtime PM.  First, setting the DL_FLAG_PM_RUNTIME flag will cause the
 * runtime PM framework to take the link into account.  Second, if the
 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
 * be forced into the active metastate and reference-counted upon the creation
 * of the link.  If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
 * ignored.
 *
 * If DL_FLAG_STATELESS is set in @flags, the link is not going to be managed by
 * the driver core and, in particular, the caller of this function is expected
 * to drop the reference to the link acquired by it directly.
 *
 * If that flag is not set, however, the caller of this function is handing the
 * management of the link over to the driver core entirely and its return value
 * can only be used to check whether or not the link is present.  In that case,
 * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
 * flags can be used to indicate to the driver core when the link can be safely
 * deleted.  Namely, setting one of them in @flags indicates to the driver core
 * that the link is not going to be used (by the given caller of this function)
 * after unbinding the consumer or supplier driver, respectively, from its
 * device, so the link can be deleted at that point.  If none of them is set,
 * the link will be maintained until one of the devices pointed to by it (either
 * the consumer or the supplier) is unregistered.
 *
 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
 * be used to request the driver core to automatically probe for a consumer
 * driver after successfully binding a driver to the supplier device.
 *
 * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER
 * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and
 * will cause NULL to be returned upfront.
 *
 * A side effect of the link creation is re-ordering of dpm_list and the
 * devices_kset list by moving the consumer device and all devices depending
 * on it to the ends of these lists (that does not happen to devices that have
 * not been registered when this function is called).
 *
 * The supplier device is required to be registered when this function is called
 * and NULL will be returned if that is not the case.  The consumer device need
 * not be registered, however.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	if (!consumer || !supplier ||
	    (flags & DL_FLAG_STATELESS &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER |
		      DL_FLAG_AUTOPROBE_CONSUMER)) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse dependency between the consumer and the supplier already in
	 * the graph, return NULL.
	 */
	if (!device_pm_initialized(supplier)
	    || device_is_dependent(consumer, supplier)) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
	 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		/*
		 * Don't return a stateless link if the caller wants a stateful
		 * one and vice versa.
		 */
		if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) {
			link = NULL;
			goto out;
		}

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			goto out;
		}

		/*
		 * If the lifetime of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
292 */ 293 if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { 294 if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { 295 link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; 296 link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; 297 } 298 } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) { 299 link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | 300 DL_FLAG_AUTOREMOVE_SUPPLIER); 301 } 302 goto out; 303 } 304 305 link = kzalloc(sizeof(*link), GFP_KERNEL); 306 if (!link) 307 goto out; 308 309 refcount_set(&link->rpm_active, 1); 310 311 if (flags & DL_FLAG_PM_RUNTIME) { 312 if (flags & DL_FLAG_RPM_ACTIVE) 313 refcount_inc(&link->rpm_active); 314 315 pm_runtime_new_link(consumer); 316 } 317 318 get_device(supplier); 319 link->supplier = supplier; 320 INIT_LIST_HEAD(&link->s_node); 321 get_device(consumer); 322 link->consumer = consumer; 323 INIT_LIST_HEAD(&link->c_node); 324 link->flags = flags; 325 kref_init(&link->kref); 326 327 /* Determine the initial link state. */ 328 if (flags & DL_FLAG_STATELESS) { 329 link->status = DL_STATE_NONE; 330 } else { 331 switch (supplier->links.status) { 332 case DL_DEV_PROBING: 333 switch (consumer->links.status) { 334 case DL_DEV_PROBING: 335 /* 336 * A consumer driver can create a link to a 337 * supplier that has not completed its probing 338 * yet as long as it knows that the supplier is 339 * already functional (for example, it has just 340 * acquired some resources from the supplier). 341 */ 342 link->status = DL_STATE_CONSUMER_PROBE; 343 break; 344 default: 345 link->status = DL_STATE_DORMANT; 346 break; 347 } 348 break; 349 case DL_DEV_DRIVER_BOUND: 350 switch (consumer->links.status) { 351 case DL_DEV_PROBING: 352 link->status = DL_STATE_CONSUMER_PROBE; 353 break; 354 case DL_DEV_DRIVER_BOUND: 355 link->status = DL_STATE_ACTIVE; 356 break; 357 default: 358 link->status = DL_STATE_AVAILABLE; 359 break; 360 } 361 break; 362 case DL_DEV_UNBINDING: 363 link->status = DL_STATE_SUPPLIER_UNBIND; 364 break; 365 default: 366 link->status = DL_STATE_DORMANT; 367 break; 368 } 369 } 370 371 /* 372 * Some callers expect the link creation during consumer driver probe to 373 * resume the supplier even without DL_FLAG_RPM_ACTIVE. 374 */ 375 if (link->status == DL_STATE_CONSUMER_PROBE && 376 flags & DL_FLAG_PM_RUNTIME) 377 pm_runtime_resume(supplier); 378 379 /* 380 * Move the consumer and all of the devices depending on it to the end 381 * of dpm_list and the devices_kset list. 382 * 383 * It is necessary to hold dpm_list locked throughout all that or else 384 * we may end up suspending with a wrong ordering of it. 
385 */ 386 device_reorder_to_tail(consumer, NULL); 387 388 list_add_tail_rcu(&link->s_node, &supplier->links.consumers); 389 list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); 390 391 dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); 392 393 out: 394 device_pm_unlock(); 395 device_links_write_unlock(); 396 397 if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link) 398 pm_runtime_put(supplier); 399 400 return link; 401 } 402 EXPORT_SYMBOL_GPL(device_link_add); 403 404 static void device_link_free(struct device_link *link) 405 { 406 while (refcount_dec_not_one(&link->rpm_active)) 407 pm_runtime_put(link->supplier); 408 409 put_device(link->consumer); 410 put_device(link->supplier); 411 kfree(link); 412 } 413 414 #ifdef CONFIG_SRCU 415 static void __device_link_free_srcu(struct rcu_head *rhead) 416 { 417 device_link_free(container_of(rhead, struct device_link, rcu_head)); 418 } 419 420 static void __device_link_del(struct kref *kref) 421 { 422 struct device_link *link = container_of(kref, struct device_link, kref); 423 424 dev_dbg(link->consumer, "Dropping the link to %s\n", 425 dev_name(link->supplier)); 426 427 if (link->flags & DL_FLAG_PM_RUNTIME) 428 pm_runtime_drop_link(link->consumer); 429 430 list_del_rcu(&link->s_node); 431 list_del_rcu(&link->c_node); 432 call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu); 433 } 434 #else /* !CONFIG_SRCU */ 435 static void __device_link_del(struct kref *kref) 436 { 437 struct device_link *link = container_of(kref, struct device_link, kref); 438 439 dev_info(link->consumer, "Dropping the link to %s\n", 440 dev_name(link->supplier)); 441 442 if (link->flags & DL_FLAG_PM_RUNTIME) 443 pm_runtime_drop_link(link->consumer); 444 445 list_del(&link->s_node); 446 list_del(&link->c_node); 447 device_link_free(link); 448 } 449 #endif /* !CONFIG_SRCU */ 450 451 static void device_link_put_kref(struct device_link *link) 452 { 453 if (link->flags & DL_FLAG_STATELESS) 454 kref_put(&link->kref, __device_link_del); 455 else 456 WARN(1, "Unable to drop a managed device link reference\n"); 457 } 458 459 /** 460 * device_link_del - Delete a stateless link between two devices. 461 * @link: Device link to delete. 462 * 463 * The caller must ensure proper synchronization of this function with runtime 464 * PM. If the link was added multiple times, it needs to be deleted as often. 465 * Care is required for hotplugged devices: Their links are purged on removal 466 * and calling device_link_del() is then no longer allowed. 467 */ 468 void device_link_del(struct device_link *link) 469 { 470 device_links_write_lock(); 471 device_pm_lock(); 472 device_link_put_kref(link); 473 device_pm_unlock(); 474 device_links_write_unlock(); 475 } 476 EXPORT_SYMBOL_GPL(device_link_del); 477 478 /** 479 * device_link_remove - Delete a stateless link between two devices. 480 * @consumer: Consumer end of the link. 481 * @supplier: Supplier end of the link. 482 * 483 * The caller must ensure proper synchronization of this function with runtime 484 * PM. 
485 */ 486 void device_link_remove(void *consumer, struct device *supplier) 487 { 488 struct device_link *link; 489 490 if (WARN_ON(consumer == supplier)) 491 return; 492 493 device_links_write_lock(); 494 device_pm_lock(); 495 496 list_for_each_entry(link, &supplier->links.consumers, s_node) { 497 if (link->consumer == consumer) { 498 device_link_put_kref(link); 499 break; 500 } 501 } 502 503 device_pm_unlock(); 504 device_links_write_unlock(); 505 } 506 EXPORT_SYMBOL_GPL(device_link_remove); 507 508 static void device_links_missing_supplier(struct device *dev) 509 { 510 struct device_link *link; 511 512 list_for_each_entry(link, &dev->links.suppliers, c_node) 513 if (link->status == DL_STATE_CONSUMER_PROBE) 514 WRITE_ONCE(link->status, DL_STATE_AVAILABLE); 515 } 516 517 /** 518 * device_links_check_suppliers - Check presence of supplier drivers. 519 * @dev: Consumer device. 520 * 521 * Check links from this device to any suppliers. Walk the list of the device's 522 * links to suppliers and see if all of them are available. If not, simply 523 * return -EPROBE_DEFER. 524 * 525 * We need to guarantee that the supplier will not go away after the check has 526 * been positive here. It only can go away in __device_release_driver() and 527 * that function checks the device's links to consumers. This means we need to 528 * mark the link as "consumer probe in progress" to make the supplier removal 529 * wait for us to complete (or bad things may happen). 530 * 531 * Links with the DL_FLAG_STATELESS flag set are ignored. 532 */ 533 int device_links_check_suppliers(struct device *dev) 534 { 535 struct device_link *link; 536 int ret = 0; 537 538 device_links_write_lock(); 539 540 list_for_each_entry(link, &dev->links.suppliers, c_node) { 541 if (link->flags & DL_FLAG_STATELESS) 542 continue; 543 544 if (link->status != DL_STATE_AVAILABLE) { 545 device_links_missing_supplier(dev); 546 ret = -EPROBE_DEFER; 547 break; 548 } 549 WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE); 550 } 551 dev->links.status = DL_DEV_PROBING; 552 553 device_links_write_unlock(); 554 return ret; 555 } 556 557 /** 558 * device_links_driver_bound - Update device links after probing its driver. 559 * @dev: Device to update the links for. 560 * 561 * The probe has been successful, so update links from this device to any 562 * consumers by changing their status to "available". 563 * 564 * Also change the status of @dev's links to suppliers to "active". 565 * 566 * Links with the DL_FLAG_STATELESS flag set are ignored. 567 */ 568 void device_links_driver_bound(struct device *dev) 569 { 570 struct device_link *link; 571 572 device_links_write_lock(); 573 574 list_for_each_entry(link, &dev->links.consumers, s_node) { 575 if (link->flags & DL_FLAG_STATELESS) 576 continue; 577 578 /* 579 * Links created during consumer probe may be in the "consumer 580 * probe" state to start with if the supplier is still probing 581 * when they are created and they may become "active" if the 582 * consumer probe returns first. Skip them here. 
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
		WRITE_ONCE(link->status, DL_STATE_ACTIVE);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();
}

/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
	struct device_link *link, *ln;

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
			__device_link_del(&link->kref);
		else if (link->status == DL_STATE_CONSUMER_PROBE ||
			 link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
	}

	dev->links.status = DL_DEV_NO_DRIVER;
}

/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver has just failed to probe.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
	struct device_link *link;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		/*
		 * The probe has failed, so if the status of the link is
		 * "consumer probe" or "active", it must have been added by
		 * a probing consumer while this device was still probing.
		 * Change its state to "dormant", as it represents a valid
		 * relationship, but it is not functionally meaningful.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
	}

	__device_links_no_driver(dev);

	device_links_write_unlock();
}

/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant" and
 * invoke %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
684 */ 685 void device_links_driver_cleanup(struct device *dev) 686 { 687 struct device_link *link, *ln; 688 689 device_links_write_lock(); 690 691 list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { 692 if (link->flags & DL_FLAG_STATELESS) 693 continue; 694 695 WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); 696 WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND); 697 698 /* 699 * autoremove the links between this @dev and its consumer 700 * devices that are not active, i.e. where the link state 701 * has moved to DL_STATE_SUPPLIER_UNBIND. 702 */ 703 if (link->status == DL_STATE_SUPPLIER_UNBIND && 704 link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) 705 __device_link_del(&link->kref); 706 707 WRITE_ONCE(link->status, DL_STATE_DORMANT); 708 } 709 710 __device_links_no_driver(dev); 711 712 device_links_write_unlock(); 713 } 714 715 /** 716 * device_links_busy - Check if there are any busy links to consumers. 717 * @dev: Device to check. 718 * 719 * Check each consumer of the device and return 'true' if its link's status 720 * is one of "consumer probe" or "active" (meaning that the given consumer is 721 * probing right now or its driver is present). Otherwise, change the link 722 * state to "supplier unbind" to prevent the consumer from being probed 723 * successfully going forward. 724 * 725 * Return 'false' if there are no probing or active consumers. 726 * 727 * Links with the DL_FLAG_STATELESS flag set are ignored. 728 */ 729 bool device_links_busy(struct device *dev) 730 { 731 struct device_link *link; 732 bool ret = false; 733 734 device_links_write_lock(); 735 736 list_for_each_entry(link, &dev->links.consumers, s_node) { 737 if (link->flags & DL_FLAG_STATELESS) 738 continue; 739 740 if (link->status == DL_STATE_CONSUMER_PROBE 741 || link->status == DL_STATE_ACTIVE) { 742 ret = true; 743 break; 744 } 745 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); 746 } 747 748 dev->links.status = DL_DEV_UNBINDING; 749 750 device_links_write_unlock(); 751 return ret; 752 } 753 754 /** 755 * device_links_unbind_consumers - Force unbind consumers of the given device. 756 * @dev: Device to unbind the consumers of. 757 * 758 * Walk the list of links to consumers for @dev and if any of them is in the 759 * "consumer probe" state, wait for all device probes in progress to complete 760 * and start over. 761 * 762 * If that's not the case, change the status of the link to "supplier unbind" 763 * and check if the link was in the "active" state. If so, force the consumer 764 * driver to unbind and start over (the consumer will not re-probe as we have 765 * changed the state of the link already). 766 * 767 * Links with the DL_FLAG_STATELESS flag set are ignored. 
768 */ 769 void device_links_unbind_consumers(struct device *dev) 770 { 771 struct device_link *link; 772 773 start: 774 device_links_write_lock(); 775 776 list_for_each_entry(link, &dev->links.consumers, s_node) { 777 enum device_link_state status; 778 779 if (link->flags & DL_FLAG_STATELESS) 780 continue; 781 782 status = link->status; 783 if (status == DL_STATE_CONSUMER_PROBE) { 784 device_links_write_unlock(); 785 786 wait_for_device_probe(); 787 goto start; 788 } 789 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); 790 if (status == DL_STATE_ACTIVE) { 791 struct device *consumer = link->consumer; 792 793 get_device(consumer); 794 795 device_links_write_unlock(); 796 797 device_release_driver_internal(consumer, NULL, 798 consumer->parent); 799 put_device(consumer); 800 goto start; 801 } 802 } 803 804 device_links_write_unlock(); 805 } 806 807 /** 808 * device_links_purge - Delete existing links to other devices. 809 * @dev: Target device. 810 */ 811 static void device_links_purge(struct device *dev) 812 { 813 struct device_link *link, *ln; 814 815 /* 816 * Delete all of the remaining links from this device to any other 817 * devices (either consumers or suppliers). 818 */ 819 device_links_write_lock(); 820 821 list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { 822 WARN_ON(link->status == DL_STATE_ACTIVE); 823 __device_link_del(&link->kref); 824 } 825 826 list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) { 827 WARN_ON(link->status != DL_STATE_DORMANT && 828 link->status != DL_STATE_NONE); 829 __device_link_del(&link->kref); 830 } 831 832 device_links_write_unlock(); 833 } 834 835 /* Device links support end. */ 836 837 int (*platform_notify)(struct device *dev) = NULL; 838 int (*platform_notify_remove)(struct device *dev) = NULL; 839 static struct kobject *dev_kobj; 840 struct kobject *sysfs_dev_char_kobj; 841 struct kobject *sysfs_dev_block_kobj; 842 843 static DEFINE_MUTEX(device_hotplug_lock); 844 845 void lock_device_hotplug(void) 846 { 847 mutex_lock(&device_hotplug_lock); 848 } 849 850 void unlock_device_hotplug(void) 851 { 852 mutex_unlock(&device_hotplug_lock); 853 } 854 855 int lock_device_hotplug_sysfs(void) 856 { 857 if (mutex_trylock(&device_hotplug_lock)) 858 return 0; 859 860 /* Avoid busy looping (5 ms of sleep should do). */ 861 msleep(5); 862 return restart_syscall(); 863 } 864 865 #ifdef CONFIG_BLOCK 866 static inline int device_is_not_partition(struct device *dev) 867 { 868 return !(dev->type == &part_type); 869 } 870 #else 871 static inline int device_is_not_partition(struct device *dev) 872 { 873 return 1; 874 } 875 #endif 876 877 static int 878 device_platform_notify(struct device *dev, enum kobject_action action) 879 { 880 int ret; 881 882 ret = acpi_platform_notify(dev, action); 883 if (ret) 884 return ret; 885 886 ret = software_node_notify(dev, action); 887 if (ret) 888 return ret; 889 890 if (platform_notify && action == KOBJ_ADD) 891 platform_notify(dev); 892 else if (platform_notify_remove && action == KOBJ_REMOVE) 893 platform_notify_remove(dev); 894 return 0; 895 } 896 897 /** 898 * dev_driver_string - Return a device's driver name, if at all possible 899 * @dev: struct device to get the name of 900 * 901 * Will return the device's driver's name if it is bound to a device. If 902 * the device is not bound to a driver, it will return the name of the bus 903 * it is attached to. If it is not attached to a bus either, an empty 904 * string will be returned. 
905 */ 906 const char *dev_driver_string(const struct device *dev) 907 { 908 struct device_driver *drv; 909 910 /* dev->driver can change to NULL underneath us because of unbinding, 911 * so be careful about accessing it. dev->bus and dev->class should 912 * never change once they are set, so they don't need special care. 913 */ 914 drv = READ_ONCE(dev->driver); 915 return drv ? drv->name : 916 (dev->bus ? dev->bus->name : 917 (dev->class ? dev->class->name : "")); 918 } 919 EXPORT_SYMBOL(dev_driver_string); 920 921 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr) 922 923 static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr, 924 char *buf) 925 { 926 struct device_attribute *dev_attr = to_dev_attr(attr); 927 struct device *dev = kobj_to_dev(kobj); 928 ssize_t ret = -EIO; 929 930 if (dev_attr->show) 931 ret = dev_attr->show(dev, dev_attr, buf); 932 if (ret >= (ssize_t)PAGE_SIZE) { 933 printk("dev_attr_show: %pS returned bad count\n", 934 dev_attr->show); 935 } 936 return ret; 937 } 938 939 static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr, 940 const char *buf, size_t count) 941 { 942 struct device_attribute *dev_attr = to_dev_attr(attr); 943 struct device *dev = kobj_to_dev(kobj); 944 ssize_t ret = -EIO; 945 946 if (dev_attr->store) 947 ret = dev_attr->store(dev, dev_attr, buf, count); 948 return ret; 949 } 950 951 static const struct sysfs_ops dev_sysfs_ops = { 952 .show = dev_attr_show, 953 .store = dev_attr_store, 954 }; 955 956 #define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr) 957 958 ssize_t device_store_ulong(struct device *dev, 959 struct device_attribute *attr, 960 const char *buf, size_t size) 961 { 962 struct dev_ext_attribute *ea = to_ext_attr(attr); 963 int ret; 964 unsigned long new; 965 966 ret = kstrtoul(buf, 0, &new); 967 if (ret) 968 return ret; 969 *(unsigned long *)(ea->var) = new; 970 /* Always return full write size even if we didn't consume all */ 971 return size; 972 } 973 EXPORT_SYMBOL_GPL(device_store_ulong); 974 975 ssize_t device_show_ulong(struct device *dev, 976 struct device_attribute *attr, 977 char *buf) 978 { 979 struct dev_ext_attribute *ea = to_ext_attr(attr); 980 return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var)); 981 } 982 EXPORT_SYMBOL_GPL(device_show_ulong); 983 984 ssize_t device_store_int(struct device *dev, 985 struct device_attribute *attr, 986 const char *buf, size_t size) 987 { 988 struct dev_ext_attribute *ea = to_ext_attr(attr); 989 int ret; 990 long new; 991 992 ret = kstrtol(buf, 0, &new); 993 if (ret) 994 return ret; 995 996 if (new > INT_MAX || new < INT_MIN) 997 return -EINVAL; 998 *(int *)(ea->var) = new; 999 /* Always return full write size even if we didn't consume all */ 1000 return size; 1001 } 1002 EXPORT_SYMBOL_GPL(device_store_int); 1003 1004 ssize_t device_show_int(struct device *dev, 1005 struct device_attribute *attr, 1006 char *buf) 1007 { 1008 struct dev_ext_attribute *ea = to_ext_attr(attr); 1009 1010 return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var)); 1011 } 1012 EXPORT_SYMBOL_GPL(device_show_int); 1013 1014 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, 1015 const char *buf, size_t size) 1016 { 1017 struct dev_ext_attribute *ea = to_ext_attr(attr); 1018 1019 if (strtobool(buf, ea->var) < 0) 1020 return -EINVAL; 1021 1022 return size; 1023 } 1024 EXPORT_SYMBOL_GPL(device_store_bool); 1025 1026 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, 1027 
char *buf) 1028 { 1029 struct dev_ext_attribute *ea = to_ext_attr(attr); 1030 1031 return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var)); 1032 } 1033 EXPORT_SYMBOL_GPL(device_show_bool); 1034 1035 /** 1036 * device_release - free device structure. 1037 * @kobj: device's kobject. 1038 * 1039 * This is called once the reference count for the object 1040 * reaches 0. We forward the call to the device's release 1041 * method, which should handle actually freeing the structure. 1042 */ 1043 static void device_release(struct kobject *kobj) 1044 { 1045 struct device *dev = kobj_to_dev(kobj); 1046 struct device_private *p = dev->p; 1047 1048 /* 1049 * Some platform devices are driven without driver attached 1050 * and managed resources may have been acquired. Make sure 1051 * all resources are released. 1052 * 1053 * Drivers still can add resources into device after device 1054 * is deleted but alive, so release devres here to avoid 1055 * possible memory leak. 1056 */ 1057 devres_release_all(dev); 1058 1059 if (dev->release) 1060 dev->release(dev); 1061 else if (dev->type && dev->type->release) 1062 dev->type->release(dev); 1063 else if (dev->class && dev->class->dev_release) 1064 dev->class->dev_release(dev); 1065 else 1066 WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n", 1067 dev_name(dev)); 1068 kfree(p); 1069 } 1070 1071 static const void *device_namespace(struct kobject *kobj) 1072 { 1073 struct device *dev = kobj_to_dev(kobj); 1074 const void *ns = NULL; 1075 1076 if (dev->class && dev->class->ns_type) 1077 ns = dev->class->namespace(dev); 1078 1079 return ns; 1080 } 1081 1082 static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) 1083 { 1084 struct device *dev = kobj_to_dev(kobj); 1085 1086 if (dev->class && dev->class->get_ownership) 1087 dev->class->get_ownership(dev, uid, gid); 1088 } 1089 1090 static struct kobj_type device_ktype = { 1091 .release = device_release, 1092 .sysfs_ops = &dev_sysfs_ops, 1093 .namespace = device_namespace, 1094 .get_ownership = device_get_ownership, 1095 }; 1096 1097 1098 static int dev_uevent_filter(struct kset *kset, struct kobject *kobj) 1099 { 1100 struct kobj_type *ktype = get_ktype(kobj); 1101 1102 if (ktype == &device_ktype) { 1103 struct device *dev = kobj_to_dev(kobj); 1104 if (dev->bus) 1105 return 1; 1106 if (dev->class) 1107 return 1; 1108 } 1109 return 0; 1110 } 1111 1112 static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj) 1113 { 1114 struct device *dev = kobj_to_dev(kobj); 1115 1116 if (dev->bus) 1117 return dev->bus->name; 1118 if (dev->class) 1119 return dev->class->name; 1120 return NULL; 1121 } 1122 1123 static int dev_uevent(struct kset *kset, struct kobject *kobj, 1124 struct kobj_uevent_env *env) 1125 { 1126 struct device *dev = kobj_to_dev(kobj); 1127 int retval = 0; 1128 1129 /* add device node properties if present */ 1130 if (MAJOR(dev->devt)) { 1131 const char *tmp; 1132 const char *name; 1133 umode_t mode = 0; 1134 kuid_t uid = GLOBAL_ROOT_UID; 1135 kgid_t gid = GLOBAL_ROOT_GID; 1136 1137 add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt)); 1138 add_uevent_var(env, "MINOR=%u", MINOR(dev->devt)); 1139 name = device_get_devnode(dev, &mode, &uid, &gid, &tmp); 1140 if (name) { 1141 add_uevent_var(env, "DEVNAME=%s", name); 1142 if (mode) 1143 add_uevent_var(env, "DEVMODE=%#o", mode & 0777); 1144 if (!uid_eq(uid, GLOBAL_ROOT_UID)) 1145 add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid)); 
1146 if (!gid_eq(gid, GLOBAL_ROOT_GID)) 1147 add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid)); 1148 kfree(tmp); 1149 } 1150 } 1151 1152 if (dev->type && dev->type->name) 1153 add_uevent_var(env, "DEVTYPE=%s", dev->type->name); 1154 1155 if (dev->driver) 1156 add_uevent_var(env, "DRIVER=%s", dev->driver->name); 1157 1158 /* Add common DT information about the device */ 1159 of_device_uevent(dev, env); 1160 1161 /* have the bus specific function add its stuff */ 1162 if (dev->bus && dev->bus->uevent) { 1163 retval = dev->bus->uevent(dev, env); 1164 if (retval) 1165 pr_debug("device: '%s': %s: bus uevent() returned %d\n", 1166 dev_name(dev), __func__, retval); 1167 } 1168 1169 /* have the class specific function add its stuff */ 1170 if (dev->class && dev->class->dev_uevent) { 1171 retval = dev->class->dev_uevent(dev, env); 1172 if (retval) 1173 pr_debug("device: '%s': %s: class uevent() " 1174 "returned %d\n", dev_name(dev), 1175 __func__, retval); 1176 } 1177 1178 /* have the device type specific function add its stuff */ 1179 if (dev->type && dev->type->uevent) { 1180 retval = dev->type->uevent(dev, env); 1181 if (retval) 1182 pr_debug("device: '%s': %s: dev_type uevent() " 1183 "returned %d\n", dev_name(dev), 1184 __func__, retval); 1185 } 1186 1187 return retval; 1188 } 1189 1190 static const struct kset_uevent_ops device_uevent_ops = { 1191 .filter = dev_uevent_filter, 1192 .name = dev_uevent_name, 1193 .uevent = dev_uevent, 1194 }; 1195 1196 static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, 1197 char *buf) 1198 { 1199 struct kobject *top_kobj; 1200 struct kset *kset; 1201 struct kobj_uevent_env *env = NULL; 1202 int i; 1203 size_t count = 0; 1204 int retval; 1205 1206 /* search the kset, the device belongs to */ 1207 top_kobj = &dev->kobj; 1208 while (!top_kobj->kset && top_kobj->parent) 1209 top_kobj = top_kobj->parent; 1210 if (!top_kobj->kset) 1211 goto out; 1212 1213 kset = top_kobj->kset; 1214 if (!kset->uevent_ops || !kset->uevent_ops->uevent) 1215 goto out; 1216 1217 /* respect filter */ 1218 if (kset->uevent_ops && kset->uevent_ops->filter) 1219 if (!kset->uevent_ops->filter(kset, &dev->kobj)) 1220 goto out; 1221 1222 env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); 1223 if (!env) 1224 return -ENOMEM; 1225 1226 /* let the kset specific function add its keys */ 1227 retval = kset->uevent_ops->uevent(kset, &dev->kobj, env); 1228 if (retval) 1229 goto out; 1230 1231 /* copy keys to file */ 1232 for (i = 0; i < env->envp_idx; i++) 1233 count += sprintf(&buf[count], "%s\n", env->envp[i]); 1234 out: 1235 kfree(env); 1236 return count; 1237 } 1238 1239 static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, 1240 const char *buf, size_t count) 1241 { 1242 int rc; 1243 1244 rc = kobject_synth_uevent(&dev->kobj, buf, count); 1245 1246 if (rc) { 1247 dev_err(dev, "uevent: failed to send synthetic uevent\n"); 1248 return rc; 1249 } 1250 1251 return count; 1252 } 1253 static DEVICE_ATTR_RW(uevent); 1254 1255 static ssize_t online_show(struct device *dev, struct device_attribute *attr, 1256 char *buf) 1257 { 1258 bool val; 1259 1260 device_lock(dev); 1261 val = !dev->offline; 1262 device_unlock(dev); 1263 return sprintf(buf, "%u\n", val); 1264 } 1265 1266 static ssize_t online_store(struct device *dev, struct device_attribute *attr, 1267 const char *buf, size_t count) 1268 { 1269 bool val; 1270 int ret; 1271 1272 ret = strtobool(buf, &val); 1273 if (ret < 0) 1274 return ret; 1275 1276 ret = 
lock_device_hotplug_sysfs(); 1277 if (ret) 1278 return ret; 1279 1280 ret = val ? device_online(dev) : device_offline(dev); 1281 unlock_device_hotplug(); 1282 return ret < 0 ? ret : count; 1283 } 1284 static DEVICE_ATTR_RW(online); 1285 1286 int device_add_groups(struct device *dev, const struct attribute_group **groups) 1287 { 1288 return sysfs_create_groups(&dev->kobj, groups); 1289 } 1290 EXPORT_SYMBOL_GPL(device_add_groups); 1291 1292 void device_remove_groups(struct device *dev, 1293 const struct attribute_group **groups) 1294 { 1295 sysfs_remove_groups(&dev->kobj, groups); 1296 } 1297 EXPORT_SYMBOL_GPL(device_remove_groups); 1298 1299 union device_attr_group_devres { 1300 const struct attribute_group *group; 1301 const struct attribute_group **groups; 1302 }; 1303 1304 static int devm_attr_group_match(struct device *dev, void *res, void *data) 1305 { 1306 return ((union device_attr_group_devres *)res)->group == data; 1307 } 1308 1309 static void devm_attr_group_remove(struct device *dev, void *res) 1310 { 1311 union device_attr_group_devres *devres = res; 1312 const struct attribute_group *group = devres->group; 1313 1314 dev_dbg(dev, "%s: removing group %p\n", __func__, group); 1315 sysfs_remove_group(&dev->kobj, group); 1316 } 1317 1318 static void devm_attr_groups_remove(struct device *dev, void *res) 1319 { 1320 union device_attr_group_devres *devres = res; 1321 const struct attribute_group **groups = devres->groups; 1322 1323 dev_dbg(dev, "%s: removing groups %p\n", __func__, groups); 1324 sysfs_remove_groups(&dev->kobj, groups); 1325 } 1326 1327 /** 1328 * devm_device_add_group - given a device, create a managed attribute group 1329 * @dev: The device to create the group for 1330 * @grp: The attribute group to create 1331 * 1332 * This function creates a group for the first time. It will explicitly 1333 * warn and error if any of the attribute files being created already exist. 1334 * 1335 * Returns 0 on success or error code on failure. 1336 */ 1337 int devm_device_add_group(struct device *dev, const struct attribute_group *grp) 1338 { 1339 union device_attr_group_devres *devres; 1340 int error; 1341 1342 devres = devres_alloc(devm_attr_group_remove, 1343 sizeof(*devres), GFP_KERNEL); 1344 if (!devres) 1345 return -ENOMEM; 1346 1347 error = sysfs_create_group(&dev->kobj, grp); 1348 if (error) { 1349 devres_free(devres); 1350 return error; 1351 } 1352 1353 devres->group = grp; 1354 devres_add(dev, devres); 1355 return 0; 1356 } 1357 EXPORT_SYMBOL_GPL(devm_device_add_group); 1358 1359 /** 1360 * devm_device_remove_group: remove a managed group from a device 1361 * @dev: device to remove the group from 1362 * @grp: group to remove 1363 * 1364 * This function removes a group of attributes from a device. The attributes 1365 * previously have to have been created for this group, otherwise it will fail. 1366 */ 1367 void devm_device_remove_group(struct device *dev, 1368 const struct attribute_group *grp) 1369 { 1370 WARN_ON(devres_release(dev, devm_attr_group_remove, 1371 devm_attr_group_match, 1372 /* cast away const */ (void *)grp)); 1373 } 1374 EXPORT_SYMBOL_GPL(devm_device_remove_group); 1375 1376 /** 1377 * devm_device_add_groups - create a bunch of managed attribute groups 1378 * @dev: The device to create the group for 1379 * @groups: The attribute groups to create, NULL terminated 1380 * 1381 * This function creates a bunch of managed attribute groups. 
If an error 1382 * occurs when creating a group, all previously created groups will be 1383 * removed, unwinding everything back to the original state when this 1384 * function was called. It will explicitly warn and error if any of the 1385 * attribute files being created already exist. 1386 * 1387 * Returns 0 on success or error code from sysfs_create_group on failure. 1388 */ 1389 int devm_device_add_groups(struct device *dev, 1390 const struct attribute_group **groups) 1391 { 1392 union device_attr_group_devres *devres; 1393 int error; 1394 1395 devres = devres_alloc(devm_attr_groups_remove, 1396 sizeof(*devres), GFP_KERNEL); 1397 if (!devres) 1398 return -ENOMEM; 1399 1400 error = sysfs_create_groups(&dev->kobj, groups); 1401 if (error) { 1402 devres_free(devres); 1403 return error; 1404 } 1405 1406 devres->groups = groups; 1407 devres_add(dev, devres); 1408 return 0; 1409 } 1410 EXPORT_SYMBOL_GPL(devm_device_add_groups); 1411 1412 /** 1413 * devm_device_remove_groups - remove a list of managed groups 1414 * 1415 * @dev: The device for the groups to be removed from 1416 * @groups: NULL terminated list of groups to be removed 1417 * 1418 * If groups is not NULL, remove the specified groups from the device. 1419 */ 1420 void devm_device_remove_groups(struct device *dev, 1421 const struct attribute_group **groups) 1422 { 1423 WARN_ON(devres_release(dev, devm_attr_groups_remove, 1424 devm_attr_group_match, 1425 /* cast away const */ (void *)groups)); 1426 } 1427 EXPORT_SYMBOL_GPL(devm_device_remove_groups); 1428 1429 static int device_add_attrs(struct device *dev) 1430 { 1431 struct class *class = dev->class; 1432 const struct device_type *type = dev->type; 1433 int error; 1434 1435 if (class) { 1436 error = device_add_groups(dev, class->dev_groups); 1437 if (error) 1438 return error; 1439 } 1440 1441 if (type) { 1442 error = device_add_groups(dev, type->groups); 1443 if (error) 1444 goto err_remove_class_groups; 1445 } 1446 1447 error = device_add_groups(dev, dev->groups); 1448 if (error) 1449 goto err_remove_type_groups; 1450 1451 if (device_supports_offline(dev) && !dev->offline_disabled) { 1452 error = device_create_file(dev, &dev_attr_online); 1453 if (error) 1454 goto err_remove_dev_groups; 1455 } 1456 1457 return 0; 1458 1459 err_remove_dev_groups: 1460 device_remove_groups(dev, dev->groups); 1461 err_remove_type_groups: 1462 if (type) 1463 device_remove_groups(dev, type->groups); 1464 err_remove_class_groups: 1465 if (class) 1466 device_remove_groups(dev, class->dev_groups); 1467 1468 return error; 1469 } 1470 1471 static void device_remove_attrs(struct device *dev) 1472 { 1473 struct class *class = dev->class; 1474 const struct device_type *type = dev->type; 1475 1476 device_remove_file(dev, &dev_attr_online); 1477 device_remove_groups(dev, dev->groups); 1478 1479 if (type) 1480 device_remove_groups(dev, type->groups); 1481 1482 if (class) 1483 device_remove_groups(dev, class->dev_groups); 1484 } 1485 1486 static ssize_t dev_show(struct device *dev, struct device_attribute *attr, 1487 char *buf) 1488 { 1489 return print_dev_t(buf, dev->devt); 1490 } 1491 static DEVICE_ATTR_RO(dev); 1492 1493 /* /sys/devices/ */ 1494 struct kset *devices_kset; 1495 1496 /** 1497 * devices_kset_move_before - Move device in the devices_kset's list. 1498 * @deva: Device to move. 1499 * @devb: Device @deva should come before. 
1500 */ 1501 static void devices_kset_move_before(struct device *deva, struct device *devb) 1502 { 1503 if (!devices_kset) 1504 return; 1505 pr_debug("devices_kset: Moving %s before %s\n", 1506 dev_name(deva), dev_name(devb)); 1507 spin_lock(&devices_kset->list_lock); 1508 list_move_tail(&deva->kobj.entry, &devb->kobj.entry); 1509 spin_unlock(&devices_kset->list_lock); 1510 } 1511 1512 /** 1513 * devices_kset_move_after - Move device in the devices_kset's list. 1514 * @deva: Device to move 1515 * @devb: Device @deva should come after. 1516 */ 1517 static void devices_kset_move_after(struct device *deva, struct device *devb) 1518 { 1519 if (!devices_kset) 1520 return; 1521 pr_debug("devices_kset: Moving %s after %s\n", 1522 dev_name(deva), dev_name(devb)); 1523 spin_lock(&devices_kset->list_lock); 1524 list_move(&deva->kobj.entry, &devb->kobj.entry); 1525 spin_unlock(&devices_kset->list_lock); 1526 } 1527 1528 /** 1529 * devices_kset_move_last - move the device to the end of devices_kset's list. 1530 * @dev: device to move 1531 */ 1532 void devices_kset_move_last(struct device *dev) 1533 { 1534 if (!devices_kset) 1535 return; 1536 pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev)); 1537 spin_lock(&devices_kset->list_lock); 1538 list_move_tail(&dev->kobj.entry, &devices_kset->list); 1539 spin_unlock(&devices_kset->list_lock); 1540 } 1541 1542 /** 1543 * device_create_file - create sysfs attribute file for device. 1544 * @dev: device. 1545 * @attr: device attribute descriptor. 1546 */ 1547 int device_create_file(struct device *dev, 1548 const struct device_attribute *attr) 1549 { 1550 int error = 0; 1551 1552 if (dev) { 1553 WARN(((attr->attr.mode & S_IWUGO) && !attr->store), 1554 "Attribute %s: write permission without 'store'\n", 1555 attr->attr.name); 1556 WARN(((attr->attr.mode & S_IRUGO) && !attr->show), 1557 "Attribute %s: read permission without 'show'\n", 1558 attr->attr.name); 1559 error = sysfs_create_file(&dev->kobj, &attr->attr); 1560 } 1561 1562 return error; 1563 } 1564 EXPORT_SYMBOL_GPL(device_create_file); 1565 1566 /** 1567 * device_remove_file - remove sysfs attribute file. 1568 * @dev: device. 1569 * @attr: device attribute descriptor. 1570 */ 1571 void device_remove_file(struct device *dev, 1572 const struct device_attribute *attr) 1573 { 1574 if (dev) 1575 sysfs_remove_file(&dev->kobj, &attr->attr); 1576 } 1577 EXPORT_SYMBOL_GPL(device_remove_file); 1578 1579 /** 1580 * device_remove_file_self - remove sysfs attribute file from its own method. 1581 * @dev: device. 1582 * @attr: device attribute descriptor. 1583 * 1584 * See kernfs_remove_self() for details. 1585 */ 1586 bool device_remove_file_self(struct device *dev, 1587 const struct device_attribute *attr) 1588 { 1589 if (dev) 1590 return sysfs_remove_file_self(&dev->kobj, &attr->attr); 1591 else 1592 return false; 1593 } 1594 EXPORT_SYMBOL_GPL(device_remove_file_self); 1595 1596 /** 1597 * device_create_bin_file - create sysfs binary attribute file for device. 1598 * @dev: device. 1599 * @attr: device binary attribute descriptor. 1600 */ 1601 int device_create_bin_file(struct device *dev, 1602 const struct bin_attribute *attr) 1603 { 1604 int error = -EINVAL; 1605 if (dev) 1606 error = sysfs_create_bin_file(&dev->kobj, attr); 1607 return error; 1608 } 1609 EXPORT_SYMBOL_GPL(device_create_bin_file); 1610 1611 /** 1612 * device_remove_bin_file - remove sysfs binary attribute file 1613 * @dev: device. 1614 * @attr: device binary attribute descriptor. 
1615 */ 1616 void device_remove_bin_file(struct device *dev, 1617 const struct bin_attribute *attr) 1618 { 1619 if (dev) 1620 sysfs_remove_bin_file(&dev->kobj, attr); 1621 } 1622 EXPORT_SYMBOL_GPL(device_remove_bin_file); 1623 1624 static void klist_children_get(struct klist_node *n) 1625 { 1626 struct device_private *p = to_device_private_parent(n); 1627 struct device *dev = p->device; 1628 1629 get_device(dev); 1630 } 1631 1632 static void klist_children_put(struct klist_node *n) 1633 { 1634 struct device_private *p = to_device_private_parent(n); 1635 struct device *dev = p->device; 1636 1637 put_device(dev); 1638 } 1639 1640 /** 1641 * device_initialize - init device structure. 1642 * @dev: device. 1643 * 1644 * This prepares the device for use by other layers by initializing 1645 * its fields. 1646 * It is the first half of device_register(), if called by 1647 * that function, though it can also be called separately, so one 1648 * may use @dev's fields. In particular, get_device()/put_device() 1649 * may be used for reference counting of @dev after calling this 1650 * function. 1651 * 1652 * All fields in @dev must be initialized by the caller to 0, except 1653 * for those explicitly set to some other value. The simplest 1654 * approach is to use kzalloc() to allocate the structure containing 1655 * @dev. 1656 * 1657 * NOTE: Use put_device() to give up your reference instead of freeing 1658 * @dev directly once you have called this function. 1659 */ 1660 void device_initialize(struct device *dev) 1661 { 1662 dev->kobj.kset = devices_kset; 1663 kobject_init(&dev->kobj, &device_ktype); 1664 INIT_LIST_HEAD(&dev->dma_pools); 1665 mutex_init(&dev->mutex); 1666 lockdep_set_novalidate_class(&dev->mutex); 1667 spin_lock_init(&dev->devres_lock); 1668 INIT_LIST_HEAD(&dev->devres_head); 1669 device_pm_init(dev); 1670 set_dev_node(dev, -1); 1671 #ifdef CONFIG_GENERIC_MSI_IRQ 1672 INIT_LIST_HEAD(&dev->msi_list); 1673 #endif 1674 INIT_LIST_HEAD(&dev->links.consumers); 1675 INIT_LIST_HEAD(&dev->links.suppliers); 1676 dev->links.status = DL_DEV_NO_DRIVER; 1677 } 1678 EXPORT_SYMBOL_GPL(device_initialize); 1679 1680 struct kobject *virtual_device_parent(struct device *dev) 1681 { 1682 static struct kobject *virtual_dir = NULL; 1683 1684 if (!virtual_dir) 1685 virtual_dir = kobject_create_and_add("virtual", 1686 &devices_kset->kobj); 1687 1688 return virtual_dir; 1689 } 1690 1691 struct class_dir { 1692 struct kobject kobj; 1693 struct class *class; 1694 }; 1695 1696 #define to_class_dir(obj) container_of(obj, struct class_dir, kobj) 1697 1698 static void class_dir_release(struct kobject *kobj) 1699 { 1700 struct class_dir *dir = to_class_dir(kobj); 1701 kfree(dir); 1702 } 1703 1704 static const 1705 struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj) 1706 { 1707 struct class_dir *dir = to_class_dir(kobj); 1708 return dir->class->ns_type; 1709 } 1710 1711 static struct kobj_type class_dir_ktype = { 1712 .release = class_dir_release, 1713 .sysfs_ops = &kobj_sysfs_ops, 1714 .child_ns_type = class_dir_child_ns_type 1715 }; 1716 1717 static struct kobject * 1718 class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) 1719 { 1720 struct class_dir *dir; 1721 int retval; 1722 1723 dir = kzalloc(sizeof(*dir), GFP_KERNEL); 1724 if (!dir) 1725 return ERR_PTR(-ENOMEM); 1726 1727 dir->class = class; 1728 kobject_init(&dir->kobj, &class_dir_ktype); 1729 1730 dir->kobj.kset = &class->p->glue_dirs; 1731 1732 retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); 
1733 if (retval < 0) { 1734 kobject_put(&dir->kobj); 1735 return ERR_PTR(retval); 1736 } 1737 return &dir->kobj; 1738 } 1739 1740 static DEFINE_MUTEX(gdp_mutex); 1741 1742 static struct kobject *get_device_parent(struct device *dev, 1743 struct device *parent) 1744 { 1745 if (dev->class) { 1746 struct kobject *kobj = NULL; 1747 struct kobject *parent_kobj; 1748 struct kobject *k; 1749 1750 #ifdef CONFIG_BLOCK 1751 /* block disks show up in /sys/block */ 1752 if (sysfs_deprecated && dev->class == &block_class) { 1753 if (parent && parent->class == &block_class) 1754 return &parent->kobj; 1755 return &block_class.p->subsys.kobj; 1756 } 1757 #endif 1758 1759 /* 1760 * If we have no parent, we live in "virtual". 1761 * Class-devices with a non class-device as parent, live 1762 * in a "glue" directory to prevent namespace collisions. 1763 */ 1764 if (parent == NULL) 1765 parent_kobj = virtual_device_parent(dev); 1766 else if (parent->class && !dev->class->ns_type) 1767 return &parent->kobj; 1768 else 1769 parent_kobj = &parent->kobj; 1770 1771 mutex_lock(&gdp_mutex); 1772 1773 /* find our class-directory at the parent and reference it */ 1774 spin_lock(&dev->class->p->glue_dirs.list_lock); 1775 list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry) 1776 if (k->parent == parent_kobj) { 1777 kobj = kobject_get(k); 1778 break; 1779 } 1780 spin_unlock(&dev->class->p->glue_dirs.list_lock); 1781 if (kobj) { 1782 mutex_unlock(&gdp_mutex); 1783 return kobj; 1784 } 1785 1786 /* or create a new class-directory at the parent device */ 1787 k = class_dir_create_and_add(dev->class, parent_kobj); 1788 /* do not emit an uevent for this simple "glue" directory */ 1789 mutex_unlock(&gdp_mutex); 1790 return k; 1791 } 1792 1793 /* subsystems can specify a default root directory for their devices */ 1794 if (!parent && dev->bus && dev->bus->dev_root) 1795 return &dev->bus->dev_root->kobj; 1796 1797 if (parent) 1798 return &parent->kobj; 1799 return NULL; 1800 } 1801 1802 static inline bool live_in_glue_dir(struct kobject *kobj, 1803 struct device *dev) 1804 { 1805 if (!kobj || !dev->class || 1806 kobj->kset != &dev->class->p->glue_dirs) 1807 return false; 1808 return true; 1809 } 1810 1811 static inline struct kobject *get_glue_dir(struct device *dev) 1812 { 1813 return dev->kobj.parent; 1814 } 1815 1816 /* 1817 * make sure cleaning up dir as the last step, we need to make 1818 * sure .release handler of kobject is run with holding the 1819 * global lock 1820 */ 1821 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) 1822 { 1823 /* see if we live in a "glue" directory */ 1824 if (!live_in_glue_dir(glue_dir, dev)) 1825 return; 1826 1827 mutex_lock(&gdp_mutex); 1828 if (!kobject_has_children(glue_dir)) 1829 kobject_del(glue_dir); 1830 kobject_put(glue_dir); 1831 mutex_unlock(&gdp_mutex); 1832 } 1833 1834 static int device_add_class_symlinks(struct device *dev) 1835 { 1836 struct device_node *of_node = dev_of_node(dev); 1837 int error; 1838 1839 if (of_node) { 1840 error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node"); 1841 if (error) 1842 dev_warn(dev, "Error %d creating of_node link\n",error); 1843 /* An error here doesn't warrant bringing down the device */ 1844 } 1845 1846 if (!dev->class) 1847 return 0; 1848 1849 error = sysfs_create_link(&dev->kobj, 1850 &dev->class->p->subsys.kobj, 1851 "subsystem"); 1852 if (error) 1853 goto out_devnode; 1854 1855 if (dev->parent && device_is_not_partition(dev)) { 1856 error = sysfs_create_link(&dev->kobj, &dev->parent->kobj, 
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories and does not need symlinks */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}

static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}

/**
 * dev_set_name - set a device name
 * @dev: device
 * @fmt: format string for the device's name
 */
int dev_set_name(struct device *dev, const char *fmt, ...)
{
	va_list vargs;
	int err;

	va_start(vargs, fmt);
	err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
	va_end(vargs);
	return err;
}
EXPORT_SYMBOL_GPL(dev_set_name);

/**
 * device_to_dev_kobj - select a /sys/dev/ directory for the device
 * @dev: device
 *
 * By default we select char/ for new entries.  Setting class->dev_kobj
 * to NULL prevents an entry from being created.  class->dev_kobj must
 * be set (or cleared) before any devices are registered to the class
 * otherwise device_create_sys_dev_entry() and
 * device_remove_sys_dev_entry() will disagree about the presence of
 * the link.
 */
static struct kobject *device_to_dev_kobj(struct device *dev)
{
	struct kobject *kobj;

	if (dev->class)
		kobj = dev->class->dev_kobj;
	else
		kobj = sysfs_dev_char_kobj;

	return kobj;
}

static int device_create_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	int error = 0;
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		error = sysfs_create_link(kobj, &dev->kobj, devt_str);
	}

	return error;
}

static void device_remove_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		sysfs_remove_link(kobj, devt_str);
	}
}

static int device_private_init(struct device *dev)
{
	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
	if (!dev->p)
		return -ENOMEM;
	dev->p->device = dev;
	klist_init(&dev->p->klist_children, klist_children_get,
		   klist_children_put);
	INIT_LIST_HEAD(&dev->p->deferred_probe);
	return 0;
}
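/*
 * Example (illustrative sketch, hypothetical "foo" structure): the two-step
 * device_initialize()/device_add() pattern that the device_add() kerneldoc
 * below refers to.  Once device_initialize() has run, the structure is
 * reference-counted, so every error path must use put_device() instead of
 * freeing it directly; the release callback then frees the containing
 * structure (assumed here to have been allocated with kzalloc()).
 *
 *	struct foo {
 *		struct device dev;
 *	};
 *
 *	static void foo_release(struct device *dev)
 *	{
 *		kfree(container_of(dev, struct foo, dev));
 *	}
 *
 *	static int foo_register(struct foo *foo, struct device *parent)
 *	{
 *		int err;
 *
 *		device_initialize(&foo->dev);
 *		foo->dev.parent = parent;
 *		foo->dev.release = foo_release;
 *
 *		err = dev_set_name(&foo->dev, "foo0");
 *		if (err)
 *			goto err_put;
 *
 *		err = device_add(&foo->dev);
 *		if (err)
 *			goto err_put;
 *
 *		return 0;
 *
 *	err_put:
 *		put_device(&foo->dev);	// drops the initial reference
 *		return err;
 *	}
 */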
1987 * 1988 * This adds @dev to the kobject hierarchy via kobject_add(), adds it 1989 * to the global and sibling lists for the device, then 1990 * adds it to the other relevant subsystems of the driver model. 1991 * 1992 * Do not call this routine or device_register() more than once for 1993 * any device structure. The driver model core is not designed to work 1994 * with devices that get unregistered and then spring back to life. 1995 * (Among other things, it's very hard to guarantee that all references 1996 * to the previous incarnation of @dev have been dropped.) Allocate 1997 * and register a fresh new struct device instead. 1998 * 1999 * NOTE: _Never_ directly free @dev after calling this function, even 2000 * if it returned an error! Always use put_device() to give up your 2001 * reference instead. 2002 * 2003 * Rule of thumb is: if device_add() succeeds, you should call 2004 * device_del() when you want to get rid of it. If device_add() has 2005 * *not* succeeded, use *only* put_device() to drop the reference 2006 * count. 2007 */ 2008 int device_add(struct device *dev) 2009 { 2010 struct device *parent; 2011 struct kobject *kobj; 2012 struct class_interface *class_intf; 2013 int error = -EINVAL; 2014 struct kobject *glue_dir = NULL; 2015 2016 dev = get_device(dev); 2017 if (!dev) 2018 goto done; 2019 2020 if (!dev->p) { 2021 error = device_private_init(dev); 2022 if (error) 2023 goto done; 2024 } 2025 2026 /* 2027 * for statically allocated devices, which should all be converted 2028 * some day, we need to initialize the name. We prevent reading back 2029 * the name, and force the use of dev_name() 2030 */ 2031 if (dev->init_name) { 2032 dev_set_name(dev, "%s", dev->init_name); 2033 dev->init_name = NULL; 2034 } 2035 2036 /* subsystems can specify simple device enumeration */ 2037 if (!dev_name(dev) && dev->bus && dev->bus->dev_name) 2038 dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id); 2039 2040 if (!dev_name(dev)) { 2041 error = -EINVAL; 2042 goto name_error; 2043 } 2044 2045 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 2046 2047 parent = get_device(dev->parent); 2048 kobj = get_device_parent(dev, parent); 2049 if (IS_ERR(kobj)) { 2050 error = PTR_ERR(kobj); 2051 goto parent_error; 2052 } 2053 if (kobj) 2054 dev->kobj.parent = kobj; 2055 2056 /* use parent numa_node */ 2057 if (parent && (dev_to_node(dev) == NUMA_NO_NODE)) 2058 set_dev_node(dev, dev_to_node(parent)); 2059 2060 /* first, register with generic layer. 
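 * kobject_add() below hooks dev->kobj under the parent kobject chosen by
 * get_device_parent() above (if any) and creates the device's sysfs
 * directory.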
*/ 2061 /* we require the name to be set before, and pass NULL */ 2062 error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); 2063 if (error) { 2064 glue_dir = get_glue_dir(dev); 2065 goto Error; 2066 } 2067 2068 /* notify platform of device entry */ 2069 error = device_platform_notify(dev, KOBJ_ADD); 2070 if (error) 2071 goto platform_error; 2072 2073 error = device_create_file(dev, &dev_attr_uevent); 2074 if (error) 2075 goto attrError; 2076 2077 error = device_add_class_symlinks(dev); 2078 if (error) 2079 goto SymlinkError; 2080 error = device_add_attrs(dev); 2081 if (error) 2082 goto AttrsError; 2083 error = bus_add_device(dev); 2084 if (error) 2085 goto BusError; 2086 error = dpm_sysfs_add(dev); 2087 if (error) 2088 goto DPMError; 2089 device_pm_add(dev); 2090 2091 if (MAJOR(dev->devt)) { 2092 error = device_create_file(dev, &dev_attr_dev); 2093 if (error) 2094 goto DevAttrError; 2095 2096 error = device_create_sys_dev_entry(dev); 2097 if (error) 2098 goto SysEntryError; 2099 2100 devtmpfs_create_node(dev); 2101 } 2102 2103 /* Notify clients of device addition. This call must come 2104 * after dpm_sysfs_add() and before kobject_uevent(). 2105 */ 2106 if (dev->bus) 2107 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 2108 BUS_NOTIFY_ADD_DEVICE, dev); 2109 2110 kobject_uevent(&dev->kobj, KOBJ_ADD); 2111 bus_probe_device(dev); 2112 if (parent) 2113 klist_add_tail(&dev->p->knode_parent, 2114 &parent->p->klist_children); 2115 2116 if (dev->class) { 2117 mutex_lock(&dev->class->p->mutex); 2118 /* tie the class to the device */ 2119 klist_add_tail(&dev->p->knode_class, 2120 &dev->class->p->klist_devices); 2121 2122 /* notify any interfaces that the device is here */ 2123 list_for_each_entry(class_intf, 2124 &dev->class->p->interfaces, node) 2125 if (class_intf->add_dev) 2126 class_intf->add_dev(dev, class_intf); 2127 mutex_unlock(&dev->class->p->mutex); 2128 } 2129 done: 2130 put_device(dev); 2131 return error; 2132 SysEntryError: 2133 if (MAJOR(dev->devt)) 2134 device_remove_file(dev, &dev_attr_dev); 2135 DevAttrError: 2136 device_pm_remove(dev); 2137 dpm_sysfs_remove(dev); 2138 DPMError: 2139 bus_remove_device(dev); 2140 BusError: 2141 device_remove_attrs(dev); 2142 AttrsError: 2143 device_remove_class_symlinks(dev); 2144 SymlinkError: 2145 device_remove_file(dev, &dev_attr_uevent); 2146 attrError: 2147 device_platform_notify(dev, KOBJ_REMOVE); 2148 platform_error: 2149 kobject_uevent(&dev->kobj, KOBJ_REMOVE); 2150 glue_dir = get_glue_dir(dev); 2151 kobject_del(&dev->kobj); 2152 Error: 2153 cleanup_glue_dir(dev, glue_dir); 2154 parent_error: 2155 put_device(parent); 2156 name_error: 2157 kfree(dev->p); 2158 dev->p = NULL; 2159 goto done; 2160 } 2161 EXPORT_SYMBOL_GPL(device_add); 2162 2163 /** 2164 * device_register - register a device with the system. 2165 * @dev: pointer to the device structure 2166 * 2167 * This happens in two clean steps - initialize the device 2168 * and add it to the system. The two steps can be called 2169 * separately, but this is the easiest and most common. 2170 * I.e. you should only call the two helpers separately if 2171 * have a clearly defined need to use and refcount the device 2172 * before it is added to the hierarchy. 2173 * 2174 * For more information, see the kerneldoc for device_initialize() 2175 * and device_add(). 2176 * 2177 * NOTE: _Never_ directly free @dev after calling this function, even 2178 * if it returned an error! Always use put_device() to give up the 2179 * reference initialized in this function instead. 
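 *
 * A minimal usage sketch (illustrative only; "foo" and foo_release() are
 * hypothetical names, not defined in this file):
 *
 *	foo->dev.parent = parent;
 *	foo->dev.release = foo_release;
 *	dev_set_name(&foo->dev, "foo%u", id);
 *	err = device_register(&foo->dev);
 *	if (err) {
 *		put_device(&foo->dev);
 *		return err;
 *	}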
2180 */ 2181 int device_register(struct device *dev) 2182 { 2183 device_initialize(dev); 2184 return device_add(dev); 2185 } 2186 EXPORT_SYMBOL_GPL(device_register); 2187 2188 /** 2189 * get_device - increment reference count for device. 2190 * @dev: device. 2191 * 2192 * This simply forwards the call to kobject_get(), though 2193 * we do take care to provide for the case that we get a NULL 2194 * pointer passed in. 2195 */ 2196 struct device *get_device(struct device *dev) 2197 { 2198 return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL; 2199 } 2200 EXPORT_SYMBOL_GPL(get_device); 2201 2202 /** 2203 * put_device - decrement reference count. 2204 * @dev: device in question. 2205 */ 2206 void put_device(struct device *dev) 2207 { 2208 /* might_sleep(); */ 2209 if (dev) 2210 kobject_put(&dev->kobj); 2211 } 2212 EXPORT_SYMBOL_GPL(put_device); 2213 2214 /** 2215 * device_del - delete device from system. 2216 * @dev: device. 2217 * 2218 * This is the first part of the device unregistration 2219 * sequence. This removes the device from the lists we control 2220 * from here, has it removed from the other driver model 2221 * subsystems it was added to in device_add(), and removes it 2222 * from the kobject hierarchy. 2223 * 2224 * NOTE: this should be called manually _iff_ device_add() was 2225 * also called manually. 2226 */ 2227 void device_del(struct device *dev) 2228 { 2229 struct device *parent = dev->parent; 2230 struct kobject *glue_dir = NULL; 2231 struct class_interface *class_intf; 2232 2233 /* 2234 * Hold the device lock and set the "dead" flag to guarantee that 2235 * the update behavior is consistent with the other bitfields near 2236 * it and that we cannot have an asynchronous probe routine trying 2237 * to run while we are tearing out the bus/class/sysfs from 2238 * underneath the device. 2239 */ 2240 device_lock(dev); 2241 dev->p->dead = true; 2242 device_unlock(dev); 2243 2244 /* Notify clients of device removal. This call must come 2245 * before dpm_sysfs_remove(). 2246 */ 2247 if (dev->bus) 2248 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 2249 BUS_NOTIFY_DEL_DEVICE, dev); 2250 2251 dpm_sysfs_remove(dev); 2252 if (parent) 2253 klist_del(&dev->p->knode_parent); 2254 if (MAJOR(dev->devt)) { 2255 devtmpfs_delete_node(dev); 2256 device_remove_sys_dev_entry(dev); 2257 device_remove_file(dev, &dev_attr_dev); 2258 } 2259 if (dev->class) { 2260 device_remove_class_symlinks(dev); 2261 2262 mutex_lock(&dev->class->p->mutex); 2263 /* notify any interfaces that the device is now gone */ 2264 list_for_each_entry(class_intf, 2265 &dev->class->p->interfaces, node) 2266 if (class_intf->remove_dev) 2267 class_intf->remove_dev(dev, class_intf); 2268 /* remove the device from the class list */ 2269 klist_del(&dev->p->knode_class); 2270 mutex_unlock(&dev->class->p->mutex); 2271 } 2272 device_remove_file(dev, &dev_attr_uevent); 2273 device_remove_attrs(dev); 2274 bus_remove_device(dev); 2275 device_pm_remove(dev); 2276 driver_deferred_probe_del(dev); 2277 device_platform_notify(dev, KOBJ_REMOVE); 2278 device_remove_properties(dev); 2279 device_links_purge(dev); 2280 2281 if (dev->bus) 2282 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 2283 BUS_NOTIFY_REMOVED_DEVICE, dev); 2284 kobject_uevent(&dev->kobj, KOBJ_REMOVE); 2285 glue_dir = get_glue_dir(dev); 2286 kobject_del(&dev->kobj); 2287 cleanup_glue_dir(dev, glue_dir); 2288 put_device(parent); 2289 } 2290 EXPORT_SYMBOL_GPL(device_del); 2291 2292 /** 2293 * device_unregister - unregister device from system. 
2294 * @dev: device going away. 2295 * 2296 * We do this in two parts, like we do device_register(). First, 2297 * we remove it from all the subsystems with device_del(), then 2298 * we decrement the reference count via put_device(). If that 2299 * is the final reference count, the device will be cleaned up 2300 * via device_release() above. Otherwise, the structure will 2301 * stick around until the final reference to the device is dropped. 2302 */ 2303 void device_unregister(struct device *dev) 2304 { 2305 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 2306 device_del(dev); 2307 put_device(dev); 2308 } 2309 EXPORT_SYMBOL_GPL(device_unregister); 2310 2311 static struct device *prev_device(struct klist_iter *i) 2312 { 2313 struct klist_node *n = klist_prev(i); 2314 struct device *dev = NULL; 2315 struct device_private *p; 2316 2317 if (n) { 2318 p = to_device_private_parent(n); 2319 dev = p->device; 2320 } 2321 return dev; 2322 } 2323 2324 static struct device *next_device(struct klist_iter *i) 2325 { 2326 struct klist_node *n = klist_next(i); 2327 struct device *dev = NULL; 2328 struct device_private *p; 2329 2330 if (n) { 2331 p = to_device_private_parent(n); 2332 dev = p->device; 2333 } 2334 return dev; 2335 } 2336 2337 /** 2338 * device_get_devnode - path of device node file 2339 * @dev: device 2340 * @mode: returned file access mode 2341 * @uid: returned file owner 2342 * @gid: returned file group 2343 * @tmp: possibly allocated string 2344 * 2345 * Return the relative path of a possible device node. 2346 * Non-default names may need to allocate a memory to compose 2347 * a name. This memory is returned in tmp and needs to be 2348 * freed by the caller. 2349 */ 2350 const char *device_get_devnode(struct device *dev, 2351 umode_t *mode, kuid_t *uid, kgid_t *gid, 2352 const char **tmp) 2353 { 2354 char *s; 2355 2356 *tmp = NULL; 2357 2358 /* the device type may provide a specific name */ 2359 if (dev->type && dev->type->devnode) 2360 *tmp = dev->type->devnode(dev, mode, uid, gid); 2361 if (*tmp) 2362 return *tmp; 2363 2364 /* the class may provide a specific name */ 2365 if (dev->class && dev->class->devnode) 2366 *tmp = dev->class->devnode(dev, mode); 2367 if (*tmp) 2368 return *tmp; 2369 2370 /* return name without allocation, tmp == NULL */ 2371 if (strchr(dev_name(dev), '!') == NULL) 2372 return dev_name(dev); 2373 2374 /* replace '!' in the name with '/' */ 2375 s = kstrdup(dev_name(dev), GFP_KERNEL); 2376 if (!s) 2377 return NULL; 2378 strreplace(s, '!', '/'); 2379 return *tmp = s; 2380 } 2381 2382 /** 2383 * device_for_each_child - device child iterator. 2384 * @parent: parent struct device. 2385 * @fn: function to be called for each device. 2386 * @data: data for the callback. 2387 * 2388 * Iterate over @parent's child devices, and call @fn for each, 2389 * passing it @data. 2390 * 2391 * We check the return of @fn each time. If it returns anything 2392 * other than 0, we break out and return that value. 2393 */ 2394 int device_for_each_child(struct device *parent, void *data, 2395 int (*fn)(struct device *dev, void *data)) 2396 { 2397 struct klist_iter i; 2398 struct device *child; 2399 int error = 0; 2400 2401 if (!parent->p) 2402 return 0; 2403 2404 klist_iter_init(&parent->p->klist_children, &i); 2405 while (!error && (child = next_device(&i))) 2406 error = fn(child, data); 2407 klist_iter_exit(&i); 2408 return error; 2409 } 2410 EXPORT_SYMBOL_GPL(device_for_each_child); 2411 2412 /** 2413 * device_for_each_child_reverse - device child iterator in reversed order. 
2414 * @parent: parent struct device. 2415 * @fn: function to be called for each device. 2416 * @data: data for the callback. 2417 * 2418 * Iterate over @parent's child devices, and call @fn for each, 2419 * passing it @data. 2420 * 2421 * We check the return of @fn each time. If it returns anything 2422 * other than 0, we break out and return that value. 2423 */ 2424 int device_for_each_child_reverse(struct device *parent, void *data, 2425 int (*fn)(struct device *dev, void *data)) 2426 { 2427 struct klist_iter i; 2428 struct device *child; 2429 int error = 0; 2430 2431 if (!parent->p) 2432 return 0; 2433 2434 klist_iter_init(&parent->p->klist_children, &i); 2435 while ((child = prev_device(&i)) && !error) 2436 error = fn(child, data); 2437 klist_iter_exit(&i); 2438 return error; 2439 } 2440 EXPORT_SYMBOL_GPL(device_for_each_child_reverse); 2441 2442 /** 2443 * device_find_child - device iterator for locating a particular device. 2444 * @parent: parent struct device 2445 * @match: Callback function to check device 2446 * @data: Data to pass to match function 2447 * 2448 * This is similar to the device_for_each_child() function above, but it 2449 * returns a reference to a device that is 'found' for later use, as 2450 * determined by the @match callback. 2451 * 2452 * The callback should return 0 if the device doesn't match and non-zero 2453 * if it does. If the callback returns non-zero and a reference to the 2454 * current device can be obtained, this function will return to the caller 2455 * and not iterate over any more devices. 2456 * 2457 * NOTE: you will need to drop the reference with put_device() after use. 2458 */ 2459 struct device *device_find_child(struct device *parent, void *data, 2460 int (*match)(struct device *dev, void *data)) 2461 { 2462 struct klist_iter i; 2463 struct device *child; 2464 2465 if (!parent) 2466 return NULL; 2467 2468 klist_iter_init(&parent->p->klist_children, &i); 2469 while ((child = next_device(&i))) 2470 if (match(child, data) && get_device(child)) 2471 break; 2472 klist_iter_exit(&i); 2473 return child; 2474 } 2475 EXPORT_SYMBOL_GPL(device_find_child); 2476 2477 int __init devices_init(void) 2478 { 2479 devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); 2480 if (!devices_kset) 2481 return -ENOMEM; 2482 dev_kobj = kobject_create_and_add("dev", NULL); 2483 if (!dev_kobj) 2484 goto dev_kobj_err; 2485 sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj); 2486 if (!sysfs_dev_block_kobj) 2487 goto block_kobj_err; 2488 sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj); 2489 if (!sysfs_dev_char_kobj) 2490 goto char_kobj_err; 2491 2492 return 0; 2493 2494 char_kobj_err: 2495 kobject_put(sysfs_dev_block_kobj); 2496 block_kobj_err: 2497 kobject_put(dev_kobj); 2498 dev_kobj_err: 2499 kset_unregister(devices_kset); 2500 return -ENOMEM; 2501 } 2502 2503 static int device_check_offline(struct device *dev, void *not_used) 2504 { 2505 int ret; 2506 2507 ret = device_for_each_child(dev, NULL, device_check_offline); 2508 if (ret) 2509 return ret; 2510 2511 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0; 2512 } 2513 2514 /** 2515 * device_offline - Prepare the device for hot-removal. 2516 * @dev: Device to be put offline. 2517 * 2518 * Execute the device bus type's .offline() callback, if present, to prepare 2519 * the device for a subsequent hot-removal. If that succeeds, the device must 2520 * not be used until either it is removed or its bus type's .online() callback 2521 * is executed. 
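 *
 * Expected call pattern (sketch; error handling elided):
 *
 *	lock_device_hotplug();
 *	ret = device_offline(dev);
 *	unlock_device_hotplug();
 *
 * A positive return value means the device was already offline, 0 means it
 * has been taken offline (or does not support the offline state), and a
 * negative value is an error code.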
2522 * 2523 * Call under device_hotplug_lock. 2524 */ 2525 int device_offline(struct device *dev) 2526 { 2527 int ret; 2528 2529 if (dev->offline_disabled) 2530 return -EPERM; 2531 2532 ret = device_for_each_child(dev, NULL, device_check_offline); 2533 if (ret) 2534 return ret; 2535 2536 device_lock(dev); 2537 if (device_supports_offline(dev)) { 2538 if (dev->offline) { 2539 ret = 1; 2540 } else { 2541 ret = dev->bus->offline(dev); 2542 if (!ret) { 2543 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); 2544 dev->offline = true; 2545 } 2546 } 2547 } 2548 device_unlock(dev); 2549 2550 return ret; 2551 } 2552 2553 /** 2554 * device_online - Put the device back online after successful device_offline(). 2555 * @dev: Device to be put back online. 2556 * 2557 * If device_offline() has been successfully executed for @dev, but the device 2558 * has not been removed subsequently, execute its bus type's .online() callback 2559 * to indicate that the device can be used again. 2560 * 2561 * Call under device_hotplug_lock. 2562 */ 2563 int device_online(struct device *dev) 2564 { 2565 int ret = 0; 2566 2567 device_lock(dev); 2568 if (device_supports_offline(dev)) { 2569 if (dev->offline) { 2570 ret = dev->bus->online(dev); 2571 if (!ret) { 2572 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 2573 dev->offline = false; 2574 } 2575 } else { 2576 ret = 1; 2577 } 2578 } 2579 device_unlock(dev); 2580 2581 return ret; 2582 } 2583 2584 struct root_device { 2585 struct device dev; 2586 struct module *owner; 2587 }; 2588 2589 static inline struct root_device *to_root_device(struct device *d) 2590 { 2591 return container_of(d, struct root_device, dev); 2592 } 2593 2594 static void root_device_release(struct device *dev) 2595 { 2596 kfree(to_root_device(dev)); 2597 } 2598 2599 /** 2600 * __root_device_register - allocate and register a root device 2601 * @name: root device name 2602 * @owner: owner module of the root device, usually THIS_MODULE 2603 * 2604 * This function allocates a root device and registers it 2605 * using device_register(). In order to free the returned 2606 * device, use root_device_unregister(). 2607 * 2608 * Root devices are dummy devices which allow other devices 2609 * to be grouped under /sys/devices. Use this function to 2610 * allocate a root device and then use it as the parent of 2611 * any device which should appear under /sys/devices/{name} 2612 * 2613 * The /sys/devices/{name} directory will also contain a 2614 * 'module' symlink which points to the @owner directory 2615 * in sysfs. 2616 * 2617 * Returns &struct device pointer on success, or ERR_PTR() on error. 2618 * 2619 * Note: You probably want to use root_device_register(). 
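 *
 * Typical use through the root_device_register() wrapper (illustrative
 * sketch; "foo" and foo_root are made-up names):
 *
 *	foo_root = root_device_register("foo");
 *	if (IS_ERR(foo_root))
 *		return PTR_ERR(foo_root);
 *
 * after which foo_root can be passed as the parent of the devices that
 * should appear under /sys/devices/foo, and eventually released with
 * root_device_unregister(foo_root).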
2620 */ 2621 struct device *__root_device_register(const char *name, struct module *owner) 2622 { 2623 struct root_device *root; 2624 int err = -ENOMEM; 2625 2626 root = kzalloc(sizeof(struct root_device), GFP_KERNEL); 2627 if (!root) 2628 return ERR_PTR(err); 2629 2630 err = dev_set_name(&root->dev, "%s", name); 2631 if (err) { 2632 kfree(root); 2633 return ERR_PTR(err); 2634 } 2635 2636 root->dev.release = root_device_release; 2637 2638 err = device_register(&root->dev); 2639 if (err) { 2640 put_device(&root->dev); 2641 return ERR_PTR(err); 2642 } 2643 2644 #ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */ 2645 if (owner) { 2646 struct module_kobject *mk = &owner->mkobj; 2647 2648 err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module"); 2649 if (err) { 2650 device_unregister(&root->dev); 2651 return ERR_PTR(err); 2652 } 2653 root->owner = owner; 2654 } 2655 #endif 2656 2657 return &root->dev; 2658 } 2659 EXPORT_SYMBOL_GPL(__root_device_register); 2660 2661 /** 2662 * root_device_unregister - unregister and free a root device 2663 * @dev: device going away 2664 * 2665 * This function unregisters and cleans up a device that was created by 2666 * root_device_register(). 2667 */ 2668 void root_device_unregister(struct device *dev) 2669 { 2670 struct root_device *root = to_root_device(dev); 2671 2672 if (root->owner) 2673 sysfs_remove_link(&root->dev.kobj, "module"); 2674 2675 device_unregister(dev); 2676 } 2677 EXPORT_SYMBOL_GPL(root_device_unregister); 2678 2679 2680 static void device_create_release(struct device *dev) 2681 { 2682 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 2683 kfree(dev); 2684 } 2685 2686 static __printf(6, 0) struct device * 2687 device_create_groups_vargs(struct class *class, struct device *parent, 2688 dev_t devt, void *drvdata, 2689 const struct attribute_group **groups, 2690 const char *fmt, va_list args) 2691 { 2692 struct device *dev = NULL; 2693 int retval = -ENODEV; 2694 2695 if (class == NULL || IS_ERR(class)) 2696 goto error; 2697 2698 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2699 if (!dev) { 2700 retval = -ENOMEM; 2701 goto error; 2702 } 2703 2704 device_initialize(dev); 2705 dev->devt = devt; 2706 dev->class = class; 2707 dev->parent = parent; 2708 dev->groups = groups; 2709 dev->release = device_create_release; 2710 dev_set_drvdata(dev, drvdata); 2711 2712 retval = kobject_set_name_vargs(&dev->kobj, fmt, args); 2713 if (retval) 2714 goto error; 2715 2716 retval = device_add(dev); 2717 if (retval) 2718 goto error; 2719 2720 return dev; 2721 2722 error: 2723 put_device(dev); 2724 return ERR_PTR(retval); 2725 } 2726 2727 /** 2728 * device_create_vargs - creates a device and registers it with sysfs 2729 * @class: pointer to the struct class that this device should be registered to 2730 * @parent: pointer to the parent struct device of this new device, if any 2731 * @devt: the dev_t for the char device to be added 2732 * @drvdata: the data to be added to the device for callbacks 2733 * @fmt: string for the device's name 2734 * @args: va_list for the device's name 2735 * 2736 * This function can be used by char device classes. A struct device 2737 * will be created in sysfs, registered to the specified class. 2738 * 2739 * A "dev" file will be created, showing the dev_t for the device, if 2740 * the dev_t is not 0,0. 2741 * If a pointer to a parent struct device is passed in, the newly created 2742 * struct device will be a child of that device in sysfs. 2743 * The pointer to the struct device will be returned from the call. 
2744 * Any further sysfs files that might be required can be created using this 2745 * pointer. 2746 * 2747 * Returns &struct device pointer on success, or ERR_PTR() on error. 2748 * 2749 * Note: the struct class passed to this function must have previously 2750 * been created with a call to class_create(). 2751 */ 2752 struct device *device_create_vargs(struct class *class, struct device *parent, 2753 dev_t devt, void *drvdata, const char *fmt, 2754 va_list args) 2755 { 2756 return device_create_groups_vargs(class, parent, devt, drvdata, NULL, 2757 fmt, args); 2758 } 2759 EXPORT_SYMBOL_GPL(device_create_vargs); 2760 2761 /** 2762 * device_create - creates a device and registers it with sysfs 2763 * @class: pointer to the struct class that this device should be registered to 2764 * @parent: pointer to the parent struct device of this new device, if any 2765 * @devt: the dev_t for the char device to be added 2766 * @drvdata: the data to be added to the device for callbacks 2767 * @fmt: string for the device's name 2768 * 2769 * This function can be used by char device classes. A struct device 2770 * will be created in sysfs, registered to the specified class. 2771 * 2772 * A "dev" file will be created, showing the dev_t for the device, if 2773 * the dev_t is not 0,0. 2774 * If a pointer to a parent struct device is passed in, the newly created 2775 * struct device will be a child of that device in sysfs. 2776 * The pointer to the struct device will be returned from the call. 2777 * Any further sysfs files that might be required can be created using this 2778 * pointer. 2779 * 2780 * Returns &struct device pointer on success, or ERR_PTR() on error. 2781 * 2782 * Note: the struct class passed to this function must have previously 2783 * been created with a call to class_create(). 2784 */ 2785 struct device *device_create(struct class *class, struct device *parent, 2786 dev_t devt, void *drvdata, const char *fmt, ...) 2787 { 2788 va_list vargs; 2789 struct device *dev; 2790 2791 va_start(vargs, fmt); 2792 dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs); 2793 va_end(vargs); 2794 return dev; 2795 } 2796 EXPORT_SYMBOL_GPL(device_create); 2797 2798 /** 2799 * device_create_with_groups - creates a device and registers it with sysfs 2800 * @class: pointer to the struct class that this device should be registered to 2801 * @parent: pointer to the parent struct device of this new device, if any 2802 * @devt: the dev_t for the char device to be added 2803 * @drvdata: the data to be added to the device for callbacks 2804 * @groups: NULL-terminated list of attribute groups to be created 2805 * @fmt: string for the device's name 2806 * 2807 * This function can be used by char device classes. A struct device 2808 * will be created in sysfs, registered to the specified class. 2809 * Additional attributes specified in the groups parameter will also 2810 * be created automatically. 2811 * 2812 * A "dev" file will be created, showing the dev_t for the device, if 2813 * the dev_t is not 0,0. 2814 * If a pointer to a parent struct device is passed in, the newly created 2815 * struct device will be a child of that device in sysfs. 2816 * The pointer to the struct device will be returned from the call. 2817 * Any further sysfs files that might be required can be created using this 2818 * pointer. 2819 * 2820 * Returns &struct device pointer on success, or ERR_PTR() on error. 
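 *
 * For example (sketch only; foo_class, foo_groups, foo_major and the foo
 * drvdata pointer are hypothetical):
 *
 *	dev = device_create_with_groups(foo_class, NULL, MKDEV(foo_major, 0),
 *					foo, foo_groups, "foo%d", 0);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);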
2821 * 2822 * Note: the struct class passed to this function must have previously 2823 * been created with a call to class_create(). 2824 */ 2825 struct device *device_create_with_groups(struct class *class, 2826 struct device *parent, dev_t devt, 2827 void *drvdata, 2828 const struct attribute_group **groups, 2829 const char *fmt, ...) 2830 { 2831 va_list vargs; 2832 struct device *dev; 2833 2834 va_start(vargs, fmt); 2835 dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, 2836 fmt, vargs); 2837 va_end(vargs); 2838 return dev; 2839 } 2840 EXPORT_SYMBOL_GPL(device_create_with_groups); 2841 2842 static int __match_devt(struct device *dev, const void *data) 2843 { 2844 const dev_t *devt = data; 2845 2846 return dev->devt == *devt; 2847 } 2848 2849 /** 2850 * device_destroy - removes a device that was created with device_create() 2851 * @class: pointer to the struct class that this device was registered with 2852 * @devt: the dev_t of the device that was previously registered 2853 * 2854 * This call unregisters and cleans up a device that was created with a 2855 * call to device_create(). 2856 */ 2857 void device_destroy(struct class *class, dev_t devt) 2858 { 2859 struct device *dev; 2860 2861 dev = class_find_device(class, NULL, &devt, __match_devt); 2862 if (dev) { 2863 put_device(dev); 2864 device_unregister(dev); 2865 } 2866 } 2867 EXPORT_SYMBOL_GPL(device_destroy); 2868 2869 /** 2870 * device_rename - renames a device 2871 * @dev: the pointer to the struct device to be renamed 2872 * @new_name: the new name of the device 2873 * 2874 * It is the responsibility of the caller to provide mutual 2875 * exclusion between two different calls of device_rename 2876 * on the same device to ensure that new_name is valid and 2877 * won't conflict with other devices. 2878 * 2879 * Note: Don't call this function. Currently, the networking layer calls this 2880 * function, but that will change. The following text from Kay Sievers offers 2881 * some insight: 2882 * 2883 * Renaming devices is racy at many levels, symlinks and other stuff are not 2884 * replaced atomically, and you get a "move" uevent, but it's not easy to 2885 * connect the event to the old and new device. Device nodes are not renamed at 2886 * all, there isn't even support for that in the kernel now. 2887 * 2888 * In the meantime, during renaming, your target name might be taken by another 2889 * driver, creating conflicts. Or the old name is taken directly after you 2890 * renamed it -- then you get events for the same DEVPATH, before you even see 2891 * the "move" event. It's just a mess, and nothing new should ever rely on 2892 * kernel device renaming. Besides that, it's not even implemented now for 2893 * other things than (driver-core wise very simple) network devices. 2894 * 2895 * We are currently about to change network renaming in udev to completely 2896 * disallow renaming of devices in the same namespace as the kernel uses, 2897 * because we can't solve the problems properly, that arise with swapping names 2898 * of multiple interfaces without races. Means, renaming of eth[0-9]* will only 2899 * be allowed to some other name than eth[0-9]*, for the aforementioned 2900 * reasons. 2901 * 2902 * Make up a "real" name in the driver before you register anything, or add 2903 * some other attributes for userspace to find the device, or use udev to add 2904 * symlinks -- but never rename kernel devices later, it's a complete mess. 
We 2905 * don't even want to get into that and try to implement the missing pieces in 2906 * the core. We really have other pieces to fix in the driver core mess. :) 2907 */ 2908 int device_rename(struct device *dev, const char *new_name) 2909 { 2910 struct kobject *kobj = &dev->kobj; 2911 char *old_device_name = NULL; 2912 int error; 2913 2914 dev = get_device(dev); 2915 if (!dev) 2916 return -EINVAL; 2917 2918 dev_dbg(dev, "renaming to %s\n", new_name); 2919 2920 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); 2921 if (!old_device_name) { 2922 error = -ENOMEM; 2923 goto out; 2924 } 2925 2926 if (dev->class) { 2927 error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, 2928 kobj, old_device_name, 2929 new_name, kobject_namespace(kobj)); 2930 if (error) 2931 goto out; 2932 } 2933 2934 error = kobject_rename(kobj, new_name); 2935 if (error) 2936 goto out; 2937 2938 out: 2939 put_device(dev); 2940 2941 kfree(old_device_name); 2942 2943 return error; 2944 } 2945 EXPORT_SYMBOL_GPL(device_rename); 2946 2947 static int device_move_class_links(struct device *dev, 2948 struct device *old_parent, 2949 struct device *new_parent) 2950 { 2951 int error = 0; 2952 2953 if (old_parent) 2954 sysfs_remove_link(&dev->kobj, "device"); 2955 if (new_parent) 2956 error = sysfs_create_link(&dev->kobj, &new_parent->kobj, 2957 "device"); 2958 return error; 2959 } 2960 2961 /** 2962 * device_move - moves a device to a new parent 2963 * @dev: the pointer to the struct device to be moved 2964 * @new_parent: the new parent of the device (can be NULL) 2965 * @dpm_order: how to reorder the dpm_list 2966 */ 2967 int device_move(struct device *dev, struct device *new_parent, 2968 enum dpm_order dpm_order) 2969 { 2970 int error; 2971 struct device *old_parent; 2972 struct kobject *new_parent_kobj; 2973 2974 dev = get_device(dev); 2975 if (!dev) 2976 return -EINVAL; 2977 2978 device_pm_lock(); 2979 new_parent = get_device(new_parent); 2980 new_parent_kobj = get_device_parent(dev, new_parent); 2981 if (IS_ERR(new_parent_kobj)) { 2982 error = PTR_ERR(new_parent_kobj); 2983 put_device(new_parent); 2984 goto out; 2985 } 2986 2987 pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), 2988 __func__, new_parent ? dev_name(new_parent) : "<NULL>"); 2989 error = kobject_move(&dev->kobj, new_parent_kobj); 2990 if (error) { 2991 cleanup_glue_dir(dev, new_parent_kobj); 2992 put_device(new_parent); 2993 goto out; 2994 } 2995 old_parent = dev->parent; 2996 dev->parent = new_parent; 2997 if (old_parent) 2998 klist_remove(&dev->p->knode_parent); 2999 if (new_parent) { 3000 klist_add_tail(&dev->p->knode_parent, 3001 &new_parent->p->klist_children); 3002 set_dev_node(dev, dev_to_node(new_parent)); 3003 } 3004 3005 if (dev->class) { 3006 error = device_move_class_links(dev, old_parent, new_parent); 3007 if (error) { 3008 /* We ignore errors on cleanup since we're hosed anyway... 
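 * Best effort: put the class "device" link and the kobject back under
 * the old parent so the device is left roughly where it started.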
*/ 3009 device_move_class_links(dev, new_parent, old_parent); 3010 if (!kobject_move(&dev->kobj, &old_parent->kobj)) { 3011 if (new_parent) 3012 klist_remove(&dev->p->knode_parent); 3013 dev->parent = old_parent; 3014 if (old_parent) { 3015 klist_add_tail(&dev->p->knode_parent, 3016 &old_parent->p->klist_children); 3017 set_dev_node(dev, dev_to_node(old_parent)); 3018 } 3019 } 3020 cleanup_glue_dir(dev, new_parent_kobj); 3021 put_device(new_parent); 3022 goto out; 3023 } 3024 } 3025 switch (dpm_order) { 3026 case DPM_ORDER_NONE: 3027 break; 3028 case DPM_ORDER_DEV_AFTER_PARENT: 3029 device_pm_move_after(dev, new_parent); 3030 devices_kset_move_after(dev, new_parent); 3031 break; 3032 case DPM_ORDER_PARENT_BEFORE_DEV: 3033 device_pm_move_before(new_parent, dev); 3034 devices_kset_move_before(new_parent, dev); 3035 break; 3036 case DPM_ORDER_DEV_LAST: 3037 device_pm_move_last(dev); 3038 devices_kset_move_last(dev); 3039 break; 3040 } 3041 3042 put_device(old_parent); 3043 out: 3044 device_pm_unlock(); 3045 put_device(dev); 3046 return error; 3047 } 3048 EXPORT_SYMBOL_GPL(device_move); 3049 3050 /** 3051 * device_shutdown - call ->shutdown() on each device to shutdown. 3052 */ 3053 void device_shutdown(void) 3054 { 3055 struct device *dev, *parent; 3056 3057 wait_for_device_probe(); 3058 device_block_probing(); 3059 3060 spin_lock(&devices_kset->list_lock); 3061 /* 3062 * Walk the devices list backward, shutting down each in turn. 3063 * Beware that device unplug events may also start pulling 3064 * devices offline, even as the system is shutting down. 3065 */ 3066 while (!list_empty(&devices_kset->list)) { 3067 dev = list_entry(devices_kset->list.prev, struct device, 3068 kobj.entry); 3069 3070 /* 3071 * hold reference count of device's parent to 3072 * prevent it from being freed because parent's 3073 * lock is to be held 3074 */ 3075 parent = get_device(dev->parent); 3076 get_device(dev); 3077 /* 3078 * Make sure the device is off the kset list, in the 3079 * event that dev->*->shutdown() doesn't remove it. 
3080 */ 3081 list_del_init(&dev->kobj.entry); 3082 spin_unlock(&devices_kset->list_lock); 3083 3084 /* hold lock to avoid race with probe/release */ 3085 if (parent) 3086 device_lock(parent); 3087 device_lock(dev); 3088 3089 /* Don't allow any more runtime suspends */ 3090 pm_runtime_get_noresume(dev); 3091 pm_runtime_barrier(dev); 3092 3093 if (dev->class && dev->class->shutdown_pre) { 3094 if (initcall_debug) 3095 dev_info(dev, "shutdown_pre\n"); 3096 dev->class->shutdown_pre(dev); 3097 } 3098 if (dev->bus && dev->bus->shutdown) { 3099 if (initcall_debug) 3100 dev_info(dev, "shutdown\n"); 3101 dev->bus->shutdown(dev); 3102 } else if (dev->driver && dev->driver->shutdown) { 3103 if (initcall_debug) 3104 dev_info(dev, "shutdown\n"); 3105 dev->driver->shutdown(dev); 3106 } 3107 3108 device_unlock(dev); 3109 if (parent) 3110 device_unlock(parent); 3111 3112 put_device(dev); 3113 put_device(parent); 3114 3115 spin_lock(&devices_kset->list_lock); 3116 } 3117 spin_unlock(&devices_kset->list_lock); 3118 } 3119 3120 /* 3121 * Device logging functions 3122 */ 3123 3124 #ifdef CONFIG_PRINTK 3125 static int 3126 create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen) 3127 { 3128 const char *subsys; 3129 size_t pos = 0; 3130 3131 if (dev->class) 3132 subsys = dev->class->name; 3133 else if (dev->bus) 3134 subsys = dev->bus->name; 3135 else 3136 return 0; 3137 3138 pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys); 3139 if (pos >= hdrlen) 3140 goto overflow; 3141 3142 /* 3143 * Add device identifier DEVICE=: 3144 * b12:8 block dev_t 3145 * c127:3 char dev_t 3146 * n8 netdev ifindex 3147 * +sound:card0 subsystem:devname 3148 */ 3149 if (MAJOR(dev->devt)) { 3150 char c; 3151 3152 if (strcmp(subsys, "block") == 0) 3153 c = 'b'; 3154 else 3155 c = 'c'; 3156 pos++; 3157 pos += snprintf(hdr + pos, hdrlen - pos, 3158 "DEVICE=%c%u:%u", 3159 c, MAJOR(dev->devt), MINOR(dev->devt)); 3160 } else if (strcmp(subsys, "net") == 0) { 3161 struct net_device *net = to_net_dev(dev); 3162 3163 pos++; 3164 pos += snprintf(hdr + pos, hdrlen - pos, 3165 "DEVICE=n%u", net->ifindex); 3166 } else { 3167 pos++; 3168 pos += snprintf(hdr + pos, hdrlen - pos, 3169 "DEVICE=+%s:%s", subsys, dev_name(dev)); 3170 } 3171 3172 if (pos >= hdrlen) 3173 goto overflow; 3174 3175 return pos; 3176 3177 overflow: 3178 dev_WARN(dev, "device/subsystem name too long"); 3179 return 0; 3180 } 3181 3182 int dev_vprintk_emit(int level, const struct device *dev, 3183 const char *fmt, va_list args) 3184 { 3185 char hdr[128]; 3186 size_t hdrlen; 3187 3188 hdrlen = create_syslog_header(dev, hdr, sizeof(hdr)); 3189 3190 return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args); 3191 } 3192 EXPORT_SYMBOL(dev_vprintk_emit); 3193 3194 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) 3195 { 3196 va_list args; 3197 int r; 3198 3199 va_start(args, fmt); 3200 3201 r = dev_vprintk_emit(level, dev, fmt, args); 3202 3203 va_end(args); 3204 3205 return r; 3206 } 3207 EXPORT_SYMBOL(dev_printk_emit); 3208 3209 static void __dev_printk(const char *level, const struct device *dev, 3210 struct va_format *vaf) 3211 { 3212 if (dev) 3213 dev_printk_emit(level[1] - '0', dev, "%s %s: %pV", 3214 dev_driver_string(dev), dev_name(dev), vaf); 3215 else 3216 printk("%s(NULL device *): %pV", level, vaf); 3217 } 3218 3219 void dev_printk(const char *level, const struct device *dev, 3220 const char *fmt, ...) 
3221 { 3222 struct va_format vaf; 3223 va_list args; 3224 3225 va_start(args, fmt); 3226 3227 vaf.fmt = fmt; 3228 vaf.va = &args; 3229 3230 __dev_printk(level, dev, &vaf); 3231 3232 va_end(args); 3233 } 3234 EXPORT_SYMBOL(dev_printk); 3235 3236 #define define_dev_printk_level(func, kern_level) \ 3237 void func(const struct device *dev, const char *fmt, ...) \ 3238 { \ 3239 struct va_format vaf; \ 3240 va_list args; \ 3241 \ 3242 va_start(args, fmt); \ 3243 \ 3244 vaf.fmt = fmt; \ 3245 vaf.va = &args; \ 3246 \ 3247 __dev_printk(kern_level, dev, &vaf); \ 3248 \ 3249 va_end(args); \ 3250 } \ 3251 EXPORT_SYMBOL(func); 3252 3253 define_dev_printk_level(_dev_emerg, KERN_EMERG); 3254 define_dev_printk_level(_dev_alert, KERN_ALERT); 3255 define_dev_printk_level(_dev_crit, KERN_CRIT); 3256 define_dev_printk_level(_dev_err, KERN_ERR); 3257 define_dev_printk_level(_dev_warn, KERN_WARNING); 3258 define_dev_printk_level(_dev_notice, KERN_NOTICE); 3259 define_dev_printk_level(_dev_info, KERN_INFO); 3260 3261 #endif 3262 3263 static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) 3264 { 3265 return fwnode && !IS_ERR(fwnode->secondary); 3266 } 3267 3268 /** 3269 * set_primary_fwnode - Change the primary firmware node of a given device. 3270 * @dev: Device to handle. 3271 * @fwnode: New primary firmware node of the device. 3272 * 3273 * Set the device's firmware node pointer to @fwnode, but if a secondary 3274 * firmware node of the device is present, preserve it. 3275 */ 3276 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) 3277 { 3278 if (fwnode) { 3279 struct fwnode_handle *fn = dev->fwnode; 3280 3281 if (fwnode_is_primary(fn)) 3282 fn = fn->secondary; 3283 3284 if (fn) { 3285 WARN_ON(fwnode->secondary); 3286 fwnode->secondary = fn; 3287 } 3288 dev->fwnode = fwnode; 3289 } else { 3290 dev->fwnode = fwnode_is_primary(dev->fwnode) ? 3291 dev->fwnode->secondary : NULL; 3292 } 3293 } 3294 EXPORT_SYMBOL_GPL(set_primary_fwnode); 3295 3296 /** 3297 * set_secondary_fwnode - Change the secondary firmware node of a given device. 3298 * @dev: Device to handle. 3299 * @fwnode: New secondary firmware node of the device. 3300 * 3301 * If a primary firmware node of the device is present, set its secondary 3302 * pointer to @fwnode. Otherwise, set the device's firmware node pointer to 3303 * @fwnode. 3304 */ 3305 void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) 3306 { 3307 if (fwnode) 3308 fwnode->secondary = ERR_PTR(-ENODEV); 3309 3310 if (fwnode_is_primary(dev->fwnode)) 3311 dev->fwnode->secondary = fwnode; 3312 else 3313 dev->fwnode = fwnode; 3314 } 3315 3316 /** 3317 * device_set_of_node_from_dev - reuse device-tree node of another device 3318 * @dev: device whose device-tree node is being set 3319 * @dev2: device whose device-tree node is being reused 3320 * 3321 * Takes another reference to the new device-tree node after first dropping 3322 * any reference held to the old node. 3323 */ 3324 void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) 3325 { 3326 of_node_put(dev->of_node); 3327 dev->of_node = of_node_get(dev2->of_node); 3328 dev->of_node_reused = true; 3329 } 3330 EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); 3331
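
/*
 * Usage note for the fwnode helpers above (illustrative sketch only):
 * callers that want to attach additional properties to a device usually do
 * so by installing a secondary firmware node before the device is
 * registered, e.g.
 *
 *	set_secondary_fwnode(dev, fwnode);
 *
 * where "fwnode" is a firmware node handle the caller obtained elsewhere
 * (for instance one backing a set of software-defined properties).  Device
 * property lookups consult the primary node first and fall back to the
 * secondary one.
 */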