// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>

#include "base.h"
#include "power/power.h"

/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

/*
 * Root of the platform "pseudo" bus; used as the default parent for
 * platform devices registered without an explicit parent.
 */
struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 *
 * Return: a pointer to the resource or NULL on failure.
54 */ 55 struct resource *platform_get_resource(struct platform_device *dev, 56 unsigned int type, unsigned int num) 57 { 58 u32 i; 59 60 for (i = 0; i < dev->num_resources; i++) { 61 struct resource *r = &dev->resource[i]; 62 63 if (type == resource_type(r) && num-- == 0) 64 return r; 65 } 66 return NULL; 67 } 68 EXPORT_SYMBOL_GPL(platform_get_resource); 69 70 struct resource *platform_get_mem_or_io(struct platform_device *dev, 71 unsigned int num) 72 { 73 u32 i; 74 75 for (i = 0; i < dev->num_resources; i++) { 76 struct resource *r = &dev->resource[i]; 77 78 if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0) 79 return r; 80 } 81 return NULL; 82 } 83 EXPORT_SYMBOL_GPL(platform_get_mem_or_io); 84 85 #ifdef CONFIG_HAS_IOMEM 86 /** 87 * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a 88 * platform device and get resource 89 * 90 * @pdev: platform device to use both for memory resource lookup as well as 91 * resource management 92 * @index: resource index 93 * @res: optional output parameter to store a pointer to the obtained resource. 94 * 95 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code 96 * on failure. 97 */ 98 void __iomem * 99 devm_platform_get_and_ioremap_resource(struct platform_device *pdev, 100 unsigned int index, struct resource **res) 101 { 102 struct resource *r; 103 104 r = platform_get_resource(pdev, IORESOURCE_MEM, index); 105 if (res) 106 *res = r; 107 return devm_ioremap_resource(&pdev->dev, r); 108 } 109 EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource); 110 111 /** 112 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform 113 * device 114 * 115 * @pdev: platform device to use both for memory resource lookup as well as 116 * resource management 117 * @index: resource index 118 * 119 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code 120 * on failure. 
121 */ 122 void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev, 123 unsigned int index) 124 { 125 return devm_platform_get_and_ioremap_resource(pdev, index, NULL); 126 } 127 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource); 128 129 /** 130 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for 131 * a platform device, retrieve the 132 * resource by name 133 * 134 * @pdev: platform device to use both for memory resource lookup as well as 135 * resource management 136 * @name: name of the resource 137 * 138 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code 139 * on failure. 140 */ 141 void __iomem * 142 devm_platform_ioremap_resource_byname(struct platform_device *pdev, 143 const char *name) 144 { 145 struct resource *res; 146 147 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); 148 return devm_ioremap_resource(&pdev->dev, res); 149 } 150 EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname); 151 #endif /* CONFIG_HAS_IOMEM */ 152 153 static const struct cpumask *get_irq_affinity(struct platform_device *dev, 154 unsigned int num) 155 { 156 const struct cpumask *mask = NULL; 157 #ifndef CONFIG_SPARC 158 struct fwnode_handle *fwnode = dev_fwnode(&dev->dev); 159 160 if (is_of_node(fwnode)) 161 mask = of_irq_get_affinity(to_of_node(fwnode), num); 162 else if (is_acpi_device_node(fwnode)) 163 mask = acpi_irq_get_affinity(ACPI_HANDLE_FWNODE(fwnode), num); 164 #endif 165 166 return mask ?: cpu_possible_mask; 167 } 168 169 /** 170 * platform_get_irq_affinity - get an optional IRQ and its affinity for a device 171 * @dev: platform device 172 * @num: interrupt number index 173 * @affinity: optional cpumask pointer to get the affinity of a per-cpu interrupt 174 * 175 * Gets an interupt for a platform device. Device drivers should check the 176 * return value for errors so as to not pass a negative integer value to 177 * the request_irq() APIs. 
 * Optional affinity information is provided in the
 * affinity pointer if available, and NULL otherwise.
 *
 * Return: non-zero interrupt number on success, negative error number on failure.
 */
int platform_get_irq_affinity(struct platform_device *dev, unsigned int num,
			      const struct cpumask **affinity)
{
	int ret;
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		goto out_not_found;
	ret = dev->archdata.irqs[num];
	goto out;
#else
	struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);
	struct resource *r;

	/* Prefer the firmware (DT) description when one exists. */
	if (is_of_node(fwnode)) {
		ret = of_irq_get(to_of_node(fwnode), num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (is_acpi_device_node(fwnode)) {
		/* ACPI may need to (re-)enable a disabled IRQ resource. */
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), num, r);
			if (ret)
				goto out;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			goto out_not_found;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r) {
		/* The IRQ number is the start of the IORESOURCE_IRQ range. */
		ret = r->start;
		goto out;
	}

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && is_acpi_device_node(fwnode)) {
		ret = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			goto out;
	}

#endif
out_not_found:
	ret = -ENXIO;
out:
	/* IRQ 0 must never be handed to a caller. */
	if (WARN(!ret, "0 is an invalid IRQ number\n"))
		return -EINVAL;

	if (ret > 0 && affinity)
		*affinity = get_irq_affinity(dev, num);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_affinity);

/**
 * platform_get_irq_optional - get an optional interrupt for a device
 * @dev: platform device
 * @num: interrupt number index
 *
 * Gets an interrupt for a platform device. Device drivers should check the
 * return value for errors so as to not pass a negative integer value to
 * the request_irq() APIs. This is the same as platform_get_irq(), except
 * that it does not print an error message if an interrupt can not be
 * obtained.
 *
 * For example::
 *
 *	int irq = platform_get_irq_optional(pdev, 0);
 *	if (irq < 0)
 *		return irq;
 *
 * Return: non-zero interrupt number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
	return platform_get_irq_affinity(dev, num, NULL);
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);

/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device and prints an error message if finding the
 * IRQ fails. Device drivers should check the return value for errors so as to
 * not pass a negative integer value to the request_irq() APIs.
 *
 * For example::
 *
 *	int irq = platform_get_irq(pdev, 0);
 *	if (irq < 0)
 *		return irq;
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
300 */ 301 int platform_get_irq(struct platform_device *dev, unsigned int num) 302 { 303 int ret; 304 305 ret = platform_get_irq_optional(dev, num); 306 if (ret < 0) 307 return dev_err_probe(&dev->dev, ret, 308 "IRQ index %u not found\n", num); 309 310 return ret; 311 } 312 EXPORT_SYMBOL_GPL(platform_get_irq); 313 314 /** 315 * platform_irq_count - Count the number of IRQs a platform device uses 316 * @dev: platform device 317 * 318 * Return: Number of IRQs a platform device uses or EPROBE_DEFER 319 */ 320 int platform_irq_count(struct platform_device *dev) 321 { 322 int ret, nr = 0; 323 324 while ((ret = platform_get_irq_optional(dev, nr)) >= 0) 325 nr++; 326 327 if (ret == -EPROBE_DEFER) 328 return ret; 329 330 return nr; 331 } 332 EXPORT_SYMBOL_GPL(platform_irq_count); 333 334 struct irq_affinity_devres { 335 unsigned int count; 336 unsigned int irq[] __counted_by(count); 337 }; 338 339 static void platform_disable_acpi_irq(struct platform_device *pdev, int index) 340 { 341 struct resource *r; 342 343 r = platform_get_resource(pdev, IORESOURCE_IRQ, index); 344 if (r) 345 irqresource_disabled(r, 0); 346 } 347 348 static void devm_platform_get_irqs_affinity_release(struct device *dev, 349 void *res) 350 { 351 struct irq_affinity_devres *ptr = res; 352 int i; 353 354 for (i = 0; i < ptr->count; i++) { 355 irq_dispose_mapping(ptr->irq[i]); 356 357 if (is_acpi_device_node(dev_fwnode(dev))) 358 platform_disable_acpi_irq(to_platform_device(dev), i); 359 } 360 } 361 362 /** 363 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a 364 * device using an interrupt affinity descriptor 365 * @dev: platform device pointer 366 * @affd: affinity descriptor 367 * @minvec: minimum count of interrupt vectors 368 * @maxvec: maximum count of interrupt vectors 369 * @irqs: pointer holder for IRQ numbers 370 * 371 * Gets a set of IRQs for a platform device, and updates IRQ afffinty according 372 * to the passed affinity descriptor 373 * 374 * Return: Number of 
 * vectors on success, negative error number on failure.
 */
int devm_platform_get_irqs_affinity(struct platform_device *dev,
				    struct irq_affinity *affd,
				    unsigned int minvec,
				    unsigned int maxvec,
				    int **irqs)
{
	struct irq_affinity_devres *ptr;
	struct irq_affinity_desc *desc;
	size_t size;
	int i, ret, nvec;

	if (!affd)
		return -EPERM;

	if (maxvec < minvec)
		return -ERANGE;

	nvec = platform_irq_count(dev);
	if (nvec < 0)
		return nvec;

	if (nvec < minvec)
		return -ENOSPC;

	/* Clamp to what the affinity descriptor allows. */
	nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
	if (nvec < minvec)
		return -ENOSPC;

	if (nvec > maxvec)
		nvec = maxvec;

	size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
	ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Must be set before filling irq[] (flexible array __counted_by). */
	ptr->count = nvec;

	for (i = 0; i < nvec; i++) {
		int irq = platform_get_irq(dev, i);
		if (irq < 0) {
			ret = irq;
			goto err_free_devres;
		}
		ptr->irq[i] = irq;
	}

	desc = irq_create_affinity_masks(nvec, affd);
	if (!desc) {
		ret = -ENOMEM;
		goto err_free_devres;
	}

	for (i = 0; i < nvec; i++) {
		ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
		if (ret) {
			dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
				ptr->irq[i], ret);
			goto err_free_desc;
		}
	}

	devres_add(&dev->dev, ptr);

	/* The descriptors were consumed by irq_update_affinity_desc(). */
	kfree(desc);

	*irqs = ptr->irq;

	return nvec;

err_free_desc:
	kfree(desc);
err_free_devres:
	devres_free(ptr);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);

/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
					      unsigned int type,
					      const char *name)
{
	u32 i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		/* Unnamed resources can never match by name. */
		if (unlikely(!r->name))
			continue;

		if (type == resource_type(r) && !strcmp(r->name, name))
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);

/* Common by-name IRQ lookup: firmware description first, then resources. */
static int __platform_get_irq_byname(struct platform_device *dev,
				     const char *name)
{
	struct resource *r;
	int ret;

	ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name);
	if (ret > 0 || ret == -EPROBE_DEFER)
		return ret;

	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
	if (r) {
		/* IRQ 0 must never be handed to a caller. */
		if (WARN(!r->start, "0 is an invalid IRQ number\n"))
			return -EINVAL;
		return r->start;
	}

	return -ENXIO;
}

/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an IRQ like platform_get_irq(), but then by name rather then by index.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
	int ret;

	ret = __platform_get_irq_byname(dev, name);
	if (ret < 0)
		return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n",
				     name);
	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);

/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an optional IRQ by name like platform_get_irq_byname(). Except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
530 */ 531 int platform_get_irq_byname_optional(struct platform_device *dev, 532 const char *name) 533 { 534 return __platform_get_irq_byname(dev, name); 535 } 536 EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional); 537 538 /** 539 * platform_add_devices - add a numbers of platform devices 540 * @devs: array of platform devices to add 541 * @num: number of platform devices in array 542 * 543 * Return: 0 on success, negative error number on failure. 544 */ 545 int platform_add_devices(struct platform_device **devs, int num) 546 { 547 int i, ret = 0; 548 549 for (i = 0; i < num; i++) { 550 ret = platform_device_register(devs[i]); 551 if (ret) { 552 while (--i >= 0) 553 platform_device_unregister(devs[i]); 554 break; 555 } 556 } 557 558 return ret; 559 } 560 EXPORT_SYMBOL_GPL(platform_add_devices); 561 562 struct platform_object { 563 struct platform_device pdev; 564 char name[]; 565 }; 566 567 /* 568 * Set up default DMA mask for platform devices if the they weren't 569 * previously set by the architecture / DT. 570 */ 571 static void setup_pdev_dma_masks(struct platform_device *pdev) 572 { 573 pdev->dev.dma_parms = &pdev->dma_parms; 574 575 if (!pdev->dev.coherent_dma_mask) 576 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 577 if (!pdev->dev.dma_mask) { 578 pdev->platform_dma_mask = DMA_BIT_MASK(32); 579 pdev->dev.dma_mask = &pdev->platform_dma_mask; 580 } 581 }; 582 583 /** 584 * platform_device_put - destroy a platform device 585 * @pdev: platform device to free 586 * 587 * Free all memory associated with a platform device. This function must 588 * _only_ be externally called in error cases. All other usage is a bug. 
589 */ 590 void platform_device_put(struct platform_device *pdev) 591 { 592 if (!IS_ERR_OR_NULL(pdev)) 593 put_device(&pdev->dev); 594 } 595 EXPORT_SYMBOL_GPL(platform_device_put); 596 597 static void platform_device_release(struct device *dev) 598 { 599 struct platform_object *pa = container_of(dev, struct platform_object, 600 pdev.dev); 601 602 of_node_put(pa->pdev.dev.of_node); 603 kfree(pa->pdev.dev.platform_data); 604 kfree(pa->pdev.mfd_cell); 605 kfree(pa->pdev.resource); 606 kfree(pa); 607 } 608 609 /** 610 * platform_device_alloc - create a platform device 611 * @name: base name of the device we're adding 612 * @id: instance id 613 * 614 * Create a platform device object which can have other objects attached 615 * to it, and which will have attached objects freed when it is released. 616 */ 617 struct platform_device *platform_device_alloc(const char *name, int id) 618 { 619 struct platform_object *pa; 620 621 pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL); 622 if (pa) { 623 strcpy(pa->name, name); 624 pa->pdev.name = pa->name; 625 pa->pdev.id = id; 626 device_initialize(&pa->pdev.dev); 627 pa->pdev.dev.release = platform_device_release; 628 setup_pdev_dma_masks(&pa->pdev); 629 } 630 631 return pa ? &pa->pdev : NULL; 632 } 633 EXPORT_SYMBOL_GPL(platform_device_alloc); 634 635 /** 636 * platform_device_add_resources - add resources to a platform device 637 * @pdev: platform device allocated by platform_device_alloc to add resources to 638 * @res: set of resources that needs to be allocated for the device 639 * @num: number of resources 640 * 641 * Add a copy of the resources to the platform device. The memory 642 * associated with the resources will be freed when the platform device is 643 * released. 
644 */ 645 int platform_device_add_resources(struct platform_device *pdev, 646 const struct resource *res, unsigned int num) 647 { 648 struct resource *r = NULL; 649 650 if (res) { 651 r = kmemdup_array(res, num, sizeof(*r), GFP_KERNEL); 652 if (!r) 653 return -ENOMEM; 654 } 655 656 kfree(pdev->resource); 657 pdev->resource = r; 658 pdev->num_resources = num; 659 return 0; 660 } 661 EXPORT_SYMBOL_GPL(platform_device_add_resources); 662 663 /** 664 * platform_device_add_data - add platform-specific data to a platform device 665 * @pdev: platform device allocated by platform_device_alloc to add resources to 666 * @data: platform specific data for this platform device 667 * @size: size of platform specific data 668 * 669 * Add a copy of platform specific data to the platform device's 670 * platform_data pointer. The memory associated with the platform data 671 * will be freed when the platform device is released. 672 */ 673 int platform_device_add_data(struct platform_device *pdev, const void *data, 674 size_t size) 675 { 676 void *d = NULL; 677 678 if (data) { 679 d = kmemdup(data, size, GFP_KERNEL); 680 if (!d) 681 return -ENOMEM; 682 } 683 684 kfree(pdev->dev.platform_data); 685 pdev->dev.platform_data = d; 686 return 0; 687 } 688 EXPORT_SYMBOL_GPL(platform_device_add_data); 689 690 /** 691 * platform_device_add - add a platform device to device hierarchy 692 * @pdev: platform device we're adding 693 * 694 * This is part 2 of platform_device_register(), though may be called 695 * separately _iff_ pdev was allocated by platform_device_alloc(). 
 */
int platform_device_add(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	u32 i;
	int ret;

	if (!dev->parent)
		dev->parent = &platform_bus;

	dev->bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
		if (ret < 0)
			return ret;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	/* Claim every resource against its (defaulted) parent tree. */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n", dev_name(dev),
		 dev_name(dev->parent));

	ret = device_add(dev);
	if (ret)
		goto failed;

	return 0;

 failed:
	/* Give back an auto-allocated ID before releasing the resources. */
	if (pdev->id_auto) {
		ida_free(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	/* Release only the resources claimed so far (indices below i). */
	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	u32 i;

	if (!IS_ERR_OR_NULL(pdev)) {
		device_del(&pdev->dev);

		if (pdev->id_auto) {
			ida_free(&platform_devid_ida, pdev->id);
			pdev->id = PLATFORM_DEVID_AUTO;
		}

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			if (r->parent)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 *
 * NOTE: _Never_ directly free @pdev after calling this function, even if it
 * returned an error! Always use platform_device_put() to give up the
 * reference initialised in this function instead.
814 */ 815 int platform_device_register(struct platform_device *pdev) 816 { 817 device_initialize(&pdev->dev); 818 setup_pdev_dma_masks(pdev); 819 return platform_device_add(pdev); 820 } 821 EXPORT_SYMBOL_GPL(platform_device_register); 822 823 /** 824 * platform_device_unregister - unregister a platform-level device 825 * @pdev: platform device we're unregistering 826 * 827 * Unregistration is done in 2 steps. First we release all resources 828 * and remove it from the subsystem, then we drop reference count by 829 * calling platform_device_put(). 830 */ 831 void platform_device_unregister(struct platform_device *pdev) 832 { 833 platform_device_del(pdev); 834 platform_device_put(pdev); 835 } 836 EXPORT_SYMBOL_GPL(platform_device_unregister); 837 838 /** 839 * platform_device_register_full - add a platform-level device with 840 * resources and platform-specific data 841 * 842 * @pdevinfo: data used to create device 843 * 844 * Returns &struct platform_device pointer on success, or ERR_PTR() on error. 
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	/* Hold a reference on the OF node for the lifetime of the device. */
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		pdev->platform_dma_mask = pdevinfo->dma_mask;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = device_create_managed_software_node(&pdev->dev,
							  pdevinfo->properties, NULL);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		/* Drop the ACPI companion before the final put. */
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
			       struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);

/* Probe stub installed after __platform_driver_probe(): always refuses. */
static int platform_probe_fail(struct platform_device *pdev)
{
	return -ENXIO;
}

/* bus_for_each_dev() match helper: is @dev bound to @driver? */
static int is_bound_to_driver(struct device *dev, void *driver)
{
	if (dev->driver == driver)
		return 1;
	return 0;
}

/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
		       drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = __platform_driver_register(drv, module);
	if (retval)
		return retval;

	/* Force all new probes of this driver to fail */
	drv->probe = platform_probe_fail;

	/* Walk all platform devices and see if any actually bound to this driver.
	 * If not, return an error as the device should have done so by now.
	 */
	if (!bus_for_each_dev(&platform_bus_type, NULL, &drv->driver, is_bound_to_driver)) {
		retval = -ENODEV;
		platform_driver_unregister(drv);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);

/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
1015 */ 1016 struct platform_device * __init_or_module __platform_create_bundle( 1017 struct platform_driver *driver, 1018 int (*probe)(struct platform_device *), 1019 struct resource *res, unsigned int n_res, 1020 const void *data, size_t size, struct module *module) 1021 { 1022 struct platform_device *pdev; 1023 int error; 1024 1025 pdev = platform_device_alloc(driver->driver.name, PLATFORM_DEVID_NONE); 1026 if (!pdev) { 1027 error = -ENOMEM; 1028 goto err_out; 1029 } 1030 1031 error = platform_device_add_resources(pdev, res, n_res); 1032 if (error) 1033 goto err_pdev_put; 1034 1035 error = platform_device_add_data(pdev, data, size); 1036 if (error) 1037 goto err_pdev_put; 1038 1039 error = platform_device_add(pdev); 1040 if (error) 1041 goto err_pdev_put; 1042 1043 error = __platform_driver_probe(driver, probe, module); 1044 if (error) 1045 goto err_pdev_del; 1046 1047 return pdev; 1048 1049 err_pdev_del: 1050 platform_device_del(pdev); 1051 err_pdev_put: 1052 platform_device_put(pdev); 1053 err_out: 1054 return ERR_PTR(error); 1055 } 1056 EXPORT_SYMBOL_GPL(__platform_create_bundle); 1057 1058 /** 1059 * __platform_register_drivers - register an array of platform drivers 1060 * @drivers: an array of drivers to register 1061 * @count: the number of drivers to register 1062 * @owner: module owning the drivers 1063 * 1064 * Registers platform drivers specified by an array. On failure to register a 1065 * driver, all previously registered drivers will be unregistered. Callers of 1066 * this API should use platform_unregister_drivers() to unregister drivers in 1067 * the reverse order. 1068 * 1069 * Returns: 0 on success or a negative error code on failure. 
1070 */ 1071 int __platform_register_drivers(struct platform_driver * const *drivers, 1072 unsigned int count, struct module *owner) 1073 { 1074 unsigned int i; 1075 int err; 1076 1077 for (i = 0; i < count; i++) { 1078 pr_debug("registering platform driver %ps\n", drivers[i]); 1079 1080 err = __platform_driver_register(drivers[i], owner); 1081 if (err < 0) { 1082 pr_err("failed to register platform driver %ps: %d\n", 1083 drivers[i], err); 1084 goto error; 1085 } 1086 } 1087 1088 return 0; 1089 1090 error: 1091 while (i--) { 1092 pr_debug("unregistering platform driver %ps\n", drivers[i]); 1093 platform_driver_unregister(drivers[i]); 1094 } 1095 1096 return err; 1097 } 1098 EXPORT_SYMBOL_GPL(__platform_register_drivers); 1099 1100 /** 1101 * platform_unregister_drivers - unregister an array of platform drivers 1102 * @drivers: an array of drivers to unregister 1103 * @count: the number of drivers to unregister 1104 * 1105 * Unregisters platform drivers specified by an array. This is typically used 1106 * to complement an earlier call to platform_register_drivers(). Drivers are 1107 * unregistered in the reverse order in which they were registered. 
1108 */ 1109 void platform_unregister_drivers(struct platform_driver * const *drivers, 1110 unsigned int count) 1111 { 1112 while (count--) { 1113 pr_debug("unregistering platform driver %ps\n", drivers[count]); 1114 platform_driver_unregister(drivers[count]); 1115 } 1116 } 1117 EXPORT_SYMBOL_GPL(platform_unregister_drivers); 1118 1119 static const struct platform_device_id *platform_match_id( 1120 const struct platform_device_id *id, 1121 struct platform_device *pdev) 1122 { 1123 while (id->name[0]) { 1124 if (strcmp(pdev->name, id->name) == 0) { 1125 pdev->id_entry = id; 1126 return id; 1127 } 1128 id++; 1129 } 1130 return NULL; 1131 } 1132 1133 #ifdef CONFIG_PM_SLEEP 1134 1135 static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) 1136 { 1137 struct platform_driver *pdrv = to_platform_driver(dev->driver); 1138 struct platform_device *pdev = to_platform_device(dev); 1139 int ret = 0; 1140 1141 if (dev->driver && pdrv->suspend) 1142 ret = pdrv->suspend(pdev, mesg); 1143 1144 return ret; 1145 } 1146 1147 static int platform_legacy_resume(struct device *dev) 1148 { 1149 struct platform_driver *pdrv = to_platform_driver(dev->driver); 1150 struct platform_device *pdev = to_platform_device(dev); 1151 int ret = 0; 1152 1153 if (dev->driver && pdrv->resume) 1154 ret = pdrv->resume(pdev); 1155 1156 return ret; 1157 } 1158 1159 #endif /* CONFIG_PM_SLEEP */ 1160 1161 #ifdef CONFIG_SUSPEND 1162 1163 int platform_pm_suspend(struct device *dev) 1164 { 1165 const struct device_driver *drv = dev->driver; 1166 int ret = 0; 1167 1168 if (!drv) 1169 return 0; 1170 1171 if (drv->pm) { 1172 if (drv->pm->suspend) 1173 ret = drv->pm->suspend(dev); 1174 } else { 1175 ret = platform_legacy_suspend(dev, PMSG_SUSPEND); 1176 } 1177 1178 return ret; 1179 } 1180 1181 int platform_pm_resume(struct device *dev) 1182 { 1183 const struct device_driver *drv = dev->driver; 1184 int ret = 0; 1185 1186 if (!drv) 1187 return 0; 1188 1189 if (drv->pm) { 1190 if (drv->pm->resume) 1191 
ret = drv->pm->resume(dev); 1192 } else { 1193 ret = platform_legacy_resume(dev); 1194 } 1195 1196 return ret; 1197 } 1198 1199 #endif /* CONFIG_SUSPEND */ 1200 1201 #ifdef CONFIG_HIBERNATE_CALLBACKS 1202 1203 int platform_pm_freeze(struct device *dev) 1204 { 1205 const struct device_driver *drv = dev->driver; 1206 int ret = 0; 1207 1208 if (!drv) 1209 return 0; 1210 1211 if (drv->pm) { 1212 if (drv->pm->freeze) 1213 ret = drv->pm->freeze(dev); 1214 } else { 1215 ret = platform_legacy_suspend(dev, PMSG_FREEZE); 1216 } 1217 1218 return ret; 1219 } 1220 1221 int platform_pm_thaw(struct device *dev) 1222 { 1223 const struct device_driver *drv = dev->driver; 1224 int ret = 0; 1225 1226 if (!drv) 1227 return 0; 1228 1229 if (drv->pm) { 1230 if (drv->pm->thaw) 1231 ret = drv->pm->thaw(dev); 1232 } else { 1233 ret = platform_legacy_resume(dev); 1234 } 1235 1236 return ret; 1237 } 1238 1239 int platform_pm_poweroff(struct device *dev) 1240 { 1241 const struct device_driver *drv = dev->driver; 1242 int ret = 0; 1243 1244 if (!drv) 1245 return 0; 1246 1247 if (drv->pm) { 1248 if (drv->pm->poweroff) 1249 ret = drv->pm->poweroff(dev); 1250 } else { 1251 ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); 1252 } 1253 1254 return ret; 1255 } 1256 1257 int platform_pm_restore(struct device *dev) 1258 { 1259 const struct device_driver *drv = dev->driver; 1260 int ret = 0; 1261 1262 if (!drv) 1263 return 0; 1264 1265 if (drv->pm) { 1266 if (drv->pm->restore) 1267 ret = drv->pm->restore(dev); 1268 } else { 1269 ret = platform_legacy_resume(dev); 1270 } 1271 1272 return ret; 1273 } 1274 1275 #endif /* CONFIG_HIBERNATE_CALLBACKS */ 1276 1277 /* modalias support enables more hands-off userspace setup: 1278 * (a) environment variable lets new-style hotplug events work once system is 1279 * fully running: "modprobe $MODALIAS" 1280 * (b) sysfs attribute lets new-style coldplug recover from hotplug events 1281 * mishandled before system is fully running: "modprobe $(cat modalias)" 1282 */ 
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	/* Prefer an OF-style alias, then an ACPI one, before the generic form. */
	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "platform:%s\n", pdev->name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_numa_node.attr,
	NULL,
};

static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);

	/* Hide the numa_node attribute for devices without a NUMA node. */
	if (a == &dev_attr_numa_node.attr &&
			dev_to_node(dev) == NUMA_NO_NODE)
		return 0;

	return a->mode;
}

static const struct attribute_group platform_dev_group = {
	.attrs = platform_dev_attrs,
	.is_visible = platform_dev_attrs_visible,
};
__ATTRIBUTE_GROUPS(platform_dev);


/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'. Driver IDs are simply
 * "<name>". So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, const struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);
	int ret;

	/* When driver_override is set, only bind to the matching driver */
	ret = device_match_driver_override(dev, drv);
	if (ret >= 0)
		return ret;

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}

/* Emit a MODALIAS uevent variable: OF first, then ACPI, then platform:. */
static int platform_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct platform_device *pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
		       pdev->name);
	return 0;
}

static int platform_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	/*
	 * A driver registered using platform_driver_probe() cannot be bound
	 * again later because the probe function usually lives in __init code
	 * and so is gone. For these drivers .probe is set to
	 * platform_probe_fail in __platform_driver_probe(). Don't even prepare
	 * clocks and PM domains for these to match the traditional behaviour.
 */
	if (unlikely(drv->probe == platform_probe_fail))
		return -ENXIO;

	/* Apply assigned-clocks DT defaults before the driver's probe runs. */
	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, PD_FLAG_ATTACH_POWER_ON |
					 PD_FLAG_DETACH_POWER_OFF);
	if (ret)
		goto out;

	if (drv->probe)
		ret = drv->probe(dev);

out:
	/* Drivers that forbid deferral get a hard failure instead. */
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}

static void platform_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	if (drv->remove)
		drv->remove(dev);
}

static void platform_shutdown(struct device *_dev)
{
	struct platform_device *dev = to_platform_device(_dev);
	struct platform_driver *drv;

	/* A device may have no driver bound at shutdown time. */
	if (!_dev->driver)
		return;

	drv = to_platform_driver(_dev->driver);
	if (drv->shutdown)
		drv->shutdown(dev);
}

static int platform_dma_configure(struct device *dev)
{
	struct device_driver *drv = READ_ONCE(dev->driver);
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	enum dev_dma_attr attr;
	int ret = 0;

	/* Configure DMA from OF or ACPI firmware data, whichever is present. */
	if (is_of_node(fwnode)) {
		ret = of_dma_configure(dev, to_of_node(fwnode), true);
	} else if (is_acpi_device_node(fwnode)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
		ret = acpi_dma_configure(dev, attr);
	}
	/* @dev->driver may not be valid when we're called from the IOMMU layer */
	if (ret || !drv || to_platform_driver(drv)->driver_managed_dma)
		return ret;

	ret = iommu_device_use_default_domain(dev);
	if (ret)
		arch_teardown_dma_ops(dev);

	return ret;
}

static void platform_dma_cleanup(struct device *dev)
{
	struct platform_driver *drv =
to_platform_driver(dev->driver);

	if (!drv->driver_managed_dma)
		iommu_device_unuse_default_domain(dev);
}

static const struct dev_pm_ops platform_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
	USE_PLATFORM_PM_SLEEP_OPS
};

const struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.driver_override = true,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.probe		= platform_probe,
	.remove		= platform_remove,
	.shutdown	= platform_shutdown,
	.dma_configure	= platform_dma_configure,
	.dma_cleanup	= platform_dma_cleanup,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);

/* Adapter so platform_match() can be used as a bus_find_device() callback. */
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}

/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 */
struct device *platform_find_device_by_driver(struct device *start,
					      const struct device_driver *drv)
{
	return bus_find_device(&platform_bus_type, start, drv,
			       __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);

void __weak __init early_platform_cleanup(void) { }

int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	/* On registration failure, drop the reference device_register took. */
	error = device_register(&platform_bus);
	if (error) {
		put_device(&platform_bus);
		return error;
	}
	error = bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);

	return error;
}