// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>

#include "base.h"
#include "power/power.h"

/* For automatically allocated device IDs (PLATFORM_DEVID_AUTO) */
static DEFINE_IDA(platform_devid_ida);

/*
 * Root device of the platform "pseudo" bus; used as the default parent
 * for platform devices registered without an explicit parent.
 */
struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index within resources of the given @type
 *
 * Return: a pointer to the resource or NULL on failure.
 */
struct resource *platform_get_resource(struct platform_device *dev,
				       unsigned int type, unsigned int num)
{
	u32 i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		/* num-- makes @num index only resources of matching @type */
		if (type == resource_type(r) && num-- == 0)
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);

/*
 * Like platform_get_resource(), but matches either a memory or an I/O
 * resource; @num indexes within the union of both kinds.
 */
struct resource *platform_get_mem_or_io(struct platform_device *dev,
					unsigned int num)
{
	u32 i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_mem_or_io);

#ifdef CONFIG_HAS_IOMEM
/**
 * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
 *					    platform device and get resource
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 * @res: optional output parameter to store a pointer to the obtained resource.
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
				       unsigned int index, struct resource **res)
{
	struct resource *r;

	/*
	 * A failed lookup yields r == NULL, which devm_ioremap_resource()
	 * turns into an ERR_PTR(-EINVAL) with a diagnostic message.
	 */
	r = platform_get_resource(pdev, IORESOURCE_MEM, index);
	if (res)
		*res = r;
	return devm_ioremap_resource(&pdev->dev, r);
}
EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);

/**
 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 *				    device
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);

/**
 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
 *					   a platform device, retrieve the
 *					   resource by name
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @name: name of the resource
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
				      const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */

/**
 * platform_get_irq_optional - get an optional IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device. Device drivers should check the return
 * value for errors so as to not pass a negative integer value to the
 * request_irq() APIs. This is the same as platform_get_irq(), except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * For example::
 *
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
	int ret;
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		goto out_not_found;
	ret = dev->archdata.irqs[num];
	goto out;
#else
	struct resource *r;

	/* Prefer the device tree description when one exists. */
	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get(dev->dev.of_node, num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (has_acpi_companion(&dev->dev)) {
		/* Let ACPI populate a resource that was registered disabled. */
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
			if (ret)
				goto out;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			goto out_not_found;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r) {
		ret = r->start;
		goto out;
	}

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && has_acpi_companion(&dev->dev)) {
		ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			goto out;
	}

#endif
out_not_found:
	ret = -ENXIO;
out:
	WARN(ret == 0, "0 is an invalid IRQ number\n");
	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);

/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device and prints an error message if finding the
 * IRQ fails. Device drivers should check the return value for errors so as to
 * not pass a negative integer value to the request_irq() APIs.
 *
 * For example::
 *
 *		int irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
	int ret;

	ret = platform_get_irq_optional(dev, num);
	if (ret < 0)
		return dev_err_probe(&dev->dev, ret,
				     "IRQ index %u not found\n", num);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq);

/**
 * platform_irq_count - Count the number of IRQs a platform device uses
 * @dev: platform device
 *
 * Return: Number of IRQs a platform device uses or EPROBE_DEFER
 */
int platform_irq_count(struct platform_device *dev)
{
	int ret, nr = 0;

	/* Probe successive indices until lookup fails. */
	while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
		nr++;

	if (ret == -EPROBE_DEFER)
		return ret;

	return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);

/* devres payload for devm_platform_get_irqs_affinity(). */
struct irq_affinity_devres {
	unsigned int count;
	unsigned int irq[];
};

/* Mark an ACPI-provided IRQ resource disabled again on release. */
static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
{
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
	if (r)
		irqresource_disabled(r, 0);
}

/* devres release callback: undo IRQ mappings made for the device. */
static void devm_platform_get_irqs_affinity_release(struct device *dev,
						    void *res)
{
	struct irq_affinity_devres *ptr = res;
	int i;

	for (i = 0; i < ptr->count; i++) {
		irq_dispose_mapping(ptr->irq[i]);

		if (has_acpi_companion(dev))
			platform_disable_acpi_irq(to_platform_device(dev), i);
	}
}

/**
 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
 *				     device using an interrupt affinity descriptor
 * @dev: platform device pointer
 * @affd: affinity descriptor
 * @minvec: minimum count of interrupt vectors
 * @maxvec: maximum count of interrupt vectors
 * @irqs: pointer holder for IRQ numbers
 *
 * Gets a set of IRQs for a platform device, and updates IRQ affinity according
 * to the passed affinity descriptor
 *
 * Return: Number of vectors on success, negative error number on failure.
 */
int devm_platform_get_irqs_affinity(struct platform_device *dev,
				    struct irq_affinity *affd,
				    unsigned int minvec,
				    unsigned int maxvec,
				    int **irqs)
{
	struct irq_affinity_devres *ptr;
	struct irq_affinity_desc *desc;
	size_t size;
	int i, ret, nvec;

	if (!affd)
		return -EPERM;

	if (maxvec < minvec)
		return -ERANGE;

	nvec = platform_irq_count(dev);
	if (nvec < 0)
		return nvec;

	if (nvec < minvec)
		return -ENOSPC;

	nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
	if (nvec < minvec)
		return -ENOSPC;

	/* Clamp to the caller's maximum. */
	if (nvec > maxvec)
		nvec = maxvec;

	size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
	ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr->count = nvec;

	for (i = 0; i < nvec; i++) {
		int irq = platform_get_irq(dev, i);
		if (irq < 0) {
			ret = irq;
			goto err_free_devres;
		}
		ptr->irq[i] = irq;
	}

	desc = irq_create_affinity_masks(nvec, affd);
	if (!desc) {
		ret = -ENOMEM;
		goto err_free_devres;
	}

	for (i = 0; i < nvec; i++) {
		ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
		if (ret) {
			dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
				ptr->irq[i], ret);
			goto err_free_desc;
		}
	}

	devres_add(&dev->dev, ptr);

	/* The descriptors were consumed by irq_update_affinity_desc(). */
	kfree(desc);

	*irqs = ptr->irq;

	return nvec;

err_free_desc:
	kfree(desc);
err_free_devres:
	devres_free(ptr);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);

/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 *
 * Return: a pointer to the resource or NULL on failure.
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
					      unsigned int type,
					      const char *name)
{
	u32 i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		/* Unnamed resources can never match. */
		if (unlikely(!r->name))
			continue;

		if (type == resource_type(r) && !strcmp(r->name, name))
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);

/* Common helper for the named-IRQ lookups below; DT first, then resources. */
static int __platform_get_irq_byname(struct platform_device *dev,
				     const char *name)
{
	struct resource *r;
	int ret;

	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
		ret = of_irq_get_byname(dev->dev.of_node, name);
		if (ret > 0 || ret == -EPROBE_DEFER)
			return ret;
	}

	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
	if (r) {
		WARN(r->start == 0, "0 is an invalid IRQ number\n");
		return r->start;
	}

	return -ENXIO;
}

/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an IRQ like platform_get_irq(), but then by name rather then by index.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
	int ret;

	ret = __platform_get_irq_byname(dev, name);
	if (ret < 0)
		return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n",
				     name);
	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);

/**
 * platform_get_irq_byname_optional - get an optional IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Get an optional IRQ by name like platform_get_irq_byname(). Except that it
 * does not print an error message if an IRQ can not be obtained.
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq_byname_optional(struct platform_device *dev,
				     const char *name)
{
	return __platform_get_irq_byname(dev, name);
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);

/**
 * platform_add_devices - add a numbers of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 *
 * Return: 0 on success, negative error number on failure; on failure all
 * devices registered so far are unregistered again.
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = platform_device_register(devs[i]);
		if (ret) {
			/* Roll back the devices already registered. */
			while (--i >= 0)
				platform_device_unregister(devs[i]);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);

/* Allocation wrapper: pdev plus its (flexible-array) name storage. */
struct platform_object {
	struct platform_device pdev;
	char name[];
};

/*
 * Set up default DMA mask for platform devices if they weren't
 * previously set by the architecture / DT.
 */
static void setup_pdev_dma_masks(struct platform_device *pdev)
{
	pdev->dev.dma_parms = &pdev->dma_parms;

	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	if (!pdev->dev.dma_mask) {
		pdev->platform_dma_mask = DMA_BIT_MASK(32);
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
	}
};

/**
 * platform_device_put - destroy a platform device
 * @pdev: platform device to free
 *
 * Free all memory associated with a platform device. This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_put(struct platform_device *pdev)
{
	if (!IS_ERR_OR_NULL(pdev))
		put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);

/* device release callback: frees everything platform_device_alloc() attached */
static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_node_put(pa->pdev.dev.of_node);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa->pdev.driver_override);
	kfree(pa);
}

/**
 * platform_device_alloc - create a platform device
 * @name: base name of the device we're adding
 * @id: instance id
 *
 * Create a platform device object which can have other objects attached
 * to it, and which will have attached objects freed when it is released.
 */
struct platform_device *platform_device_alloc(const char *name, int id)
{
	struct platform_object *pa;

	/* One allocation covers the object and a copy of @name. */
	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
	if (pa) {
		strcpy(pa->name, name);
		pa->pdev.name = pa->name;
		pa->pdev.id = id;
		device_initialize(&pa->pdev.dev);
		pa->pdev.dev.release = platform_device_release;
		setup_pdev_dma_masks(&pa->pdev);
	}

	return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);

/**
 * platform_device_add_resources - add resources to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @res: set of resources that needs to be allocated for the device
 * @num: number of resources
 *
 * Add a copy of the resources to the platform device. The memory
 * associated with the resources will be freed when the platform device is
 * released.
 */
int platform_device_add_resources(struct platform_device *pdev,
				  const struct resource *res, unsigned int num)
{
	struct resource *r = NULL;

	if (res) {
		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
		if (!r)
			return -ENOMEM;
	}

	/* Replace (and free) any previously attached resource table. */
	kfree(pdev->resource);
	pdev->resource = r;
	pdev->num_resources = num;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);

/**
 * platform_device_add_data - add platform-specific data to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Add a copy of platform specific data to the platform device's
 * platform_data pointer. The memory associated with the platform data
 * will be freed when the platform device is released.
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
			     size_t size)
{
	void *d = NULL;

	if (data) {
		d = kmemdup(data, size, GFP_KERNEL);
		if (!d)
			return -ENOMEM;
	}

	/* Replace (and free) any previously attached platform data. */
	kfree(pdev->dev.platform_data);
	pdev->dev.platform_data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);

/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	u32 i;
	int ret;

	if (!pdev)
		return -EINVAL;

	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	/* Undo auto-ID allocation and any resources claimed so far. */
	if (pdev->id_auto) {
		ida_free(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	u32 i;

	if (!IS_ERR_OR_NULL(pdev)) {
		device_del(&pdev->dev);

		if (pdev->id_auto) {
			ida_free(&platform_devid_ida, pdev->id);
			pdev->id = PLATFORM_DEVID_AUTO;
		}

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			if (r->parent)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 *
 * NOTE: _Never_ directly free @pdev after calling this function, even if it
 * returned an error! Always use platform_device_put() to give up the
 * reference initialised in this function instead.
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);

/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		pdev->platform_dma_mask = pdevinfo->dma_mask;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = device_create_managed_software_node(&pdev->dev,
							  pdevinfo->properties, NULL);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
			       struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);

/* Stand-in probe that makes any late/forced probe fail; see below. */
static int platform_probe_fail(struct platform_device *pdev)
{
	return -ENXIO;
}

/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval, code;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = __platform_driver_register(drv, module);
	if (retval)
		return retval;

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices. Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = platform_probe_fail;
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);

/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);

/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			goto error;
		}
	}

	return 0;

error:
	/* Unwind in reverse registration order. */
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);

/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	/* Count down so drivers come off in reverse registration order. */
	while (count--) {
		pr_debug("unregistering platform driver %ps\n", drivers[count]);
		platform_driver_unregister(drivers[count]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);

/*
 * Scan a driver's id table (terminated by an entry with an empty name) for
 * an entry whose name equals pdev->name.  A successful match is cached in
 * pdev->id_entry for later use by the driver.
 */
static const struct platform_device_id *platform_match_id(
			const struct platform_device_id *id,
			struct platform_device *pdev)
{
	while (id->name[0]) {
		if (strcmp(pdev->name, id->name) == 0) {
			pdev->id_entry = id;
			return id;
		}
		id++;
	}
	return NULL;
}

#ifdef CONFIG_PM_SLEEP

/*
 * Invoke the driver's legacy ->suspend() callback, if the device is bound
 * and the callback is provided; otherwise report success.
 */
static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->suspend)
		ret = pdrv->suspend(pdev, mesg);

	return ret;
}

/* Counterpart of platform_legacy_suspend() for the legacy ->resume(). */
static int platform_legacy_resume(struct device *dev)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->resume)
		ret = pdrv->resume(pdev);

	return ret;
}

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND

/*
 * Bus-level suspend: prefer the driver's dev_pm_ops ->suspend(); fall back
 * to the legacy platform callback only when no pm ops are provided at all.
 */
int platform_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend)
			ret = drv->pm->suspend(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
	}

	return ret;
}

/* Bus-level resume; mirrors platform_pm_suspend(). */
int platform_pm_resume(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume)
			ret = drv->pm->resume(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

/* Hibernation freeze: dev_pm_ops ->freeze() or legacy suspend(PMSG_FREEZE). */
int platform_pm_freeze(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze)
			ret = drv->pm->freeze(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
	}

	return ret;
}

/* Hibernation thaw: dev_pm_ops ->thaw() or legacy resume. */
int platform_pm_thaw(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw)
			ret = drv->pm->thaw(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

/* Hibernation poweroff: dev_pm_ops ->poweroff() or legacy suspend(PMSG_HIBERNATE). */
int platform_pm_poweroff(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff)
			ret = drv->pm->poweroff(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
	}

	return ret;
}

/* Hibernation restore: dev_pm_ops ->restore() or legacy resume. */
int platform_pm_restore(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore)
			ret = drv->pm->restore(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */

/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running:  "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running:  "modprobe $(cat modalias)"
 */

/*
 * sysfs "modalias" attribute: try an OF-derived modalias first, then an
 * ACPI one; -ENODEV from either helper means "not applicable", so fall
 * through to the plain "platform:<name>" form.
 */
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "platform:%s\n", pdev->name);
}
static DEVICE_ATTR_RO(modalias);

/* sysfs "numa_node" attribute: the device's NUMA node id. */
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

/* sysfs "driver_override" show: read the override string under the device lock. */
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);

	return len;
}

/*
 * sysfs "driver_override" store: replace the override string; writing an
 * empty string clears it.  The pointer swap happens under the device lock
 * so it cannot tear against driver_override_show().
 */
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	/* Strip a trailing newline, if any. */
	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = pdev->driver_override;
	if (strlen(driver_override)) {
		pdev->driver_override = driver_override;
	} else {
		/* Empty write: drop the new buffer and clear the override. */
		kfree(driver_override);
		pdev->driver_override = NULL;
	}
	device_unlock(dev);

	/* Free the replaced string outside the lock. */
	kfree(old);

	return count;
}
static DEVICE_ATTR_RW(driver_override);

/* Default sysfs attributes present on every platform device. */
static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

/* Hide "numa_node" on devices with no NUMA node assigned. */
static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);

	if (a == &dev_attr_numa_node.attr &&
			dev_to_node(dev) == NUMA_NO_NODE)
		return 0;

	return a->mode;
}

static const struct attribute_group platform_dev_group = {
	.attrs = platform_dev_attrs,
	.is_visible = platform_dev_attrs_visible,
};
__ATTRIBUTE_GROUPS(platform_dev);


/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'. Driver IDs are simply
 * "<name>". So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}

/*
 * Compose the MODALIAS uevent variable: OF modalias first, then ACPI
 * (-ENODEV means "not applicable"), then the "platform:<name>" fallback.
 */
static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct platform_device *pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
		       pdev->name);
	return 0;
}

/*
 * Bus ->probe(): apply DT clock defaults, attach the PM domain, then run
 * the driver's probe.  The PM domain is detached again if probe fails.
 */
static int platform_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	/*
	 * A driver registered using platform_driver_probe() cannot be bound
	 * again later because the probe function usually lives in __init code
	 * and so is gone. For these drivers .probe is set to
	 * platform_probe_fail in __platform_driver_probe(). Don't even prepare
	 * clocks and PM domains for these to match the traditional behaviour.
	 */
	if (unlikely(drv->probe == platform_probe_fail))
		return -ENXIO;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, true);
	if (ret)
		goto out;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			dev_pm_domain_detach(_dev, true);
	}

out:
	/* Drivers that opted out of deferral treat -EPROBE_DEFER as fatal. */
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}

/*
 * Bus ->remove(): call the driver's remove (its return value is logged but
 * otherwise ignored) and detach the PM domain.
 */
static void platform_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	if (drv->remove) {
		int ret = drv->remove(dev);

		if (ret)
			dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
	}
	dev_pm_domain_detach(_dev, true);
}

/* Bus ->shutdown(): forward to the driver's shutdown, if bound and present. */
static void platform_shutdown(struct device *_dev)
{
	struct platform_device *dev = to_platform_device(_dev);
	struct platform_driver *drv;

	if (!_dev->driver)
		return;

	drv = to_platform_driver(_dev->driver);
	if (drv->shutdown)
		drv->shutdown(dev);
}

/*
 * Bus ->dma_configure(): configure DMA from DT or ACPI firmware data, then
 * claim the default IOMMU domain unless the driver manages DMA itself.
 */
static int platform_dma_configure(struct device *dev)
{
	struct platform_driver *drv = to_platform_driver(dev->driver);
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev->of_node) {
		ret = of_dma_configure(dev, dev->of_node, true);
	} else if (has_acpi_companion(dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
		ret = acpi_dma_configure(dev, attr);
	}

	/* Undo the arch DMA setup when the default domain cannot be used. */
	if (!ret && !drv->driver_managed_dma) {
		ret = iommu_device_use_default_domain(dev);
		if (ret)
			arch_teardown_dma_ops(dev);
	}

	return ret;
}

/* Bus ->dma_cleanup(): release the default IOMMU domain claimed at configure time. */
static void platform_dma_cleanup(struct device *dev)
{
	struct platform_driver *drv = to_platform_driver(dev->driver);

	if (!drv->driver_managed_dma)
		iommu_device_unuse_default_domain(dev);
}

static const struct dev_pm_ops platform_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
	USE_PLATFORM_PM_SLEEP_OPS
};

struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.probe		= platform_probe,
	.remove		= platform_remove,
	.shutdown	= platform_shutdown,
	.dma_configure	= platform_dma_configure,
	.dma_cleanup	= platform_dma_cleanup,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);

/* Adapter so platform_match() fits the bus_find_device() match prototype. */
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}

/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
 */
struct device *platform_find_device_by_driver(struct device *start,
					      const struct device_driver *drv)
{
	return bus_find_device(&platform_bus_type, start, drv,
			       __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);

/* Weak hook: architectures may override to clean up early platform devices. */
void __weak __init early_platform_cleanup(void) { }

/*
 * Boot-time setup: register the root "platform" device and the platform
 * bus type, then hook up the OF reconfiguration notifier.  On a failed
 * device_register() the embedded reference is dropped with put_device().
 */
int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error) {
		put_device(&platform_bus);
		return error;
	}
	error = bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);
	of_platform_register_reconfig_notifier();
	return error;
}