// SPDX-License-Identifier: GPL-2.0
/*
 * platform.c - platform 'pseudo' bus for legacy devices
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 *
 * Please see Documentation/driver-api/driver-model/platform.rst for more
 * information.
 */

#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>

#include "base.h"
#include "power/power.h"

/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);

/* Root of the platform bus; parent of platform devices registered without one */
struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);

/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 *
 * Return: a pointer to the resource or NULL on failure.
 */
struct resource *platform_get_resource(struct platform_device *dev,
				       unsigned int type, unsigned int num)
{
	u32 i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		/* @num indexes only resources of the requested @type */
		if (type == resource_type(r) && num-- == 0)
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);

/**
 * platform_get_mem_or_io - get a MEM or IO resource for a device
 * @dev: platform device
 * @num: resource index, counting only MEM and IO typed resources
 *
 * Return: a pointer to the resource or NULL on failure.
 */
struct resource *platform_get_mem_or_io(struct platform_device *dev,
					unsigned int num)
{
	u32 i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_mem_or_io);

#ifdef CONFIG_HAS_IOMEM
/**
 * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
 *					    platform device and get resource
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 * @res: optional output parameter to store a pointer to the obtained resource.
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
				       unsigned int index, struct resource **res)
{
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_MEM, index);
	if (res)
		*res = r;
	/* devm_ioremap_resource() handles a NULL resource by returning an error */
	return devm_ioremap_resource(&pdev->dev, r);
}
EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);

/**
 * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
 *				    device
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @index: resource index
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);

/**
 * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
 *					   a platform device, retrieve the
 *					   resource by name
 *
 * @pdev: platform device to use both for memory resource lookup as well as
 *	  resource management
 * @name: name of the resource
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
				      const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */

/*
 * Look up the affinity of IRQ @num via the device's firmware node (OF or
 * ACPI); falls back to cpu_possible_mask when none is available.
 */
static const struct cpumask *get_irq_affinity(struct platform_device *dev,
					      unsigned int num)
{
	const struct cpumask *mask = NULL;
#ifndef CONFIG_SPARC
	struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);

	if (is_of_node(fwnode))
		mask = of_irq_get_affinity(to_of_node(fwnode), num);
	else if (is_acpi_device_node(fwnode))
		mask = acpi_irq_get_affinity(ACPI_HANDLE_FWNODE(fwnode), num);
#endif

	return mask ?: cpu_possible_mask;
}

/**
 * platform_get_irq_affinity - get an optional IRQ and its affinity for a device
 * @dev: platform device
 * @num: interrupt number index
 * @affinity: optional cpumask pointer to get the affinity of a per-cpu interrupt
 *
 * Gets an interrupt for a platform device. Device drivers should check the
 * return value for errors so as to not pass a negative integer value to
 * the request_irq() APIs. Optional affinity information is provided in the
 * affinity pointer if available, and NULL otherwise.
 *
 * Return: non-zero interrupt number on success, negative error number on failure.
 */
int platform_get_irq_affinity(struct platform_device *dev, unsigned int num,
			      const struct cpumask **affinity)
{
	int ret;
#ifdef CONFIG_SPARC
	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
	if (!dev || num >= dev->archdata.num_irqs)
		goto out_not_found;
	ret = dev->archdata.irqs[num];
	goto out;
#else
	struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);
	struct resource *r;

	/* Devicetree first: may also report a deferral while the irqchip probes */
	if (is_of_node(fwnode)) {
		ret = of_irq_get(to_of_node(fwnode), num);
		if (ret > 0 || ret == -EPROBE_DEFER)
			goto out;
	}

	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (is_acpi_device_node(fwnode)) {
		if (r && r->flags & IORESOURCE_DISABLED) {
			ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), num, r);
			if (ret)
				goto out;
		}
	}

	/*
	 * The resources may pass trigger flags to the irqs that need
	 * to be set up. It so happens that the trigger flags for
	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
	 * settings.
	 */
	if (r && r->flags & IORESOURCE_BITS) {
		struct irq_data *irqd;

		irqd = irq_get_irq_data(r->start);
		if (!irqd)
			goto out_not_found;
		irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
	}

	if (r) {
		ret = r->start;
		goto out;
	}

	/*
	 * For the index 0 interrupt, allow falling back to GpioInt
	 * resources. While a device could have both Interrupt and GpioInt
	 * resources, making this fallback ambiguous, in many common cases
	 * the device will only expose one IRQ, and this fallback
	 * allows a common code path across either kind of resource.
	 */
	if (num == 0 && is_acpi_device_node(fwnode)) {
		ret = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), num);
		/* Our callers expect -ENXIO for missing IRQs. */
		if (ret >= 0 || ret == -EPROBE_DEFER)
			goto out;
	}

#endif
out_not_found:
	ret = -ENXIO;
out:
	if (WARN(!ret, "0 is an invalid IRQ number\n"))
		return -EINVAL;

	if (ret > 0 && affinity)
		*affinity = get_irq_affinity(dev, num);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_affinity);

/**
 * platform_get_irq_optional - get an optional interrupt for a device
 * @dev: platform device
 * @num: interrupt number index
 *
 * Gets an interrupt for a platform device. Device drivers should check the
 * return value for errors so as to not pass a negative integer value to
 * the request_irq() APIs. This is the same as platform_get_irq(), except
 * that it does not print an error message if an interrupt can not be
 * obtained.
 *
 * For example::
 *
 *		int irq = platform_get_irq_optional(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: non-zero interrupt number on success, negative error number on failure.
 */
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
	return platform_get_irq_affinity(dev, num, NULL);
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);

/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Gets an IRQ for a platform device and prints an error message if finding the
 * IRQ fails. Device drivers should check the return value for errors so as to
 * not pass a negative integer value to the request_irq() APIs.
 *
 * For example::
 *
 *		int irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 * Return: non-zero IRQ number on success, negative error number on failure.
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
	int ret;

	ret = platform_get_irq_optional(dev, num);
	if (ret < 0)
		return dev_err_probe(&dev->dev, ret,
				     "IRQ index %u not found\n", num);

	return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq);

/**
 * platform_irq_count - Count the number of IRQs a platform device uses
 * @dev: platform device
 *
 * Return: Number of IRQs a platform device uses or EPROBE_DEFER
 */
int platform_irq_count(struct platform_device *dev)
{
	int ret, nr = 0;

	/* Probe successive indices until lookup fails */
	while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
		nr++;

	if (ret == -EPROBE_DEFER)
		return ret;

	return nr;
}
EXPORT_SYMBOL_GPL(platform_irq_count);

/* devres payload tracking the IRQ mappings created for a device */
struct irq_affinity_devres {
	unsigned int count;
	unsigned int irq[] __counted_by(count);
};

/* Mark the ACPI IRQ resource at @index disabled again so it can be re-acquired */
static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
{
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
	if (r)
		irqresource_disabled(r, 0);
}

/* devres release callback: undo the mappings made in devm_platform_get_irqs_affinity() */
static void devm_platform_get_irqs_affinity_release(struct device *dev,
						    void *res)
{
	struct irq_affinity_devres *ptr = res;
	int i;

	for (i = 0; i < ptr->count; i++) {
		irq_dispose_mapping(ptr->irq[i]);

		if (is_acpi_device_node(dev_fwnode(dev)))
			platform_disable_acpi_irq(to_platform_device(dev), i);
	}
}

/**
 * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
 *				     device using an interrupt affinity descriptor
 * @dev: platform device pointer
 * @affd: affinity descriptor
 * @minvec: minimum count of interrupt vectors
 * @maxvec: maximum count of interrupt vectors
 * @irqs: pointer holder for IRQ numbers
 *
 * Gets a set of IRQs for a platform device, and updates IRQ affinity according
 * to the passed affinity descriptor
 *
 * Return: Number of vectors on success, negative error number on failure.
 */
int devm_platform_get_irqs_affinity(struct platform_device *dev,
				    struct irq_affinity *affd,
				    unsigned int minvec,
				    unsigned int maxvec,
				    int **irqs)
{
	struct irq_affinity_devres *ptr;
	struct irq_affinity_desc *desc;
	size_t size;
	int i, ret, nvec;

	if (!affd)
		return -EPERM;

	if (maxvec < minvec)
		return -ERANGE;

	nvec = platform_irq_count(dev);
	if (nvec < 0)
		return nvec;

	if (nvec < minvec)
		return -ENOSPC;

	/* Clamp to what the affinity descriptor actually allows */
	nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
	if (nvec < minvec)
		return -ENOSPC;

	if (nvec > maxvec)
		nvec = maxvec;

	size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
	ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr->count = nvec;

	for (i = 0; i < nvec; i++) {
		int irq = platform_get_irq(dev, i);
		if (irq < 0) {
			ret = irq;
			goto err_free_devres;
		}
		ptr->irq[i] = irq;
	}

	desc = irq_create_affinity_masks(nvec, affd);
	if (!desc) {
		ret = -ENOMEM;
		goto err_free_devres;
	}

	for (i = 0; i < nvec; i++) {
		ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
		if (ret) {
			dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
				ptr->irq[i], ret);
			goto err_free_desc;
		}
	}

	devres_add(&dev->dev, ptr);

	/* the descriptors were consumed by irq_update_affinity_desc() */
	kfree(desc);

	*irqs = ptr->irq;

	return nvec;

err_free_desc:
	kfree(desc);
err_free_devres:
	devres_free(ptr);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);

/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
					      unsigned int type,
const char *name) 464 { 465 u32 i; 466 467 for (i = 0; i < dev->num_resources; i++) { 468 struct resource *r = &dev->resource[i]; 469 470 if (unlikely(!r->name)) 471 continue; 472 473 if (type == resource_type(r) && !strcmp(r->name, name)) 474 return r; 475 } 476 return NULL; 477 } 478 EXPORT_SYMBOL_GPL(platform_get_resource_byname); 479 480 static int __platform_get_irq_byname(struct platform_device *dev, 481 const char *name) 482 { 483 struct resource *r; 484 int ret; 485 486 ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name); 487 if (ret > 0 || ret == -EPROBE_DEFER) 488 return ret; 489 490 r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); 491 if (r) { 492 if (WARN(!r->start, "0 is an invalid IRQ number\n")) 493 return -EINVAL; 494 return r->start; 495 } 496 497 return -ENXIO; 498 } 499 500 /** 501 * platform_get_irq_byname - get an IRQ for a device by name 502 * @dev: platform device 503 * @name: IRQ name 504 * 505 * Get an IRQ like platform_get_irq(), but then by name rather then by index. 506 * 507 * Return: non-zero IRQ number on success, negative error number on failure. 508 */ 509 int platform_get_irq_byname(struct platform_device *dev, const char *name) 510 { 511 int ret; 512 513 ret = __platform_get_irq_byname(dev, name); 514 if (ret < 0) 515 return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n", 516 name); 517 return ret; 518 } 519 EXPORT_SYMBOL_GPL(platform_get_irq_byname); 520 521 /** 522 * platform_get_irq_byname_optional - get an optional IRQ for a device by name 523 * @dev: platform device 524 * @name: IRQ name 525 * 526 * Get an optional IRQ by name like platform_get_irq_byname(). Except that it 527 * does not print an error message if an IRQ can not be obtained. 528 * 529 * Return: non-zero IRQ number on success, negative error number on failure. 
530 */ 531 int platform_get_irq_byname_optional(struct platform_device *dev, 532 const char *name) 533 { 534 return __platform_get_irq_byname(dev, name); 535 } 536 EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional); 537 538 /** 539 * platform_add_devices - add a numbers of platform devices 540 * @devs: array of platform devices to add 541 * @num: number of platform devices in array 542 * 543 * Return: 0 on success, negative error number on failure. 544 */ 545 int platform_add_devices(struct platform_device **devs, int num) 546 { 547 int i, ret = 0; 548 549 for (i = 0; i < num; i++) { 550 ret = platform_device_register(devs[i]); 551 if (ret) { 552 while (--i >= 0) 553 platform_device_unregister(devs[i]); 554 break; 555 } 556 } 557 558 return ret; 559 } 560 EXPORT_SYMBOL_GPL(platform_add_devices); 561 562 struct platform_object { 563 struct platform_device pdev; 564 char name[]; 565 }; 566 567 /* 568 * Set up default DMA mask for platform devices if the they weren't 569 * previously set by the architecture / DT. 570 */ 571 static void setup_pdev_dma_masks(struct platform_device *pdev) 572 { 573 pdev->dev.dma_parms = &pdev->dma_parms; 574 575 if (!pdev->dev.coherent_dma_mask) 576 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 577 if (!pdev->dev.dma_mask) { 578 pdev->platform_dma_mask = DMA_BIT_MASK(32); 579 pdev->dev.dma_mask = &pdev->platform_dma_mask; 580 } 581 }; 582 583 /** 584 * platform_device_put - destroy a platform device 585 * @pdev: platform device to free 586 * 587 * Free all memory associated with a platform device. This function must 588 * _only_ be externally called in error cases. All other usage is a bug. 
 */
void platform_device_put(struct platform_device *pdev)
{
	if (!IS_ERR_OR_NULL(pdev))
		put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);

/* Device-core release callback: frees everything attached by the add_* helpers */
static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_node_put(pa->pdev.dev.of_node);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa->pdev.driver_override);
	kfree(pa);
}

/**
 * platform_device_alloc - create a platform device
 * @name: base name of the device we're adding
 * @id: instance id
 *
 * Create a platform device object which can have other objects attached
 * to it, and which will have attached objects freed when it is released.
 */
struct platform_device *platform_device_alloc(const char *name, int id)
{
	struct platform_object *pa;

	/* name is stored in the trailing flexible array of platform_object */
	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
	if (pa) {
		strcpy(pa->name, name);
		pa->pdev.name = pa->name;
		pa->pdev.id = id;
		device_initialize(&pa->pdev.dev);
		pa->pdev.dev.release = platform_device_release;
		setup_pdev_dma_masks(&pa->pdev);
	}

	return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);

/**
 * platform_device_add_resources - add resources to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @res: set of resources that needs to be allocated for the device
 * @num: number of resources
 *
 * Add a copy of the resources to the platform device. The memory
 * associated with the resources will be freed when the platform device is
 * released.
 */
int platform_device_add_resources(struct platform_device *pdev,
				  const struct resource *res, unsigned int num)
{
	struct resource *r = NULL;

	if (res) {
		r = kmemdup_array(res, num, sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;
	}

	/* replace any previously attached resource array */
	kfree(pdev->resource);
	pdev->resource = r;
	pdev->num_resources = num;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);

/**
 * platform_device_add_data - add platform-specific data to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Add a copy of platform specific data to the platform device's
 * platform_data pointer. The memory associated with the platform data
 * will be freed when the platform device is released.
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
			     size_t size)
{
	void *d = NULL;

	if (data) {
		d = kmemdup(data, size, GFP_KERNEL);
		if (!d)
			return -ENOMEM;
	}

	/* replace any previously attached platform data */
	kfree(pdev->dev.platform_data);
	pdev->dev.platform_data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);

/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	u32 i;
	int ret;

	if (!dev->parent)
		dev->parent = &platform_bus;

	dev->bus = &platform_bus_type;

	switch (pdev->id) {
	default:
		dev_set_name(dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
		if (ret < 0)
			return ret;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(dev);

		p = r->parent;
		if (!p) {
			/* no explicit parent: claim from the global trees */
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(dev, "failed to claim resource %d: %pR\n", i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n", dev_name(dev),
		 dev_name(dev->parent));

	ret = device_add(dev);
	if (ret)
		goto failed;

	return 0;

failed:
	if (pdev->id_auto) {
		ida_free(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	/* release only the resources already inserted (indices < i) */
	while (i--) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);

/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource). This function must
 * _only_ be externally called in error cases. All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	u32 i;

	if (!IS_ERR_OR_NULL(pdev)) {
		device_del(&pdev->dev);

		if (pdev->id_auto) {
			ida_free(&platform_devid_ida, pdev->id);
			pdev->id = PLATFORM_DEVID_AUTO;
		}

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			if (r->parent)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);

/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 *
 * NOTE: _Never_ directly free @pdev after calling this function, even if it
 * returned an error! Always use platform_device_put() to give up the
 * reference initialised in this function instead.
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	setup_pdev_dma_masks(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps. First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);

/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = pdevinfo->parent;
	pdev->dev.fwnode = pdevinfo->fwnode;
	pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
	pdev->dev.of_node_reused = pdevinfo->of_node_reused;

	if (pdevinfo->dma_mask) {
		pdev->platform_dma_mask = pdevinfo->dma_mask;
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	if (pdevinfo->properties) {
		ret = device_create_managed_software_node(&pdev->dev,
							  pdevinfo->properties, NULL);
		if (ret)
			goto err;
	}

	ret = platform_device_add(pdev);
	if (ret) {
err:
		/* drop the ACPI link before the put so the companion isn't leaked */
		ACPI_COMPANION_SET(&pdev->dev, NULL);
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);

/**
 * __platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 * @owner: owning module/driver
 */
int __platform_driver_register(struct platform_driver *drv,
			       struct module *owner)
{
	drv->driver.owner = owner;
	drv->driver.bus = &platform_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__platform_driver_register);

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);

/* Probe stub installed after __platform_driver_probe() to reject late binds */
static int platform_probe_fail(struct platform_device *pdev)
{
	return -ENXIO;
}

/* bus_for_each_dev() match helper: non-zero when @dev is bound to @driver */
static int is_bound_to_driver(struct device *dev, void *driver)
{
	if (dev->driver == driver)
		return 1;
	return 0;
}

/**
 * __platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @module: module which will be the owner of the driver
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Note that this is incompatible with deferred probing.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *), struct module *module)
{
	int retval;

	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
			 drv->driver.name, __func__);
		return -EINVAL;
	}

	/*
	 * We have to run our probes synchronously because we check if
	 * we find any devices to bind to and exit with error if there
	 * are any.
	 */
	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;

	/*
	 * Prevent driver from requesting probe deferral to avoid further
	 * futile probe attempts.
	 */
	drv->prevent_deferred_probe = true;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = __platform_driver_register(drv, module);
	if (retval)
		return retval;

	/* Force all new probes of this driver to fail */
	drv->probe = platform_probe_fail;

	/* Walk all platform devices and see if any actually bound to this driver.
	 * If not, return an error as the device should have done so by now.
	 */
	if (!bus_for_each_dev(&platform_bus_type, NULL, &drv->driver, is_bound_to_driver)) {
		retval = -ENODEV;
		platform_driver_unregister(drv);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);

/**
 * __platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 * @module: module which will be the owner of the driver
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module __platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size, struct module *module)
{
	struct platform_device *pdev;
	int error;

	pdev = platform_device_alloc(driver->driver.name, PLATFORM_DEVID_NONE);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	error = __platform_driver_probe(driver, probe, module);
	if (error)
		goto err_pdev_del;

	return pdev;

err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(__platform_create_bundle);

/**
 * __platform_register_drivers - register an array of platform drivers
 * @drivers: an array of drivers to register
 * @count: the number of drivers to register
 * @owner: module owning the drivers
 *
 * Registers platform drivers specified by an array. On failure to register a
 * driver, all previously registered drivers will be unregistered. Callers of
 * this API should use platform_unregister_drivers() to unregister drivers in
 * the reverse order.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int __platform_register_drivers(struct platform_driver * const *drivers,
				unsigned int count, struct module *owner)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		pr_debug("registering platform driver %ps\n", drivers[i]);

		err = __platform_driver_register(drivers[i], owner);
		if (err < 0) {
			pr_err("failed to register platform driver %ps: %d\n",
			       drivers[i], err);
			goto error;
		}
	}

	return 0;

error:
	/* unwind the drivers registered before the failure, in reverse order */
	while (i--) {
		pr_debug("unregistering platform driver %ps\n", drivers[i]);
		platform_driver_unregister(drivers[i]);
	}

	return err;
}
EXPORT_SYMBOL_GPL(__platform_register_drivers);

/**
 * platform_unregister_drivers - unregister an array of platform drivers
 * @drivers: an array of drivers to unregister
 * @count: the number of drivers to unregister
 *
 * Unregisters platform drivers specified by an array. This is typically used
 * to complement an earlier call to platform_register_drivers(). Drivers are
 * unregistered in the reverse order in which they were registered.
 */
void platform_unregister_drivers(struct platform_driver * const *drivers,
				 unsigned int count)
{
	/* Unregister in reverse registration order. */
	while (count--) {
		pr_debug("unregistering platform driver %ps\n", drivers[count]);
		platform_driver_unregister(drivers[count]);
	}
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);

/*
 * Walk a driver's id table (terminated by an entry with an empty name) and
 * return the entry matching the device name, recording it in
 * pdev->id_entry for the driver's later use.  NULL if nothing matched.
 */
static const struct platform_device_id *platform_match_id(
			const struct platform_device_id *id,
			struct platform_device *pdev)
{
	while (id->name[0]) {
		if (strcmp(pdev->name, id->name) == 0) {
			pdev->id_entry = id;
			return id;
		}
		id++;
	}
	return NULL;
}

#ifdef CONFIG_PM_SLEEP

/*
 * Invoke the driver's legacy (platform_driver) suspend callback, if any.
 * Note: to_platform_driver() is container_of() and does not dereference
 * dev->driver, so computing pdrv before the NULL check below is safe.
 */
static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->suspend)
		ret = pdrv->suspend(pdev, mesg);

	return ret;
}

/* Invoke the driver's legacy (platform_driver) resume callback, if any. */
static int platform_legacy_resume(struct device *dev)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->resume)
		ret = pdrv->resume(pdev);

	return ret;
}

#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND

/*
 * Bus-level PM callbacks below all follow the same pattern: no driver
 * bound means nothing to do; a driver with dev_pm_ops uses the matching
 * dev_pm_ops hook (a missing hook is a no-op); a driver without
 * dev_pm_ops falls back to the legacy platform_driver callbacks.
 */
int platform_pm_suspend(struct device *dev)
{
	const struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend)
			ret = drv->pm->suspend(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
	}

	return ret;
}

int platform_pm_resume(struct device *dev)
{
	const struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume)
			ret = drv->pm->resume(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

int platform_pm_freeze(struct device *dev)
{
	const struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze)
			ret = drv->pm->freeze(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
	}

	return ret;
}

int platform_pm_thaw(struct device *dev)
{
	const struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw)
			ret = drv->pm->thaw(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

int platform_pm_poweroff(struct device *dev)
{
	const struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff)
			ret = drv->pm->poweroff(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
	}

	return ret;
}

int platform_pm_restore(struct device *dev)
{
	const struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore)
			ret = drv->pm->restore(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */

/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running:  "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running:  "modprobe $(cat modalias)"
 */
/*
 * Build the modalias string: prefer an OF-style alias, then ACPI, and fall
 * back to the plain "platform:<name>" form.  -ENODEV from the OF/ACPI
 * helpers means "no data of that kind", not a hard error.
 */
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	int len;

	len = of_device_modalias(dev, buf, PAGE_SIZE);
	if (len != -ENODEV)
		return len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "platform:%s\n", pdev->name);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	ssize_t len;

	/* device_lock() serializes against driver_override_store(). */
	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);

	return len;
}

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	ret = driver_set_override(dev, &pdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *platform_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

/* Hide the numa_node attribute on devices without a NUMA node. */
static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
		int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);

	if (a == &dev_attr_numa_node.attr &&
			dev_to_node(dev) == NUMA_NO_NODE)
		return 0;

	return a->mode;
}

static const struct attribute_group platform_dev_group = {
	.attrs = platform_dev_attrs,
	.is_visible = platform_dev_attrs_visible,
};
__ATTRIBUTE_GROUPS(platform_dev);


/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'.  Driver IDs are simply
 * "<name>".  So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver. Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, const struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* When driver_override is set, only bind to the matching driver */
	if (pdev->driver_override)
		return !strcmp(pdev->driver_override, drv->name);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI style match */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}

/* Emit the MODALIAS uevent variable, mirroring modalias_show()'s fallbacks. */
static int platform_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct platform_device *pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
		       pdev->name);
	return 0;
}

/* Bus probe: set clock defaults, attach PM domains, then call the driver. */
static int platform_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);
	int ret;

	/*
	 * A driver registered using platform_driver_probe() cannot be bound
	 * again later because the probe function usually lives in __init code
	 * and so is gone. For these drivers .probe is set to
	 * platform_probe_fail in __platform_driver_probe(). Don't even prepare
	 * clocks and PM domains for these to match the traditional behaviour.
	 */
	if (unlikely(drv->probe == platform_probe_fail))
		return -ENXIO;

	ret = of_clk_set_defaults(_dev->of_node, false);
	if (ret < 0)
		return ret;

	ret = dev_pm_domain_attach(_dev, PD_FLAG_ATTACH_POWER_ON |
				   PD_FLAG_DETACH_POWER_OFF);
	if (ret)
		goto out;

	if (drv->probe)
		ret = drv->probe(dev);

out:
	/* Drivers that opted out of deferral get a hard failure instead. */
	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
		dev_warn(_dev, "probe deferral not supported\n");
		ret = -ENXIO;
	}

	return ret;
}

static void platform_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	if (drv->remove)
		drv->remove(dev);
}

static void platform_shutdown(struct device *_dev)
{
	struct platform_device *dev = to_platform_device(_dev);
	struct platform_driver *drv;

	/* Shutdown may run on unbound devices; nothing to do without a driver. */
	if (!_dev->driver)
		return;

	drv = to_platform_driver(_dev->driver);
	if (drv->shutdown)
		drv->shutdown(dev);
}

/* Configure DMA from OF/ACPI firmware data, then claim the IOMMU default
 * domain unless the bound driver manages DMA itself.
 */
static int platform_dma_configure(struct device *dev)
{
	struct device_driver *drv = READ_ONCE(dev->driver);
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	enum dev_dma_attr attr;
	int ret = 0;

	if (is_of_node(fwnode)) {
		ret = of_dma_configure(dev, to_of_node(fwnode), true);
	} else if (is_acpi_device_node(fwnode)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
		ret = acpi_dma_configure(dev, attr);
	}
	/* @dev->driver may not be valid when we're called from the IOMMU layer */
	if (ret || !drv || to_platform_driver(drv)->driver_managed_dma)
		return ret;

	ret = iommu_device_use_default_domain(dev);
	if (ret)
		arch_teardown_dma_ops(dev);

	return ret;
}

/* Counterpart of platform_dma_configure(): release the IOMMU default domain. */
static void platform_dma_cleanup(struct device *dev)
{
	struct platform_driver *drv = to_platform_driver(dev->driver);

	if (!drv->driver_managed_dma)
		iommu_device_unuse_default_domain(dev);
}

static const struct dev_pm_ops platform_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
	USE_PLATFORM_PM_SLEEP_OPS
};

const struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.probe		= platform_probe,
	.remove		= platform_remove,
	.shutdown	= platform_shutdown,
	.dma_configure	= platform_dma_configure,
	.dma_cleanup	= platform_dma_cleanup,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);

/* Adapter so platform_match() fits bus_find_device()'s match prototype. */
static inline int __platform_match(struct device *dev, const void *drv)
{
	return platform_match(dev, (struct device_driver *)drv);
}

/**
 * platform_find_device_by_driver - Find a platform device with a given
 * driver.
 * @start: The device to start the search from.
 * @drv: The device driver to look for.
1540 */ 1541 struct device *platform_find_device_by_driver(struct device *start, 1542 const struct device_driver *drv) 1543 { 1544 return bus_find_device(&platform_bus_type, start, drv, 1545 __platform_match); 1546 } 1547 EXPORT_SYMBOL_GPL(platform_find_device_by_driver); 1548 1549 void __weak __init early_platform_cleanup(void) { } 1550 1551 int __init platform_bus_init(void) 1552 { 1553 int error; 1554 1555 early_platform_cleanup(); 1556 1557 error = device_register(&platform_bus); 1558 if (error) { 1559 put_device(&platform_bus); 1560 return error; 1561 } 1562 error = bus_register(&platform_bus_type); 1563 if (error) 1564 device_unregister(&platform_bus); 1565 1566 return error; 1567 } 1568