xref: /linux/drivers/base/platform.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  * platform.c - platform 'pseudo' bus for legacy devices
3  *
4  * Copyright (c) 2002-3 Patrick Mochel
5  * Copyright (c) 2002-3 Open Source Development Labs
6  *
7  * This file is released under the GPLv2
8  *
9  * Please see Documentation/driver-model/platform.txt for more
10  * information.
11  */
12 
13 #include <linux/string.h>
14 #include <linux/platform_device.h>
15 #include <linux/of_device.h>
16 #include <linux/of_irq.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/bootmem.h>
21 #include <linux/err.h>
22 #include <linux/slab.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/pm_domain.h>
25 #include <linux/idr.h>
26 #include <linux/acpi.h>
27 #include <linux/clk/clk-conf.h>
28 #include <linux/limits.h>
29 
30 #include "base.h"
31 #include "power/power.h"
32 
33 /* For automatically allocated device IDs */
34 static DEFINE_IDA(platform_devid_ida);
35 
36 struct device platform_bus = {
37 	.init_name	= "platform",
38 };
39 EXPORT_SYMBOL_GPL(platform_bus);
40 
41 /**
42  * arch_setup_pdev_archdata - Allow manipulation of archdata before it's used
43  * @pdev: platform device
44  *
45  * This is called before platform_device_add() such that any pdev_archdata may
46  * be set up before the platform_notifier is called.  So if a user needs to
47  * manipulate any relevant information in the pdev_archdata, they can do:
48  *
49  *	platform_device_alloc()
50  *	... manipulate ...
51  *	platform_device_add()
52  *
53  * And if they don't care, they can just call platform_device_register() and
54  * everything will just work out.
55  */
56 void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
57 {
58 }
59 
60 /**
61  * platform_get_resource - get a resource for a device
62  * @dev: platform device
63  * @type: resource type
64  * @num: resource index
65  */
66 struct resource *platform_get_resource(struct platform_device *dev,
67 				       unsigned int type, unsigned int num)
68 {
69 	int i;
70 
71 	for (i = 0; i < dev->num_resources; i++) {
72 		struct resource *r = &dev->resource[i];
73 
74 		if (type == resource_type(r) && num-- == 0)
75 			return r;
76 	}
77 	return NULL;
78 }
79 EXPORT_SYMBOL_GPL(platform_get_resource);
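
/*
 * Editorial usage sketch (not part of the original file): a driver probe()
 * fetching its first memory resource and mapping it.  The function name and
 * device are hypothetical, and devm_ioremap_resource() additionally needs
 * <linux/io.h>; error handling follows its ERR_PTR() convention.
 *
 *	static int hypothetical_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		void __iomem *base;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		base = devm_ioremap_resource(&pdev->dev, res);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *
 *		return 0;
 *	}
 */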
80 
81 /**
82  * platform_get_irq - get an IRQ for a device
83  * @dev: platform device
84  * @num: IRQ number index
85  */
86 int platform_get_irq(struct platform_device *dev, unsigned int num)
87 {
88 #ifdef CONFIG_SPARC
89 	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
90 	if (!dev || num >= dev->archdata.num_irqs)
91 		return -ENXIO;
92 	return dev->archdata.irqs[num];
93 #else
94 	struct resource *r;
95 	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
96 		int ret;
97 
98 		ret = of_irq_get(dev->dev.of_node, num);
99 		if (ret >= 0 || ret == -EPROBE_DEFER)
100 			return ret;
101 	}
102 
103 	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
104 	/*
105 	 * The resources may pass trigger flags to the irqs that need
106 	 * to be set up. It so happens that the trigger flags for
107 	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
108 	 * settings.
109 	 */
110 	if (r && r->flags & IORESOURCE_BITS)
111 		irqd_set_trigger_type(irq_get_irq_data(r->start),
112 				      r->flags & IORESOURCE_BITS);
113 
114 	return r ? r->start : -ENXIO;
115 #endif
116 }
117 EXPORT_SYMBOL_GPL(platform_get_irq);
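
/*
 * Editorial usage sketch (not part of the original file): requesting the
 * device's first interrupt from probe().  A negative return value (possibly
 * -EPROBE_DEFER) is passed straight back to the driver core.  The handler
 * and names are hypothetical; devm_request_irq() needs <linux/interrupt.h>.
 *
 *	static int hypothetical_probe(struct platform_device *pdev)
 *	{
 *		int irq = platform_get_irq(pdev, 0);
 *
 *		if (irq < 0)
 *			return irq;
 *
 *		return devm_request_irq(&pdev->dev, irq, hypothetical_isr, 0,
 *					dev_name(&pdev->dev), pdev);
 *	}
 */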
118 
119 /**
120  * platform_get_resource_byname - get a resource for a device by name
121  * @dev: platform device
122  * @type: resource type
123  * @name: resource name
124  */
125 struct resource *platform_get_resource_byname(struct platform_device *dev,
126 					      unsigned int type,
127 					      const char *name)
128 {
129 	int i;
130 
131 	for (i = 0; i < dev->num_resources; i++) {
132 		struct resource *r = &dev->resource[i];
133 
134 		if (unlikely(!r->name))
135 			continue;
136 
137 		if (type == resource_type(r) && !strcmp(r->name, name))
138 			return r;
139 	}
140 	return NULL;
141 }
142 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
143 
144 /**
145  * platform_get_irq_byname - get an IRQ for a device by name
146  * @dev: platform device
147  * @name: IRQ name
148  */
149 int platform_get_irq_byname(struct platform_device *dev, const char *name)
150 {
151 	struct resource *r;
152 
153 	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
154 		int ret;
155 
156 		ret = of_irq_get_byname(dev->dev.of_node, name);
157 		if (ret >= 0 || ret == -EPROBE_DEFER)
158 			return ret;
159 	}
160 
161 	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
162 	return r ? r->start : -ENXIO;
163 }
164 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
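
/*
 * Editorial usage sketch (not part of the original file): the by-name
 * variants pair with resources whose .name fields were filled in by board
 * code or by the DT "reg-names"/"interrupt-names" properties.  The names
 * "cfg" and "wakeup" are illustrative only.
 *
 *	static int hypothetical_probe(struct platform_device *pdev)
 *	{
 *		struct resource *cfg;
 *		int wakeup_irq;
 *
 *		cfg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
 *		if (!cfg)
 *			return -ENODEV;
 *
 *		wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
 *		if (wakeup_irq < 0)
 *			return wakeup_irq;
 *
 *		return 0;
 *	}
 */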
165 
166 /**
167  * platform_add_devices - add a number of platform devices
168  * @devs: array of platform devices to add
169  * @num: number of platform devices in array
170  */
171 int platform_add_devices(struct platform_device **devs, int num)
172 {
173 	int i, ret = 0;
174 
175 	for (i = 0; i < num; i++) {
176 		ret = platform_device_register(devs[i]);
177 		if (ret) {
178 			while (--i >= 0)
179 				platform_device_unregister(devs[i]);
180 			break;
181 		}
182 	}
183 
184 	return ret;
185 }
186 EXPORT_SYMBOL_GPL(platform_add_devices);
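
/*
 * Editorial usage sketch (not part of the original file): a board file
 * registering its statically described devices in one call, relying on the
 * roll-back above if any registration fails.  The device and board names
 * are hypothetical.
 *
 *	static struct platform_device hyp_uart_device = {
 *		.name	= "hypothetical-uart",
 *		.id	= 0,
 *	};
 *
 *	static struct platform_device *hyp_board_devs[] __initdata = {
 *		&hyp_uart_device,
 *	};
 *
 *	static int __init hyp_board_init(void)
 *	{
 *		return platform_add_devices(hyp_board_devs,
 *					    ARRAY_SIZE(hyp_board_devs));
 *	}
 */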
187 
188 struct platform_object {
189 	struct platform_device pdev;
190 	char name[];
191 };
192 
193 /**
194  * platform_device_put - destroy a platform device
195  * @pdev: platform device to free
196  *
197  * Free all memory associated with a platform device.  This function must
198  * _only_ be called externally in error cases.  All other usage is a bug.
199  */
200 void platform_device_put(struct platform_device *pdev)
201 {
202 	if (pdev)
203 		put_device(&pdev->dev);
204 }
205 EXPORT_SYMBOL_GPL(platform_device_put);
206 
207 static void platform_device_release(struct device *dev)
208 {
209 	struct platform_object *pa = container_of(dev, struct platform_object,
210 						  pdev.dev);
211 
212 	of_device_node_put(&pa->pdev.dev);
213 	kfree(pa->pdev.dev.platform_data);
214 	kfree(pa->pdev.mfd_cell);
215 	kfree(pa->pdev.resource);
216 	kfree(pa->pdev.driver_override);
217 	kfree(pa);
218 }
219 
220 /**
221  * platform_device_alloc - create a platform device
222  * @name: base name of the device we're adding
223  * @id: instance id
224  *
225  * Create a platform device object which can have other objects attached
226  * to it, and which will have attached objects freed when it is released.
227  */
228 struct platform_device *platform_device_alloc(const char *name, int id)
229 {
230 	struct platform_object *pa;
231 
232 	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
233 	if (pa) {
234 		strcpy(pa->name, name);
235 		pa->pdev.name = pa->name;
236 		pa->pdev.id = id;
237 		device_initialize(&pa->pdev.dev);
238 		pa->pdev.dev.release = platform_device_release;
239 		arch_setup_pdev_archdata(&pa->pdev);
240 	}
241 
242 	return pa ? &pa->pdev : NULL;
243 }
244 EXPORT_SYMBOL_GPL(platform_device_alloc);
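
/*
 * Editorial usage sketch (not part of the original file): the hand-rolled
 * registration sequence built from platform_device_alloc(),
 * platform_device_add_resources(), platform_device_add_data() and
 * platform_device_add(), with the error path going through
 * platform_device_put().  Anything arch-specific in pdev->archdata could be
 * adjusted between the alloc and the add, as described for
 * arch_setup_pdev_archdata() above.  All names, addresses and the platform
 * data layout are hypothetical.
 *
 *	struct hyp_pdata {
 *		int flags;
 *	};
 *
 *	static struct resource hyp_res[] = {
 *		DEFINE_RES_MEM(0x10000000, 0x1000),
 *	};
 *	static struct hyp_pdata hyp_pdata = { .flags = 1 };
 *
 *	static int __init hyp_register(void)
 *	{
 *		struct platform_device *pdev;
 *		int ret;
 *
 *		pdev = platform_device_alloc("hypothetical-dev", 0);
 *		if (!pdev)
 *			return -ENOMEM;
 *
 *		ret = platform_device_add_resources(pdev, hyp_res,
 *						    ARRAY_SIZE(hyp_res));
 *		if (!ret)
 *			ret = platform_device_add_data(pdev, &hyp_pdata,
 *						       sizeof(hyp_pdata));
 *		if (!ret)
 *			ret = platform_device_add(pdev);
 *		if (ret)
 *			platform_device_put(pdev);
 *		return ret;
 *	}
 */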
245 
246 /**
247  * platform_device_add_resources - add resources to a platform device
248  * @pdev: platform device allocated by platform_device_alloc to add resources to
249  * @res: set of resources that needs to be allocated for the device
250  * @num: number of resources
251  *
252  * Add a copy of the resources to the platform device.  The memory
253  * associated with the resources will be freed when the platform device is
254  * released.
255  */
256 int platform_device_add_resources(struct platform_device *pdev,
257 				  const struct resource *res, unsigned int num)
258 {
259 	struct resource *r = NULL;
260 
261 	if (res) {
262 		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
263 		if (!r)
264 			return -ENOMEM;
265 	}
266 
267 	kfree(pdev->resource);
268 	pdev->resource = r;
269 	pdev->num_resources = num;
270 	return 0;
271 }
272 EXPORT_SYMBOL_GPL(platform_device_add_resources);
273 
274 /**
275  * platform_device_add_data - add platform-specific data to a platform device
276  * @pdev: platform device allocated by platform_device_alloc to add the data to
277  * @data: platform specific data for this platform device
278  * @size: size of platform specific data
279  *
280  * Add a copy of platform specific data to the platform device's
281  * platform_data pointer.  The memory associated with the platform data
282  * will be freed when the platform device is released.
283  */
284 int platform_device_add_data(struct platform_device *pdev, const void *data,
285 			     size_t size)
286 {
287 	void *d = NULL;
288 
289 	if (data) {
290 		d = kmemdup(data, size, GFP_KERNEL);
291 		if (!d)
292 			return -ENOMEM;
293 	}
294 
295 	kfree(pdev->dev.platform_data);
296 	pdev->dev.platform_data = d;
297 	return 0;
298 }
299 EXPORT_SYMBOL_GPL(platform_device_add_data);
300 
301 /**
302  * platform_device_add - add a platform device to device hierarchy
303  * @pdev: platform device we're adding
304  *
305  * This is part 2 of platform_device_register(), though it may be called
306  * separately _iff_ pdev was allocated by platform_device_alloc().
307  */
308 int platform_device_add(struct platform_device *pdev)
309 {
310 	int i, ret;
311 
312 	if (!pdev)
313 		return -EINVAL;
314 
315 	if (!pdev->dev.parent)
316 		pdev->dev.parent = &platform_bus;
317 
318 	pdev->dev.bus = &platform_bus_type;
319 
320 	switch (pdev->id) {
321 	default:
322 		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
323 		break;
324 	case PLATFORM_DEVID_NONE:
325 		dev_set_name(&pdev->dev, "%s", pdev->name);
326 		break;
327 	case PLATFORM_DEVID_AUTO:
328 		/*
329 		 * Automatically allocated device ID. We mark it as such so
330 		 * that we remember it must be freed, and we append a suffix
331 		 * to avoid namespace collision with explicit IDs.
332 		 */
333 		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
334 		if (ret < 0)
335 			goto err_out;
336 		pdev->id = ret;
337 		pdev->id_auto = true;
338 		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
339 		break;
340 	}
341 
342 	for (i = 0; i < pdev->num_resources; i++) {
343 		struct resource *p, *r = &pdev->resource[i];
344 
345 		if (r->name == NULL)
346 			r->name = dev_name(&pdev->dev);
347 
348 		p = r->parent;
349 		if (!p) {
350 			if (resource_type(r) == IORESOURCE_MEM)
351 				p = &iomem_resource;
352 			else if (resource_type(r) == IORESOURCE_IO)
353 				p = &ioport_resource;
354 		}
355 
356 		if (p && insert_resource(p, r)) {
357 			dev_err(&pdev->dev, "failed to claim resource %d\n", i);
358 			ret = -EBUSY;
359 			goto failed;
360 		}
361 	}
362 
363 	pr_debug("Registering platform device '%s'. Parent at %s\n",
364 		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));
365 
366 	ret = device_add(&pdev->dev);
367 	if (ret == 0)
368 		return ret;
369 
370  failed:
371 	if (pdev->id_auto) {
372 		ida_simple_remove(&platform_devid_ida, pdev->id);
373 		pdev->id = PLATFORM_DEVID_AUTO;
374 	}
375 
376 	while (--i >= 0) {
377 		struct resource *r = &pdev->resource[i];
378 		if (r->parent)
379 			release_resource(r);
380 	}
381 
382  err_out:
383 	return ret;
384 }
385 EXPORT_SYMBOL_GPL(platform_device_add);
386 
387 /**
388  * platform_device_del - remove a platform-level device
389  * @pdev: platform device we're removing
390  *
391  * Note that this function will also release all memory- and port-based
392  * resources owned by the device (@pdev->resource).  This function must
393  * _only_ be called externally in error cases.  All other usage is a bug.
394  */
395 void platform_device_del(struct platform_device *pdev)
396 {
397 	int i;
398 
399 	if (pdev) {
400 		device_del(&pdev->dev);
401 
402 		if (pdev->id_auto) {
403 			ida_simple_remove(&platform_devid_ida, pdev->id);
404 			pdev->id = PLATFORM_DEVID_AUTO;
405 		}
406 
407 		for (i = 0; i < pdev->num_resources; i++) {
408 			struct resource *r = &pdev->resource[i];
409 			if (r->parent)
410 				release_resource(r);
411 		}
412 	}
413 }
414 EXPORT_SYMBOL_GPL(platform_device_del);
415 
416 /**
417  * platform_device_register - add a platform-level device
418  * @pdev: platform device we're adding
419  */
420 int platform_device_register(struct platform_device *pdev)
421 {
422 	device_initialize(&pdev->dev);
423 	arch_setup_pdev_archdata(pdev);
424 	return platform_device_add(pdev);
425 }
426 EXPORT_SYMBOL_GPL(platform_device_register);
427 
428 /**
429  * platform_device_unregister - unregister a platform-level device
430  * @pdev: platform device we're unregistering
431  *
432  * Unregistration is done in 2 steps. First we release all resources
433  * and remove it from the subsystem, then we drop the reference count by
434  * calling platform_device_put().
435  */
436 void platform_device_unregister(struct platform_device *pdev)
437 {
438 	platform_device_del(pdev);
439 	platform_device_put(pdev);
440 }
441 EXPORT_SYMBOL_GPL(platform_device_unregister);
442 
443 /**
444  * platform_device_register_full - add a platform-level device with
445  * resources and platform-specific data
446  *
447  * @pdevinfo: data used to create device
448  *
449  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
450  */
451 struct platform_device *platform_device_register_full(
452 		const struct platform_device_info *pdevinfo)
453 {
454 	int ret = -ENOMEM;
455 	struct platform_device *pdev;
456 
457 	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
458 	if (!pdev)
459 		goto err_alloc;
460 
461 	pdev->dev.parent = pdevinfo->parent;
462 	pdev->dev.fwnode = pdevinfo->fwnode;
463 
464 	if (pdevinfo->dma_mask) {
465 		 * This memory isn't freed when the device is put; I don't
466 		 * have a nice idea for that, though.  Conceptually
467 		 * I don't have a nice idea for that though.  Conceptually
468 		 * dma_mask in struct device should not be a pointer.
469 		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
470 		 */
471 		pdev->dev.dma_mask =
472 			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
473 		if (!pdev->dev.dma_mask)
474 			goto err;
475 
476 		*pdev->dev.dma_mask = pdevinfo->dma_mask;
477 		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
478 	}
479 
480 	ret = platform_device_add_resources(pdev,
481 			pdevinfo->res, pdevinfo->num_res);
482 	if (ret)
483 		goto err;
484 
485 	ret = platform_device_add_data(pdev,
486 			pdevinfo->data, pdevinfo->size_data);
487 	if (ret)
488 		goto err;
489 
490 	ret = platform_device_add(pdev);
491 	if (ret) {
492 err:
493 		ACPI_COMPANION_SET(&pdev->dev, NULL);
494 		kfree(pdev->dev.dma_mask);
495 
496 err_alloc:
497 		platform_device_put(pdev);
498 		return ERR_PTR(ret);
499 	}
500 
501 	return pdev;
502 }
503 EXPORT_SYMBOL_GPL(platform_device_register_full);
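
/*
 * Editorial usage sketch (not part of the original file): describing the
 * device, its resources and its DMA mask in one platform_device_info and
 * registering it in a single call, with an automatically allocated id.
 * The name, addresses and IRQ number are hypothetical; DMA_BIT_MASK() is
 * from <linux/dma-mapping.h> and DEFINE_RES_*() from <linux/ioport.h>.
 *
 *	static const struct resource hyp_res[] = {
 *		DEFINE_RES_MEM(0x10001000, 0x100),
 *		DEFINE_RES_IRQ(23),
 *	};
 *
 *	static int __init hyp_register(void)
 *	{
 *		struct platform_device_info info = {
 *			.name		= "hypothetical-dma-dev",
 *			.id		= PLATFORM_DEVID_AUTO,
 *			.res		= hyp_res,
 *			.num_res	= ARRAY_SIZE(hyp_res),
 *			.dma_mask	= DMA_BIT_MASK(32),
 *		};
 *		struct platform_device *pdev;
 *
 *		pdev = platform_device_register_full(&info);
 *		return PTR_ERR_OR_ZERO(pdev);
 *	}
 */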
504 
505 static int platform_drv_probe(struct device *_dev)
506 {
507 	struct platform_driver *drv = to_platform_driver(_dev->driver);
508 	struct platform_device *dev = to_platform_device(_dev);
509 	int ret;
510 
511 	ret = of_clk_set_defaults(_dev->of_node, false);
512 	if (ret < 0)
513 		return ret;
514 
515 	ret = dev_pm_domain_attach(_dev, true);
516 	if (ret != -EPROBE_DEFER) {
517 		ret = drv->probe(dev);
518 		if (ret)
519 			dev_pm_domain_detach(_dev, true);
520 	}
521 
522 	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
523 		dev_warn(_dev, "probe deferral not supported\n");
524 		ret = -ENXIO;
525 	}
526 
527 	return ret;
528 }
529 
530 static int platform_drv_probe_fail(struct device *_dev)
531 {
532 	return -ENXIO;
533 }
534 
535 static int platform_drv_remove(struct device *_dev)
536 {
537 	struct platform_driver *drv = to_platform_driver(_dev->driver);
538 	struct platform_device *dev = to_platform_device(_dev);
539 	int ret;
540 
541 	ret = drv->remove(dev);
542 	dev_pm_domain_detach(_dev, true);
543 
544 	return ret;
545 }
546 
547 static void platform_drv_shutdown(struct device *_dev)
548 {
549 	struct platform_driver *drv = to_platform_driver(_dev->driver);
550 	struct platform_device *dev = to_platform_device(_dev);
551 
552 	drv->shutdown(dev);
553 	dev_pm_domain_detach(_dev, true);
554 }
555 
556 /**
557  * __platform_driver_register - register a driver for platform-level devices
558  * @drv: platform driver structure
559  * @owner: owning module/driver
560  */
561 int __platform_driver_register(struct platform_driver *drv,
562 				struct module *owner)
563 {
564 	drv->driver.owner = owner;
565 	drv->driver.bus = &platform_bus_type;
566 	if (drv->probe)
567 		drv->driver.probe = platform_drv_probe;
568 	if (drv->remove)
569 		drv->driver.remove = platform_drv_remove;
570 	if (drv->shutdown)
571 		drv->driver.shutdown = platform_drv_shutdown;
572 
573 	return driver_register(&drv->driver);
574 }
575 EXPORT_SYMBOL_GPL(__platform_driver_register);
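
/*
 * Editorial usage sketch (not part of the original file): drivers normally
 * reach __platform_driver_register() through the platform_driver_register()
 * or module_platform_driver() wrappers in <linux/platform_device.h>, which
 * supply THIS_MODULE as @owner.  The driver below is hypothetical.
 *
 *	static int hyp_probe(struct platform_device *pdev)
 *	{
 *		return 0;
 *	}
 *
 *	static int hyp_remove(struct platform_device *pdev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct platform_driver hyp_driver = {
 *		.probe	= hyp_probe,
 *		.remove	= hyp_remove,
 *		.driver	= {
 *			.name = "hypothetical-dev",
 *		},
 *	};
 *	module_platform_driver(hyp_driver);
 */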
576 
577 /**
578  * platform_driver_unregister - unregister a driver for platform-level devices
579  * @drv: platform driver structure
580  */
581 void platform_driver_unregister(struct platform_driver *drv)
582 {
583 	driver_unregister(&drv->driver);
584 }
585 EXPORT_SYMBOL_GPL(platform_driver_unregister);
586 
587 /**
588  * __platform_driver_probe - register driver for non-hotpluggable device
589  * @drv: platform driver structure
590  * @probe: the driver probe routine, probably from an __init section
591  * @module: module which will be the owner of the driver
592  *
593  * Use this instead of platform_driver_register() when you know the device
594  * is not hotpluggable and has already been registered, and you want to
595  * remove its run-once probe() infrastructure from memory after the driver
596  * has bound to the device.
597  *
598  * One typical use for this would be with drivers for controllers integrated
599  * into system-on-chip processors, where the controller devices have been
600  * configured as part of board setup.
601  *
602  * Note that this is incompatible with deferred probing.
603  *
604  * Returns zero if the driver registered and bound to a device, else returns
605  * a negative error code and with the driver not registered.
606  */
607 int __init_or_module __platform_driver_probe(struct platform_driver *drv,
608 		int (*probe)(struct platform_device *), struct module *module)
609 {
610 	int retval, code;
611 
612 	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
613 		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
614 			 drv->driver.name, __func__);
615 		return -EINVAL;
616 	}
617 
618 	/*
619 	 * We have to run our probes synchronously because we check if
620 	 * we find any devices to bind to and exit with error if there
621 	 * are any.
622 	 */
623 	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
624 
625 	/*
626 	 * Prevent driver from requesting probe deferral to avoid further
627 	 * futile probe attempts.
628 	 */
629 	drv->prevent_deferred_probe = true;
630 
631 	/* make sure driver won't have bind/unbind attributes */
632 	drv->driver.suppress_bind_attrs = true;
633 
634 	/* temporary section violation during probe() */
635 	drv->probe = probe;
636 	retval = code = __platform_driver_register(drv, module);
637 
638 	/*
639 	 * Fixup that section violation, being paranoid about code scanning
640 	 * the list of drivers in order to probe new devices.  Check to see
641 	 * if the probe was successful, and make sure any forced probes of
642 	 * new devices fail.
643 	 */
644 	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
645 	drv->probe = NULL;
646 	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
647 		retval = -ENODEV;
648 	drv->driver.probe = platform_drv_probe_fail;
649 	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
650 
651 	if (code != retval)
652 		platform_driver_unregister(drv);
653 	return retval;
654 }
655 EXPORT_SYMBOL_GPL(__platform_driver_probe);
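
/*
 * Editorial usage sketch (not part of the original file): a driver for a
 * board-registered, non-hotpluggable controller handing its __init probe()
 * to platform_driver_probe() (the wrapper that supplies THIS_MODULE) so the
 * probe code can be discarded after init.  Names are hypothetical.
 *
 *	static int __init hyp_probe(struct platform_device *pdev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct platform_driver hyp_driver = {
 *		.driver = {
 *			.name = "hypothetical-soc-ctrl",
 *		},
 *	};
 *
 *	static int __init hyp_init(void)
 *	{
 *		return platform_driver_probe(&hyp_driver, hyp_probe);
 *	}
 *	module_init(hyp_init);
 */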
656 
657 /**
658  * __platform_create_bundle - register driver and create corresponding device
659  * @driver: platform driver structure
660  * @probe: the driver probe routine, probably from an __init section
661  * @res: set of resources that needs to be allocated for the device
662  * @n_res: number of resources
663  * @data: platform specific data for this platform device
664  * @size: size of platform specific data
665  * @module: module which will be the owner of the driver
666  *
667  * Use this in legacy-style modules that probe hardware directly and
668  * register a single platform device and corresponding platform driver.
669  *
670  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
671  */
672 struct platform_device * __init_or_module __platform_create_bundle(
673 			struct platform_driver *driver,
674 			int (*probe)(struct platform_device *),
675 			struct resource *res, unsigned int n_res,
676 			const void *data, size_t size, struct module *module)
677 {
678 	struct platform_device *pdev;
679 	int error;
680 
681 	pdev = platform_device_alloc(driver->driver.name, -1);
682 	if (!pdev) {
683 		error = -ENOMEM;
684 		goto err_out;
685 	}
686 
687 	error = platform_device_add_resources(pdev, res, n_res);
688 	if (error)
689 		goto err_pdev_put;
690 
691 	error = platform_device_add_data(pdev, data, size);
692 	if (error)
693 		goto err_pdev_put;
694 
695 	error = platform_device_add(pdev);
696 	if (error)
697 		goto err_pdev_put;
698 
699 	error = __platform_driver_probe(driver, probe, module);
700 	if (error)
701 		goto err_pdev_del;
702 
703 	return pdev;
704 
705 err_pdev_del:
706 	platform_device_del(pdev);
707 err_pdev_put:
708 	platform_device_put(pdev);
709 err_out:
710 	return ERR_PTR(error);
711 }
712 EXPORT_SYMBOL_GPL(__platform_create_bundle);
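
/*
 * Editorial usage sketch (not part of the original file): a legacy-style
 * module creating its device/driver pair in one call through the
 * platform_create_bundle() wrapper (which supplies THIS_MODULE).  The I/O
 * port range and all names are hypothetical.
 *
 *	static int __init hyp_probe(struct platform_device *pdev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct resource hyp_res[] = {
 *		DEFINE_RES_IO(0x3f8, 8),
 *	};
 *
 *	static struct platform_driver hyp_driver = {
 *		.driver = {
 *			.name = "hypothetical-legacy",
 *		},
 *	};
 *
 *	static struct platform_device *hyp_pdev;
 *
 *	static int __init hyp_init(void)
 *	{
 *		hyp_pdev = platform_create_bundle(&hyp_driver, hyp_probe,
 *						  hyp_res, ARRAY_SIZE(hyp_res),
 *						  NULL, 0);
 *		return PTR_ERR_OR_ZERO(hyp_pdev);
 *	}
 *	module_init(hyp_init);
 */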
713 
714 /* modalias support enables more hands-off userspace setup:
715  * (a) environment variable lets new-style hotplug events work once system is
716  *     fully running:  "modprobe $MODALIAS"
717  * (b) sysfs attribute lets new-style coldplug recover from hotplug events
718  *     mishandled before system is fully running:  "modprobe $(cat modalias)"
719  */
720 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
721 			     char *buf)
722 {
723 	struct platform_device	*pdev = to_platform_device(dev);
724 	int len;
725 
726 	len = of_device_get_modalias(dev, buf, PAGE_SIZE -1);
727 	if (len != -ENODEV)
728 		return len;
729 
730 	len = acpi_device_modalias(dev, buf, PAGE_SIZE -1);
731 	if (len != -ENODEV)
732 		return len;
733 
734 	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
735 
736 	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
737 }
738 static DEVICE_ATTR_RO(modalias);
739 
740 static ssize_t driver_override_store(struct device *dev,
741 				     struct device_attribute *attr,
742 				     const char *buf, size_t count)
743 {
744 	struct platform_device *pdev = to_platform_device(dev);
745 	char *driver_override, *old = pdev->driver_override, *cp;
746 
747 	if (count > PATH_MAX)
748 		return -EINVAL;
749 
750 	driver_override = kstrndup(buf, count, GFP_KERNEL);
751 	if (!driver_override)
752 		return -ENOMEM;
753 
754 	cp = strchr(driver_override, '\n');
755 	if (cp)
756 		*cp = '\0';
757 
758 	if (strlen(driver_override)) {
759 		pdev->driver_override = driver_override;
760 	} else {
761 		kfree(driver_override);
762 		pdev->driver_override = NULL;
763 	}
764 
765 	kfree(old);
766 
767 	return count;
768 }
769 
770 static ssize_t driver_override_show(struct device *dev,
771 				    struct device_attribute *attr, char *buf)
772 {
773 	struct platform_device *pdev = to_platform_device(dev);
774 
775 	return sprintf(buf, "%s\n", pdev->driver_override);
776 }
777 static DEVICE_ATTR_RW(driver_override);
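
/* driver_override usage (editorial note, not part of the original file):
 * from userspace, writing a driver name to the attribute restricts the next
 * (re)bind of that device to the named driver, e.g. for a hypothetical
 * device "foo.0":
 *	echo vfio-platform > /sys/bus/platform/devices/foo.0/driver_override
 *	echo foo.0 > /sys/bus/platform/drivers_probe
 * Writing an empty string clears the override again.
 */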
778 
779 
780 static struct attribute *platform_dev_attrs[] = {
781 	&dev_attr_modalias.attr,
782 	&dev_attr_driver_override.attr,
783 	NULL,
784 };
785 ATTRIBUTE_GROUPS(platform_dev);
786 
787 static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
788 {
789 	struct platform_device	*pdev = to_platform_device(dev);
790 	int rc;
791 
792 	/* Some devices have extra OF data and an OF-style MODALIAS */
793 	rc = of_device_uevent_modalias(dev, env);
794 	if (rc != -ENODEV)
795 		return rc;
796 
797 	rc = acpi_device_uevent_modalias(dev, env);
798 	if (rc != -ENODEV)
799 		return rc;
800 
801 	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
802 			pdev->name);
803 	return 0;
804 }
805 
806 static const struct platform_device_id *platform_match_id(
807 			const struct platform_device_id *id,
808 			struct platform_device *pdev)
809 {
810 	while (id->name[0]) {
811 		if (strcmp(pdev->name, id->name) == 0) {
812 			pdev->id_entry = id;
813 			return id;
814 		}
815 		id++;
816 	}
817 	return NULL;
818 }
819 
820 /**
821  * platform_match - bind platform device to platform driver.
822  * @dev: device.
823  * @drv: driver.
824  *
825  * Platform device IDs are assumed to be encoded like this:
826  * "<name><instance>", where <name> is a short description of the type of
827  * device, like "pci" or "floppy", and <instance> is the enumerated
828  * instance of the device, like '0' or '42'.  Driver IDs are simply
829  * "<name>".  So, extract the <name> from the platform_device structure,
830  * and compare it against the name of the driver. Return whether they match
831  * or not.
832  */
833 static int platform_match(struct device *dev, struct device_driver *drv)
834 {
835 	struct platform_device *pdev = to_platform_device(dev);
836 	struct platform_driver *pdrv = to_platform_driver(drv);
837 
838 	/* When driver_override is set, only bind to the matching driver */
839 	if (pdev->driver_override)
840 		return !strcmp(pdev->driver_override, drv->name);
841 
842 	/* Attempt an OF style match first */
843 	if (of_driver_match_device(dev, drv))
844 		return 1;
845 
846 	/* Then try ACPI style match */
847 	if (acpi_driver_match_device(dev, drv))
848 		return 1;
849 
850 	/* Then try to match against the id table */
851 	if (pdrv->id_table)
852 		return platform_match_id(pdrv->id_table, pdev) != NULL;
853 
854 	/* fall-back to driver name match */
855 	return (strcmp(pdev->name, drv->name) == 0);
856 }
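
/*
 * Editorial usage sketch (not part of the original file): how a driver's
 * id_table feeds the platform_match_id() step above.  The id strings and
 * hyp_probe() are hypothetical.
 *
 *	static const struct platform_device_id hyp_ids[] = {
 *		{ "hypothetical-v1", 0 },
 *		{ "hypothetical-v2", 1 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(platform, hyp_ids);
 *
 *	static struct platform_driver hyp_driver = {
 *		.probe		= hyp_probe,
 *		.id_table	= hyp_ids,
 *		.driver		= {
 *			.name = "hypothetical",
 *		},
 *	};
 *
 * A device registered under the name "hypothetical-v2" binds to this driver,
 * and its probe() sees pdev->id_entry->driver_data == 1.
 */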
857 
858 #ifdef CONFIG_PM_SLEEP
859 
860 static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
861 {
862 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
863 	struct platform_device *pdev = to_platform_device(dev);
864 	int ret = 0;
865 
866 	if (dev->driver && pdrv->suspend)
867 		ret = pdrv->suspend(pdev, mesg);
868 
869 	return ret;
870 }
871 
872 static int platform_legacy_resume(struct device *dev)
873 {
874 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
875 	struct platform_device *pdev = to_platform_device(dev);
876 	int ret = 0;
877 
878 	if (dev->driver && pdrv->resume)
879 		ret = pdrv->resume(pdev);
880 
881 	return ret;
882 }
883 
884 #endif /* CONFIG_PM_SLEEP */
885 
886 #ifdef CONFIG_SUSPEND
887 
888 int platform_pm_suspend(struct device *dev)
889 {
890 	struct device_driver *drv = dev->driver;
891 	int ret = 0;
892 
893 	if (!drv)
894 		return 0;
895 
896 	if (drv->pm) {
897 		if (drv->pm->suspend)
898 			ret = drv->pm->suspend(dev);
899 	} else {
900 		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
901 	}
902 
903 	return ret;
904 }
905 
906 int platform_pm_resume(struct device *dev)
907 {
908 	struct device_driver *drv = dev->driver;
909 	int ret = 0;
910 
911 	if (!drv)
912 		return 0;
913 
914 	if (drv->pm) {
915 		if (drv->pm->resume)
916 			ret = drv->pm->resume(dev);
917 	} else {
918 		ret = platform_legacy_resume(dev);
919 	}
920 
921 	return ret;
922 }
923 
924 #endif /* CONFIG_SUSPEND */
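
/*
 * Editorial usage sketch (not part of the original file): the dev_pm_ops
 * path preferred by platform_pm_suspend()/platform_pm_resume() above; the
 * legacy pdrv->suspend/resume hooks are only consulted when drv->pm is NULL.
 * Callback bodies and names are hypothetical.
 *
 *	static int __maybe_unused hyp_suspend(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static int __maybe_unused hyp_resume(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(hyp_pm_ops, hyp_suspend, hyp_resume);
 *
 *	static struct platform_driver hyp_driver = {
 *		.driver = {
 *			.name	= "hypothetical",
 *			.pm	= &hyp_pm_ops,
 *		},
 *	};
 */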
925 
926 #ifdef CONFIG_HIBERNATE_CALLBACKS
927 
928 int platform_pm_freeze(struct device *dev)
929 {
930 	struct device_driver *drv = dev->driver;
931 	int ret = 0;
932 
933 	if (!drv)
934 		return 0;
935 
936 	if (drv->pm) {
937 		if (drv->pm->freeze)
938 			ret = drv->pm->freeze(dev);
939 	} else {
940 		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
941 	}
942 
943 	return ret;
944 }
945 
946 int platform_pm_thaw(struct device *dev)
947 {
948 	struct device_driver *drv = dev->driver;
949 	int ret = 0;
950 
951 	if (!drv)
952 		return 0;
953 
954 	if (drv->pm) {
955 		if (drv->pm->thaw)
956 			ret = drv->pm->thaw(dev);
957 	} else {
958 		ret = platform_legacy_resume(dev);
959 	}
960 
961 	return ret;
962 }
963 
964 int platform_pm_poweroff(struct device *dev)
965 {
966 	struct device_driver *drv = dev->driver;
967 	int ret = 0;
968 
969 	if (!drv)
970 		return 0;
971 
972 	if (drv->pm) {
973 		if (drv->pm->poweroff)
974 			ret = drv->pm->poweroff(dev);
975 	} else {
976 		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
977 	}
978 
979 	return ret;
980 }
981 
982 int platform_pm_restore(struct device *dev)
983 {
984 	struct device_driver *drv = dev->driver;
985 	int ret = 0;
986 
987 	if (!drv)
988 		return 0;
989 
990 	if (drv->pm) {
991 		if (drv->pm->restore)
992 			ret = drv->pm->restore(dev);
993 	} else {
994 		ret = platform_legacy_resume(dev);
995 	}
996 
997 	return ret;
998 }
999 
1000 #endif /* CONFIG_HIBERNATE_CALLBACKS */
1001 
1002 static const struct dev_pm_ops platform_dev_pm_ops = {
1003 	.runtime_suspend = pm_generic_runtime_suspend,
1004 	.runtime_resume = pm_generic_runtime_resume,
1005 	USE_PLATFORM_PM_SLEEP_OPS
1006 };
1007 
1008 struct bus_type platform_bus_type = {
1009 	.name		= "platform",
1010 	.dev_groups	= platform_dev_groups,
1011 	.match		= platform_match,
1012 	.uevent		= platform_uevent,
1013 	.pm		= &platform_dev_pm_ops,
1014 };
1015 EXPORT_SYMBOL_GPL(platform_bus_type);
1016 
1017 int __init platform_bus_init(void)
1018 {
1019 	int error;
1020 
1021 	early_platform_cleanup();
1022 
1023 	error = device_register(&platform_bus);
1024 	if (error)
1025 		return error;
1026 	error =  bus_register(&platform_bus_type);
1027 	if (error)
1028 		device_unregister(&platform_bus);
1029 	of_platform_register_reconfig_notifier();
1030 	return error;
1031 }
1032 
1033 #ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
1034 u64 dma_get_required_mask(struct device *dev)
1035 {
1036 	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
1037 	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
1038 	u64 mask;
1039 
1040 	if (!high_totalram) {
1041 		/* convert to mask just covering totalram */
1042 		low_totalram = (1 << (fls(low_totalram) - 1));
1043 		low_totalram += low_totalram - 1;
1044 		mask = low_totalram;
1045 	} else {
1046 		high_totalram = (1 << (fls(high_totalram) - 1));
1047 		high_totalram += high_totalram - 1;
1048 		mask = (((u64)high_totalram) << 32) + 0xffffffff;
1049 	}
1050 	return mask;
1051 }
1052 EXPORT_SYMBOL_GPL(dma_get_required_mask);
1053 #endif
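
/*
 * Editorial worked example (not part of the original file): with 4 KiB
 * pages and exactly 1 GiB of RAM, max_pfn is 0x40000, so low_totalram is
 * 0x3ffff << 12 = 0x3ffff000 and high_totalram is 0.  fls(0x3ffff000) is
 * 30, so low_totalram is rounded to 0x20000000 and widened to 0x3fffffff,
 * which is exactly the last RAM address; that value is returned as the
 * required mask.  With more than 4 GiB the same rounding is applied to the
 * high 32 bits and the low word is filled with 0xffffffff.
 */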
1054 
1055 static __initdata LIST_HEAD(early_platform_driver_list);
1056 static __initdata LIST_HEAD(early_platform_device_list);
1057 
1058 /**
1059  * early_platform_driver_register - register early platform driver
1060  * @epdrv: early_platform driver structure
1061  * @buf: string passed from early_param()
1062  *
1063  * Helper function for early_platform_init() / early_platform_init_buffer()
1064  */
1065 int __init early_platform_driver_register(struct early_platform_driver *epdrv,
1066 					  char *buf)
1067 {
1068 	char *tmp;
1069 	int n;
1070 
1071 	/* Simply add the driver to the end of the global list.
1072 	 * Drivers will by default be put on the list in compiled-in order.
1073 	 */
1074 	if (!epdrv->list.next) {
1075 		INIT_LIST_HEAD(&epdrv->list);
1076 		list_add_tail(&epdrv->list, &early_platform_driver_list);
1077 	}
1078 
1079 	/* If the user has specified a device then make sure its driver
1080 	 * gets prioritized. The driver of the last device specified on
1081 	 * the command line will be put first on the list.
1082 	 */
1083 	n = strlen(epdrv->pdrv->driver.name);
1084 	if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
1085 		list_move(&epdrv->list, &early_platform_driver_list);
1086 
1087 		/* Allow passing parameters after device name */
1088 		if (buf[n] == '\0' || buf[n] == ',')
1089 			epdrv->requested_id = -1;
1090 		else {
1091 			epdrv->requested_id = simple_strtoul(&buf[n + 1],
1092 							     &tmp, 10);
1093 
1094 			if (buf[n] != '.' || (tmp == &buf[n + 1])) {
1095 				epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
1096 				n = 0;
1097 			} else
1098 				n += strcspn(&buf[n + 1], ",") + 1;
1099 		}
1100 
1101 		if (buf[n] == ',')
1102 			n++;
1103 
1104 		if (epdrv->bufsize) {
1105 			memcpy(epdrv->buffer, &buf[n],
1106 			       min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
1107 			epdrv->buffer[epdrv->bufsize - 1] = '\0';
1108 		}
1109 	}
1110 
1111 	return 0;
1112 }
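
/*
 * Editorial usage sketch (not part of the original file): drivers normally
 * reach early_platform_driver_register() through the early_platform_init()
 * macro in <linux/platform_device.h>, which also wires up the early_param()
 * hook.  The "earlytimer" class string matches what some architectures
 * probe from their time_init(); the driver and its probe are hypothetical.
 *
 *	static struct platform_driver hyp_timer_driver = {
 *		.probe	= hyp_timer_probe,
 *		.driver	= {
 *			.name = "hypothetical-timer",
 *		},
 *	};
 *
 *	early_platform_init("earlytimer", &hyp_timer_driver);
 */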
1113 
1114 /**
1115  * early_platform_add_devices - adds a number of early platform devices
1116  * @devs: array of early platform devices to add
1117  * @num: number of early platform devices in array
1118  *
1119  * Used by early architecture code to register early platform devices and
1120  * their platform data.
1121  */
1122 void __init early_platform_add_devices(struct platform_device **devs, int num)
1123 {
1124 	struct device *dev;
1125 	int i;
1126 
1127 	/* simply add the devices to the list */
1128 	for (i = 0; i < num; i++) {
1129 		dev = &devs[i]->dev;
1130 
1131 		if (!dev->devres_head.next) {
1132 			pm_runtime_early_init(dev);
1133 			INIT_LIST_HEAD(&dev->devres_head);
1134 			list_add_tail(&dev->devres_head,
1135 				      &early_platform_device_list);
1136 		}
1137 	}
1138 }
1139 
1140 /**
1141  * early_platform_driver_register_all - register early platform drivers
1142  * @class_str: string to identify early platform driver class
1143  *
1144  * Used by architecture code to register all early platform drivers
1145  * for a certain class. If this call is omitted then only early platform
1146  * drivers whose class was given on the kernel command line will be registered.
1147  */
1148 void __init early_platform_driver_register_all(char *class_str)
1149 {
1150 	/* The "class_str" parameter may or may not be present on the kernel
1151 	 * command line. If it is present then there may be more than one
1152 	 * matching parameter.
1153 	 *
1154 	 * Since we register our early platform drivers using early_param()
1155 	 * we need to make sure that they also get registered in the case
1156 	 * when the parameter is missing from the kernel command line.
1157 	 *
1158 	 * We use parse_early_options() to make sure the early_param() gets
1159 	 * called at least once. The early_param() may be called more than
1160 	 * once since the name of the preferred device may be specified on
1161 	 * the kernel command line. early_platform_driver_register() handles
1162 	 * this case for us.
1163 	 */
1164 	parse_early_options(class_str);
1165 }
1166 
1167 /**
1168  * early_platform_match - find early platform device matching driver
1169  * @epdrv: early platform driver structure
1170  * @id: id to match against
1171  */
1172 static struct platform_device * __init
1173 early_platform_match(struct early_platform_driver *epdrv, int id)
1174 {
1175 	struct platform_device *pd;
1176 
1177 	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
1178 		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
1179 			if (pd->id == id)
1180 				return pd;
1181 
1182 	return NULL;
1183 }
1184 
1185 /**
1186  * early_platform_left - check if early platform driver has matching devices
1187  * @epdrv: early platform driver structure
1188  * @id: return 1 if a matching device with this id or above exists
1189  */
1190 static int __init early_platform_left(struct early_platform_driver *epdrv,
1191 				       int id)
1192 {
1193 	struct platform_device *pd;
1194 
1195 	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
1196 		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
1197 			if (pd->id >= id)
1198 				return 1;
1199 
1200 	return 0;
1201 }
1202 
1203 /**
1204  * early_platform_driver_probe_id - probe drivers matching class_str and id
1205  * @class_str: string to identify early platform driver class
1206  * @id: id to match against
1207  * @nr_probe: number of platform devices to successfully probe before exiting
1208  */
1209 static int __init early_platform_driver_probe_id(char *class_str,
1210 						 int id,
1211 						 int nr_probe)
1212 {
1213 	struct early_platform_driver *epdrv;
1214 	struct platform_device *match;
1215 	int match_id;
1216 	int n = 0;
1217 	int left = 0;
1218 
1219 	list_for_each_entry(epdrv, &early_platform_driver_list, list) {
1220 		/* only use drivers matching our class_str */
1221 		if (strcmp(class_str, epdrv->class_str))
1222 			continue;
1223 
1224 		if (id == -2) {
1225 			match_id = epdrv->requested_id;
1226 			left = 1;
1227 
1228 		} else {
1229 			match_id = id;
1230 			left += early_platform_left(epdrv, id);
1231 
1232 			/* skip requested id */
1233 			switch (epdrv->requested_id) {
1234 			case EARLY_PLATFORM_ID_ERROR:
1235 			case EARLY_PLATFORM_ID_UNSET:
1236 				break;
1237 			default:
1238 				if (epdrv->requested_id == id)
1239 					match_id = EARLY_PLATFORM_ID_UNSET;
1240 			}
1241 		}
1242 
1243 		switch (match_id) {
1244 		case EARLY_PLATFORM_ID_ERROR:
1245 			pr_warn("%s: unable to parse %s parameter\n",
1246 				class_str, epdrv->pdrv->driver.name);
1247 			/* fall-through */
1248 		case EARLY_PLATFORM_ID_UNSET:
1249 			match = NULL;
1250 			break;
1251 		default:
1252 			match = early_platform_match(epdrv, match_id);
1253 		}
1254 
1255 		if (match) {
1256 			/*
1257 			 * Set up a sensible init_name to enable
1258 			 * dev_name() and others to be used before the
1259 			 * rest of the driver core is initialized.
1260 			 */
1261 			if (!match->dev.init_name && slab_is_available()) {
1262 				if (match->id != -1)
1263 					match->dev.init_name =
1264 						kasprintf(GFP_KERNEL, "%s.%d",
1265 							  match->name,
1266 							  match->id);
1267 				else
1268 					match->dev.init_name =
1269 						kasprintf(GFP_KERNEL, "%s",
1270 							  match->name);
1271 
1272 				if (!match->dev.init_name)
1273 					return -ENOMEM;
1274 			}
1275 
1276 			if (epdrv->pdrv->probe(match))
1277 				pr_warn("%s: unable to probe %s early.\n",
1278 					class_str, match->name);
1279 			else
1280 				n++;
1281 		}
1282 
1283 		if (n >= nr_probe)
1284 			break;
1285 	}
1286 
1287 	if (left)
1288 		return n;
1289 	else
1290 		return -ENODEV;
1291 }
1292 
1293 /**
1294  * early_platform_driver_probe - probe a class of registered drivers
1295  * @class_str: string to identify early platform driver class
1296  * @nr_probe: number of platform devices to successfully probe before exiting
1297  * @user_only: only probe user specified early platform devices
1298  *
1299  * Used by architecture code to probe registered early platform drivers
1300  * within a certain class. For probe to happen a registered early platform
1301  * device matching a registered early platform driver is needed.
1302  */
1303 int __init early_platform_driver_probe(char *class_str,
1304 				       int nr_probe,
1305 				       int user_only)
1306 {
1307 	int k, n, i;
1308 
1309 	n = 0;
1310 	for (i = -2; n < nr_probe; i++) {
1311 		k = early_platform_driver_probe_id(class_str, i, nr_probe - n);
1312 
1313 		if (k < 0)
1314 			break;
1315 
1316 		n += k;
1317 
1318 		if (user_only)
1319 			break;
1320 	}
1321 
1322 	return n;
1323 }
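
/*
 * Editorial usage sketch (not part of the original file): the order in which
 * architecture code typically drives the early platform helpers, here from a
 * hypothetical time_init() that wants one early timer probed before the
 * driver core is up.  hyp_early_devs is a hypothetical array of
 * struct platform_device pointers describing the SoC's timers.
 *
 *	static void __init hyp_time_init(void)
 *	{
 *		early_platform_add_devices(hyp_early_devs,
 *					   ARRAY_SIZE(hyp_early_devs));
 *		early_platform_driver_register_all("earlytimer");
 *		early_platform_driver_probe("earlytimer", 1, 0);
 *	}
 */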
1324 
1325 /**
1326  * early_platform_cleanup - clean up early platform code
1327  */
1328 void __init early_platform_cleanup(void)
1329 {
1330 	struct platform_device *pd, *pd2;
1331 
1332 	/* clean up the devres list used to chain devices */
1333 	list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
1334 				 dev.devres_head) {
1335 		list_del(&pd->dev.devres_head);
1336 		memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
1337 	}
1338 }
1339 
1340