1 /*
2  * platform.c - platform 'pseudo' bus for legacy devices
3  *
4  * Copyright (c) 2002-3 Patrick Mochel
5  * Copyright (c) 2002-3 Open Source Development Labs
6  *
7  * This file is released under the GPLv2
8  *
9  * Please see Documentation/driver-model/platform.txt for more
10  * information.
11  */
12 
13 #include <linux/string.h>
14 #include <linux/platform_device.h>
15 #include <linux/of_device.h>
16 #include <linux/of_irq.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/bootmem.h>
21 #include <linux/err.h>
22 #include <linux/slab.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/pm_domain.h>
25 #include <linux/idr.h>
26 #include <linux/acpi.h>
27 #include <linux/clk/clk-conf.h>
28 #include <linux/limits.h>
29 
30 #include "base.h"
31 #include "power/power.h"
32 
33 /* For automatically allocated device IDs */
34 static DEFINE_IDA(platform_devid_ida);
35 
36 struct device platform_bus = {
37 	.init_name	= "platform",
38 };
39 EXPORT_SYMBOL_GPL(platform_bus);
40 
41 /**
42  * arch_setup_pdev_archdata - Allow manipulation of archdata before it is used
43  * @pdev: platform device
44  *
45  * This is called before platform_device_add() such that any pdev_archdata may
46  * be set up before the platform_notifier is called.  So if a user needs to
47  * manipulate any relevant information in the pdev_archdata, they can do:
48  *
49  *	platform_device_alloc()
50  *	... manipulate ...
51  *	platform_device_add()
52  *
53  * And if they don't care they can just call platform_device_register() and
54  * everything will just work out.
55  */
56 void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
57 {
58 }
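
/*
 * Example (sketch): the sequence described above, spelled out for a
 * hypothetical "foo" device.  What is actually written into pdev_archdata
 * is architecture-specific, so only the flow is shown here.
 *
 *	struct platform_device *pdev;
 *
 *	pdev = platform_device_alloc("foo", 0);
 *	if (!pdev)
 *		return -ENOMEM;
 *
 *	... fill in pdev->archdata as the architecture requires ...
 *
 *	return platform_device_add(pdev);
 */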
59 
60 /**
61  * platform_get_resource - get a resource for a device
62  * @dev: platform device
63  * @type: resource type
64  * @num: resource index
65  */
66 struct resource *platform_get_resource(struct platform_device *dev,
67 				       unsigned int type, unsigned int num)
68 {
69 	int i;
70 
71 	for (i = 0; i < dev->num_resources; i++) {
72 		struct resource *r = &dev->resource[i];
73 
74 		if (type == resource_type(r) && num-- == 0)
75 			return r;
76 	}
77 	return NULL;
78 }
79 EXPORT_SYMBOL_GPL(platform_get_resource);
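
/*
 * Example usage (sketch): a hypothetical "foo" driver probe looking up its
 * first memory region and mapping it with devm_ioremap_resource(), which
 * also copes with a NULL resource:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		void __iomem *base;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		base = devm_ioremap_resource(&pdev->dev, res);
 *		if (IS_ERR(base))
 *			return PTR_ERR(base);
 *		return 0;
 *	}
 */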
80 
81 /**
82  * platform_get_irq - get an IRQ for a device
83  * @dev: platform device
84  * @num: IRQ number index
85  */
86 int platform_get_irq(struct platform_device *dev, unsigned int num)
87 {
88 #ifdef CONFIG_SPARC
89 	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
90 	if (!dev || num >= dev->archdata.num_irqs)
91 		return -ENXIO;
92 	return dev->archdata.irqs[num];
93 #else
94 	struct resource *r;
95 	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
96 		int ret;
97 
98 		ret = of_irq_get(dev->dev.of_node, num);
99 		if (ret >= 0 || ret == -EPROBE_DEFER)
100 			return ret;
101 	}
102 
103 	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
104 	/*
105 	 * The resources may pass trigger flags to the irqs that need
106 	 * to be set up. It so happens that the trigger flags for
107 	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
108 	 * settings.
109 	 */
110 	if (r && r->flags & IORESOURCE_BITS)
111 		irqd_set_trigger_type(irq_get_irq_data(r->start),
112 				      r->flags & IORESOURCE_BITS);
113 
114 	return r ? r->start : -ENXIO;
115 #endif
116 }
117 EXPORT_SYMBOL_GPL(platform_get_irq);
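
/*
 * Example usage (sketch): fetching the first interrupt from a probe routine.
 * The return value may be -EPROBE_DEFER and should be propagated; the "foo"
 * handler and cookie below are illustrative only.
 *
 *	irq = platform_get_irq(pdev, 0);
 *	if (irq < 0)
 *		return irq;
 *
 *	ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
 *			       dev_name(&pdev->dev), foo);
 */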
118 
119 /**
120  * platform_get_resource_byname - get a resource for a device by name
121  * @dev: platform device
122  * @type: resource type
123  * @name: resource name
124  */
125 struct resource *platform_get_resource_byname(struct platform_device *dev,
126 					      unsigned int type,
127 					      const char *name)
128 {
129 	int i;
130 
131 	for (i = 0; i < dev->num_resources; i++) {
132 		struct resource *r = &dev->resource[i];
133 
134 		if (unlikely(!r->name))
135 			continue;
136 
137 		if (type == resource_type(r) && !strcmp(r->name, name))
138 			return r;
139 	}
140 	return NULL;
141 }
142 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
143 
144 /**
145  * platform_get_irq_byname - get an IRQ for a device by name
146  * @dev: platform device
147  * @name: IRQ name
148  */
149 int platform_get_irq_byname(struct platform_device *dev, const char *name)
150 {
151 	struct resource *r;
152 
153 	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
154 		int ret;
155 
156 		ret = of_irq_get_byname(dev->dev.of_node, name);
157 		if (ret >= 0 || ret == -EPROBE_DEFER)
158 			return ret;
159 	}
160 
161 	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
162 	return r ? r->start : -ENXIO;
163 }
164 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
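
/*
 * Example usage (sketch): a device that names its interrupts (for instance
 * via interrupt-names in its device tree node) can have them looked up by
 * name instead of by index.  The "tx"/"rx" names are illustrative only.
 *
 *	int tx_irq = platform_get_irq_byname(pdev, "tx");
 *	int rx_irq = platform_get_irq_byname(pdev, "rx");
 *
 *	if (tx_irq < 0)
 *		return tx_irq;
 *	if (rx_irq < 0)
 *		return rx_irq;
 */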
165 
166 /**
167  * platform_add_devices - add a number of platform devices
168  * @devs: array of platform devices to add
169  * @num: number of platform devices in array
170  */
171 int platform_add_devices(struct platform_device **devs, int num)
172 {
173 	int i, ret = 0;
174 
175 	for (i = 0; i < num; i++) {
176 		ret = platform_device_register(devs[i]);
177 		if (ret) {
178 			while (--i >= 0)
179 				platform_device_unregister(devs[i]);
180 			break;
181 		}
182 	}
183 
184 	return ret;
185 }
186 EXPORT_SYMBOL_GPL(platform_add_devices);
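
/*
 * Example usage (sketch): board setup code typically registers a static
 * table of devices in one call.  The "foo"/"bar" devices are illustrative
 * only.
 *
 *	static struct platform_device foo_device = {
 *		.name	= "foo",
 *		.id	= -1,
 *	};
 *
 *	static struct platform_device bar_device = {
 *		.name	= "bar",
 *		.id	= 0,
 *	};
 *
 *	static struct platform_device *board_devices[] __initdata = {
 *		&foo_device,
 *		&bar_device,
 *	};
 *
 *	static int __init board_init(void)
 *	{
 *		return platform_add_devices(board_devices,
 *					    ARRAY_SIZE(board_devices));
 *	}
 */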
187 
188 struct platform_object {
189 	struct platform_device pdev;
190 	char name[];
191 };
192 
193 /**
194  * platform_device_put - destroy a platform device
195  * @pdev: platform device to free
196  *
197  * Free all memory associated with a platform device.  This function must
198  * _only_ be externally called in error cases.  All other usage is a bug.
199  */
200 void platform_device_put(struct platform_device *pdev)
201 {
202 	if (pdev)
203 		put_device(&pdev->dev);
204 }
205 EXPORT_SYMBOL_GPL(platform_device_put);
206 
207 static void platform_device_release(struct device *dev)
208 {
209 	struct platform_object *pa = container_of(dev, struct platform_object,
210 						  pdev.dev);
211 
212 	of_device_node_put(&pa->pdev.dev);
213 	kfree(pa->pdev.dev.platform_data);
214 	kfree(pa->pdev.mfd_cell);
215 	kfree(pa->pdev.resource);
216 	kfree(pa->pdev.driver_override);
217 	kfree(pa);
218 }
219 
220 /**
221  * platform_device_alloc - create a platform device
222  * @name: base name of the device we're adding
223  * @id: instance id
224  *
225  * Create a platform device object which can have other objects attached
226  * to it, and which will have attached objects freed when it is released.
227  */
228 struct platform_device *platform_device_alloc(const char *name, int id)
229 {
230 	struct platform_object *pa;
231 
232 	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
233 	if (pa) {
234 		strcpy(pa->name, name);
235 		pa->pdev.name = pa->name;
236 		pa->pdev.id = id;
237 		device_initialize(&pa->pdev.dev);
238 		pa->pdev.dev.release = platform_device_release;
239 		arch_setup_pdev_archdata(&pa->pdev);
240 	}
241 
242 	return pa ? &pa->pdev : NULL;
243 }
244 EXPORT_SYMBOL_GPL(platform_device_alloc);
245 
246 /**
247  * platform_device_add_resources - add resources to a platform device
248  * @pdev: platform device allocated by platform_device_alloc to add resources to
249  * @res: set of resources that needs to be allocated for the device
250  * @num: number of resources
251  *
252  * Add a copy of the resources to the platform device.  The memory
253  * associated with the resources will be freed when the platform device is
254  * released.
255  */
256 int platform_device_add_resources(struct platform_device *pdev,
257 				  const struct resource *res, unsigned int num)
258 {
259 	struct resource *r = NULL;
260 
261 	if (res) {
262 		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
263 		if (!r)
264 			return -ENOMEM;
265 	}
266 
267 	kfree(pdev->resource);
268 	pdev->resource = r;
269 	pdev->num_resources = num;
270 	return 0;
271 }
272 EXPORT_SYMBOL_GPL(platform_device_add_resources);
273 
274 /**
275  * platform_device_add_data - add platform-specific data to a platform device
276  * @pdev: platform device allocated by platform_device_alloc to add data to
277  * @data: platform specific data for this platform device
278  * @size: size of platform specific data
279  *
280  * Add a copy of platform specific data to the platform device's
281  * platform_data pointer.  The memory associated with the platform data
282  * will be freed when the platform device is released.
283  */
284 int platform_device_add_data(struct platform_device *pdev, const void *data,
285 			     size_t size)
286 {
287 	void *d = NULL;
288 
289 	if (data) {
290 		d = kmemdup(data, size, GFP_KERNEL);
291 		if (!d)
292 			return -ENOMEM;
293 	}
294 
295 	kfree(pdev->dev.platform_data);
296 	pdev->dev.platform_data = d;
297 	return 0;
298 }
299 EXPORT_SYMBOL_GPL(platform_device_add_data);
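
/*
 * Example usage (sketch): both helpers copy their arguments into the device,
 * so the caller may pass initdata or stack temporaries.  The "foo" resource
 * table and platform data type are illustrative only; see
 * platform_device_add() below for registering the result.
 *
 *	static struct resource foo_resources[] = {
 *		DEFINE_RES_MEM(0x10000000, SZ_4K),
 *		DEFINE_RES_IRQ(42),
 *	};
 *	static struct foo_platform_data foo_pdata = { .clock_hz = 48000000 };
 *
 *	ret = platform_device_add_resources(pdev, foo_resources,
 *					    ARRAY_SIZE(foo_resources));
 *	if (!ret)
 *		ret = platform_device_add_data(pdev, &foo_pdata,
 *					       sizeof(foo_pdata));
 */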
300 
301 /**
302  * platform_device_add - add a platform device to device hierarchy
303  * @pdev: platform device we're adding
304  *
305  * This is part 2 of platform_device_register(), though it may be called
306  * separately _iff_ pdev was allocated by platform_device_alloc().
307  */
308 int platform_device_add(struct platform_device *pdev)
309 {
310 	int i, ret;
311 
312 	if (!pdev)
313 		return -EINVAL;
314 
315 	if (!pdev->dev.parent)
316 		pdev->dev.parent = &platform_bus;
317 
318 	pdev->dev.bus = &platform_bus_type;
319 
320 	switch (pdev->id) {
321 	default:
322 		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
323 		break;
324 	case PLATFORM_DEVID_NONE:
325 		dev_set_name(&pdev->dev, "%s", pdev->name);
326 		break;
327 	case PLATFORM_DEVID_AUTO:
328 		/*
329 		 * Automatically allocated device ID. We mark it as such so
330 		 * that we remember it must be freed, and we append a suffix
331 		 * to avoid namespace collision with explicit IDs.
332 		 */
333 		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
334 		if (ret < 0)
335 			goto err_out;
336 		pdev->id = ret;
337 		pdev->id_auto = true;
338 		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
339 		break;
340 	}
341 
342 	for (i = 0; i < pdev->num_resources; i++) {
343 		struct resource *p, *r = &pdev->resource[i];
344 
345 		if (r->name == NULL)
346 			r->name = dev_name(&pdev->dev);
347 
348 		p = r->parent;
349 		if (!p) {
350 			if (resource_type(r) == IORESOURCE_MEM)
351 				p = &iomem_resource;
352 			else if (resource_type(r) == IORESOURCE_IO)
353 				p = &ioport_resource;
354 		}
355 
356 		if (p && insert_resource(p, r)) {
357 			dev_err(&pdev->dev, "failed to claim resource %d\n", i);
358 			ret = -EBUSY;
359 			goto failed;
360 		}
361 	}
362 
363 	pr_debug("Registering platform device '%s'. Parent at %s\n",
364 		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));
365 
366 	ret = device_add(&pdev->dev);
367 	if (ret == 0)
368 		return ret;
369 
370  failed:
371 	if (pdev->id_auto) {
372 		ida_simple_remove(&platform_devid_ida, pdev->id);
373 		pdev->id = PLATFORM_DEVID_AUTO;
374 	}
375 
376 	while (--i >= 0) {
377 		struct resource *r = &pdev->resource[i];
378 		unsigned long type = resource_type(r);
379 
380 		if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
381 			release_resource(r);
382 	}
383 
384  err_out:
385 	return ret;
386 }
387 EXPORT_SYMBOL_GPL(platform_device_add);
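
/*
 * Example usage (sketch): the dynamic registration sequence built from the
 * helpers above, with the error handling they require.  The "foo" resources
 * and platform data are the illustrative ones from the sketch above.
 *
 *	struct platform_device *pdev;
 *	int ret;
 *
 *	pdev = platform_device_alloc("foo", PLATFORM_DEVID_AUTO);
 *	if (!pdev)
 *		return -ENOMEM;
 *
 *	ret = platform_device_add_resources(pdev, foo_resources,
 *					    ARRAY_SIZE(foo_resources));
 *	if (ret)
 *		goto err_put;
 *
 *	ret = platform_device_add_data(pdev, &foo_pdata, sizeof(foo_pdata));
 *	if (ret)
 *		goto err_put;
 *
 *	ret = platform_device_add(pdev);
 *	if (ret)
 *		goto err_put;
 *
 *	return 0;
 *
 * err_put:
 *	platform_device_put(pdev);
 *	return ret;
 */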
388 
389 /**
390  * platform_device_del - remove a platform-level device
391  * @pdev: platform device we're removing
392  *
393  * Note that this function will also release all memory- and port-based
394  * resources owned by the device (@dev->resource).  This function must
395  * _only_ be externally called in error cases.  All other usage is a bug.
396  */
397 void platform_device_del(struct platform_device *pdev)
398 {
399 	int i;
400 
401 	if (pdev) {
402 		device_del(&pdev->dev);
403 
404 		if (pdev->id_auto) {
405 			ida_simple_remove(&platform_devid_ida, pdev->id);
406 			pdev->id = PLATFORM_DEVID_AUTO;
407 		}
408 
409 		for (i = 0; i < pdev->num_resources; i++) {
410 			struct resource *r = &pdev->resource[i];
411 			unsigned long type = resource_type(r);
412 
413 			if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
414 				release_resource(r);
415 		}
416 	}
417 }
418 EXPORT_SYMBOL_GPL(platform_device_del);
419 
420 /**
421  * platform_device_register - add a platform-level device
422  * @pdev: platform device we're adding
423  */
424 int platform_device_register(struct platform_device *pdev)
425 {
426 	device_initialize(&pdev->dev);
427 	arch_setup_pdev_archdata(pdev);
428 	return platform_device_add(pdev);
429 }
430 EXPORT_SYMBOL_GPL(platform_device_register);
431 
432 /**
433  * platform_device_unregister - unregister a platform-level device
434  * @pdev: platform device we're unregistering
435  *
436  * Unregistration is done in 2 steps. First we release all resources
437  * and remove the device from the subsystem, then we drop the reference
438  * count by calling platform_device_put().
439  */
440 void platform_device_unregister(struct platform_device *pdev)
441 {
442 	platform_device_del(pdev);
443 	platform_device_put(pdev);
444 }
445 EXPORT_SYMBOL_GPL(platform_device_unregister);
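
/*
 * Example usage (sketch): a simple register/unregister pairing using the
 * platform_device_register_simple() helper from <linux/platform_device.h>;
 * the "foo" name is illustrative only.
 *
 *	struct platform_device *pdev;
 *
 *	pdev = platform_device_register_simple("foo", -1, NULL, 0);
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 *
 *	... use the device ...
 *
 *	platform_device_unregister(pdev);
 */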
446 
447 /**
448  * platform_device_register_full - add a platform-level device with
449  * resources and platform-specific data
450  *
451  * @pdevinfo: data used to create device
452  *
453  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
454  */
455 struct platform_device *platform_device_register_full(
456 		const struct platform_device_info *pdevinfo)
457 {
458 	int ret = -ENOMEM;
459 	struct platform_device *pdev;
460 
461 	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
462 	if (!pdev)
463 		goto err_alloc;
464 
465 	pdev->dev.parent = pdevinfo->parent;
466 	pdev->dev.fwnode = pdevinfo->fwnode;
467 
468 	if (pdevinfo->dma_mask) {
469 		/*
470 		 * This memory isn't freed when the device is put; there is no
471 		 * nice way to do that yet.  Conceptually
472 		 * dma_mask in struct device should not be a pointer.
473 		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
474 		 */
475 		pdev->dev.dma_mask =
476 			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
477 		if (!pdev->dev.dma_mask)
478 			goto err;
479 
480 		*pdev->dev.dma_mask = pdevinfo->dma_mask;
481 		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
482 	}
483 
484 	ret = platform_device_add_resources(pdev,
485 			pdevinfo->res, pdevinfo->num_res);
486 	if (ret)
487 		goto err;
488 
489 	ret = platform_device_add_data(pdev,
490 			pdevinfo->data, pdevinfo->size_data);
491 	if (ret)
492 		goto err;
493 
494 	ret = platform_device_add(pdev);
495 	if (ret) {
496 err:
497 		ACPI_COMPANION_SET(&pdev->dev, NULL);
498 		kfree(pdev->dev.dma_mask);
499 
500 err_alloc:
501 		platform_device_put(pdev);
502 		return ERR_PTR(ret);
503 	}
504 
505 	return pdev;
506 }
507 EXPORT_SYMBOL_GPL(platform_device_register_full);
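
/*
 * Example usage (sketch): creating a device from a platform_device_info
 * descriptor, here with an automatically allocated id and a 32-bit DMA mask.
 * The "foo" name and resource table are illustrative only.
 *
 *	static const struct platform_device_info foo_devinfo = {
 *		.name		= "foo",
 *		.id		= PLATFORM_DEVID_AUTO,
 *		.res		= foo_resources,
 *		.num_res	= ARRAY_SIZE(foo_resources),
 *		.dma_mask	= DMA_BIT_MASK(32),
 *	};
 *
 *	struct platform_device *pdev;
 *
 *	pdev = platform_device_register_full(&foo_devinfo);
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 */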
508 
509 static int platform_drv_probe(struct device *_dev)
510 {
511 	struct platform_driver *drv = to_platform_driver(_dev->driver);
512 	struct platform_device *dev = to_platform_device(_dev);
513 	int ret;
514 
515 	ret = of_clk_set_defaults(_dev->of_node, false);
516 	if (ret < 0)
517 		return ret;
518 
519 	ret = dev_pm_domain_attach(_dev, true);
520 	if (ret != -EPROBE_DEFER) {
521 		ret = drv->probe(dev);
522 		if (ret)
523 			dev_pm_domain_detach(_dev, true);
524 	}
525 
526 	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
527 		dev_warn(_dev, "probe deferral not supported\n");
528 		ret = -ENXIO;
529 	}
530 
531 	return ret;
532 }
533 
534 static int platform_drv_probe_fail(struct device *_dev)
535 {
536 	return -ENXIO;
537 }
538 
539 static int platform_drv_remove(struct device *_dev)
540 {
541 	struct platform_driver *drv = to_platform_driver(_dev->driver);
542 	struct platform_device *dev = to_platform_device(_dev);
543 	int ret;
544 
545 	ret = drv->remove(dev);
546 	dev_pm_domain_detach(_dev, true);
547 
548 	return ret;
549 }
550 
551 static void platform_drv_shutdown(struct device *_dev)
552 {
553 	struct platform_driver *drv = to_platform_driver(_dev->driver);
554 	struct platform_device *dev = to_platform_device(_dev);
555 
556 	drv->shutdown(dev);
557 	dev_pm_domain_detach(_dev, true);
558 }
559 
560 /**
561  * __platform_driver_register - register a driver for platform-level devices
562  * @drv: platform driver structure
563  * @owner: owning module/driver
564  */
565 int __platform_driver_register(struct platform_driver *drv,
566 				struct module *owner)
567 {
568 	drv->driver.owner = owner;
569 	drv->driver.bus = &platform_bus_type;
570 	if (drv->probe)
571 		drv->driver.probe = platform_drv_probe;
572 	if (drv->remove)
573 		drv->driver.remove = platform_drv_remove;
574 	if (drv->shutdown)
575 		drv->driver.shutdown = platform_drv_shutdown;
576 
577 	return driver_register(&drv->driver);
578 }
579 EXPORT_SYMBOL_GPL(__platform_driver_register);
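
/*
 * Example usage (sketch): drivers normally go through the
 * platform_driver_register() / module_platform_driver() wrappers, which
 * supply THIS_MODULE as @owner.  The "foo" driver below is illustrative only.
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *		.driver	= {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *	};
 *	module_platform_driver(foo_driver);
 */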
580 
581 /**
582  * platform_driver_unregister - unregister a driver for platform-level devices
583  * @drv: platform driver structure
584  */
585 void platform_driver_unregister(struct platform_driver *drv)
586 {
587 	driver_unregister(&drv->driver);
588 }
589 EXPORT_SYMBOL_GPL(platform_driver_unregister);
590 
591 /**
592  * __platform_driver_probe - register driver for non-hotpluggable device
593  * @drv: platform driver structure
594  * @probe: the driver probe routine, probably from an __init section
595  * @module: module which will be the owner of the driver
596  *
597  * Use this instead of platform_driver_register() when you know the device
598  * is not hotpluggable and has already been registered, and you want to
599  * remove its run-once probe() infrastructure from memory after the driver
600  * has bound to the device.
601  *
602  * One typical use for this would be with drivers for controllers integrated
603  * into system-on-chip processors, where the controller devices have been
604  * configured as part of board setup.
605  *
606  * Note that this is incompatible with deferred probing.
607  *
608  * Returns zero if the driver registered and bound to a device, else returns
609  * a negative error code and with the driver not registered.
610  */
611 int __init_or_module __platform_driver_probe(struct platform_driver *drv,
612 		int (*probe)(struct platform_device *), struct module *module)
613 {
614 	int retval, code;
615 
616 	if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
617 		pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
618 			 drv->driver.name, __func__);
619 		return -EINVAL;
620 	}
621 
622 	/*
623 	 * We have to run our probes synchronously because we check if
624 	 * we find any devices to bind to and exit with error if there
625 	 * are any.
626 	 */
627 	drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
628 
629 	/*
630 	 * Prevent driver from requesting probe deferral to avoid further
631 	 * futile probe attempts.
632 	 */
633 	drv->prevent_deferred_probe = true;
634 
635 	/* make sure driver won't have bind/unbind attributes */
636 	drv->driver.suppress_bind_attrs = true;
637 
638 	/* temporary section violation during probe() */
639 	drv->probe = probe;
640 	retval = code = __platform_driver_register(drv, module);
641 
642 	/*
643 	 * Fixup that section violation, being paranoid about code scanning
644 	 * the list of drivers in order to probe new devices.  Check to see
645 	 * if the probe was successful, and make sure any forced probes of
646 	 * new devices fail.
647 	 */
648 	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
649 	drv->probe = NULL;
650 	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
651 		retval = -ENODEV;
652 	drv->driver.probe = platform_drv_probe_fail;
653 	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
654 
655 	if (code != retval)
656 		platform_driver_unregister(drv);
657 	return retval;
658 }
659 EXPORT_SYMBOL_GPL(__platform_driver_probe);
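
/*
 * Example usage (sketch): a driver for a non-hotpluggable, board-registered
 * device can use the platform_driver_probe() wrapper so its probe routine
 * can live in __init memory.  The "foo" names are illustrative only.
 *
 *	static int __init foo_probe(struct platform_device *pdev)
 *	{
 *		return 0;
 *	}
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return platform_driver_probe(&foo_driver, foo_probe);
 *	}
 *	device_initcall(foo_init);
 */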
660 
661 /**
662  * __platform_create_bundle - register driver and create corresponding device
663  * @driver: platform driver structure
664  * @probe: the driver probe routine, probably from an __init section
665  * @res: set of resources that needs to be allocated for the device
666  * @n_res: number of resources
667  * @data: platform specific data for this platform device
668  * @size: size of platform specific data
669  * @module: module which will be the owner of the driver
670  *
671  * Use this in legacy-style modules that probe hardware directly and
672  * register a single platform device and corresponding platform driver.
673  *
674  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
675  */
676 struct platform_device * __init_or_module __platform_create_bundle(
677 			struct platform_driver *driver,
678 			int (*probe)(struct platform_device *),
679 			struct resource *res, unsigned int n_res,
680 			const void *data, size_t size, struct module *module)
681 {
682 	struct platform_device *pdev;
683 	int error;
684 
685 	pdev = platform_device_alloc(driver->driver.name, -1);
686 	if (!pdev) {
687 		error = -ENOMEM;
688 		goto err_out;
689 	}
690 
691 	error = platform_device_add_resources(pdev, res, n_res);
692 	if (error)
693 		goto err_pdev_put;
694 
695 	error = platform_device_add_data(pdev, data, size);
696 	if (error)
697 		goto err_pdev_put;
698 
699 	error = platform_device_add(pdev);
700 	if (error)
701 		goto err_pdev_put;
702 
703 	error = __platform_driver_probe(driver, probe, module);
704 	if (error)
705 		goto err_pdev_del;
706 
707 	return pdev;
708 
709 err_pdev_del:
710 	platform_device_del(pdev);
711 err_pdev_put:
712 	platform_device_put(pdev);
713 err_out:
714 	return ERR_PTR(error);
715 }
716 EXPORT_SYMBOL_GPL(__platform_create_bundle);
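
/*
 * Example usage (sketch): legacy-style code that has already probed the
 * hardware itself can create the device and driver in one call through the
 * platform_create_bundle() wrapper.  The "foo" driver, probe routine and
 * resource table are illustrative only.
 *
 *	pdev = platform_create_bundle(&foo_driver, foo_probe,
 *				      foo_resources, ARRAY_SIZE(foo_resources),
 *				      NULL, 0);
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 */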
717 
718 /* modalias support enables more hands-off userspace setup:
719  * (a) environment variable lets new-style hotplug events work once system is
720  *     fully running:  "modprobe $MODALIAS"
721  * (b) sysfs attribute lets new-style coldplug recover from hotplug events
722  *     mishandled before system is fully running:  "modprobe $(cat modalias)"
723  */
724 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
725 			     char *buf)
726 {
727 	struct platform_device	*pdev = to_platform_device(dev);
728 	int len;
729 
730 	len = of_device_get_modalias(dev, buf, PAGE_SIZE - 1);
731 	if (len != -ENODEV)
732 		return len;
733 
734 	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
735 	if (len != -ENODEV)
736 		return len;
737 
738 	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
739 
740 	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
741 }
742 static DEVICE_ATTR_RO(modalias);
743 
744 static ssize_t driver_override_store(struct device *dev,
745 				     struct device_attribute *attr,
746 				     const char *buf, size_t count)
747 {
748 	struct platform_device *pdev = to_platform_device(dev);
749 	char *driver_override, *old = pdev->driver_override, *cp;
750 
751 	if (count > PATH_MAX)
752 		return -EINVAL;
753 
754 	driver_override = kstrndup(buf, count, GFP_KERNEL);
755 	if (!driver_override)
756 		return -ENOMEM;
757 
758 	cp = strchr(driver_override, '\n');
759 	if (cp)
760 		*cp = '\0';
761 
762 	if (strlen(driver_override)) {
763 		pdev->driver_override = driver_override;
764 	} else {
765 		kfree(driver_override);
766 		pdev->driver_override = NULL;
767 	}
768 
769 	kfree(old);
770 
771 	return count;
772 }
773 
774 static ssize_t driver_override_show(struct device *dev,
775 				    struct device_attribute *attr, char *buf)
776 {
777 	struct platform_device *pdev = to_platform_device(dev);
778 
779 	return sprintf(buf, "%s\n", pdev->driver_override);
780 }
781 static DEVICE_ATTR_RW(driver_override);
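
/*
 * Example usage (sketch): from userspace, writing a driver name here steers
 * the device to that driver on the next (re)probe, for instance to hand a
 * device to vfio-platform.  The device name below is illustrative only, and
 * any driver already bound to the device has to be unbound first via its
 * "unbind" attribute.
 *
 *	# echo vfio-platform > /sys/bus/platform/devices/fff51000.ethernet/driver_override
 *	# echo fff51000.ethernet > /sys/bus/platform/drivers_probe
 */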
782 
783 
784 static struct attribute *platform_dev_attrs[] = {
785 	&dev_attr_modalias.attr,
786 	&dev_attr_driver_override.attr,
787 	NULL,
788 };
789 ATTRIBUTE_GROUPS(platform_dev);
790 
791 static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
792 {
793 	struct platform_device	*pdev = to_platform_device(dev);
794 	int rc;
795 
796 	/* Some devices have extra OF data and an OF-style MODALIAS */
797 	rc = of_device_uevent_modalias(dev, env);
798 	if (rc != -ENODEV)
799 		return rc;
800 
801 	rc = acpi_device_uevent_modalias(dev, env);
802 	if (rc != -ENODEV)
803 		return rc;
804 
805 	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
806 			pdev->name);
807 	return 0;
808 }
809 
810 static const struct platform_device_id *platform_match_id(
811 			const struct platform_device_id *id,
812 			struct platform_device *pdev)
813 {
814 	while (id->name[0]) {
815 		if (strcmp(pdev->name, id->name) == 0) {
816 			pdev->id_entry = id;
817 			return id;
818 		}
819 		id++;
820 	}
821 	return NULL;
822 }
823 
824 /**
825  * platform_match - bind platform device to platform driver.
826  * @dev: device.
827  * @drv: driver.
828  *
829  * Platform device IDs are assumed to be encoded like this:
830  * "<name><instance>", where <name> is a short description of the type of
831  * device, like "pci" or "floppy", and <instance> is the enumerated
832  * instance of the device, like '0' or '42'.  Driver IDs are simply
833  * "<name>".  So, extract the <name> from the platform_device structure,
834  * and compare it against the name of the driver. Return whether they match
835  * or not.
836  */
837 static int platform_match(struct device *dev, struct device_driver *drv)
838 {
839 	struct platform_device *pdev = to_platform_device(dev);
840 	struct platform_driver *pdrv = to_platform_driver(drv);
841 
842 	/* When driver_override is set, only bind to the matching driver */
843 	if (pdev->driver_override)
844 		return !strcmp(pdev->driver_override, drv->name);
845 
846 	/* Attempt an OF style match first */
847 	if (of_driver_match_device(dev, drv))
848 		return 1;
849 
850 	/* Then try ACPI style match */
851 	if (acpi_driver_match_device(dev, drv))
852 		return 1;
853 
854 	/* Then try to match against the id table */
855 	if (pdrv->id_table)
856 		return platform_match_id(pdrv->id_table, pdev) != NULL;
857 
858 	/* fall-back to driver name match */
859 	return (strcmp(pdev->name, drv->name) == 0);
860 }
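
/*
 * Example (sketch): a driver that serves several board-registered device
 * names can provide an id_table, which is consulted before the final
 * name-to-name fallback above.  The "foo" entries and chip data are
 * illustrative only.
 *
 *	static const struct platform_device_id foo_id_table[] = {
 *		{ .name = "foo",      .driver_data = (kernel_ulong_t)&foo_chip_data },
 *		{ .name = "foo-lite", .driver_data = (kernel_ulong_t)&foo_lite_chip_data },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(platform, foo_id_table);
 *
 *	static struct platform_driver foo_driver = {
 *		.id_table = foo_id_table,
 *		...
 *	};
 */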
861 
862 #ifdef CONFIG_PM_SLEEP
863 
864 static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
865 {
866 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
867 	struct platform_device *pdev = to_platform_device(dev);
868 	int ret = 0;
869 
870 	if (dev->driver && pdrv->suspend)
871 		ret = pdrv->suspend(pdev, mesg);
872 
873 	return ret;
874 }
875 
876 static int platform_legacy_resume(struct device *dev)
877 {
878 	struct platform_driver *pdrv = to_platform_driver(dev->driver);
879 	struct platform_device *pdev = to_platform_device(dev);
880 	int ret = 0;
881 
882 	if (dev->driver && pdrv->resume)
883 		ret = pdrv->resume(pdev);
884 
885 	return ret;
886 }
887 
888 #endif /* CONFIG_PM_SLEEP */
889 
890 #ifdef CONFIG_SUSPEND
891 
892 int platform_pm_suspend(struct device *dev)
893 {
894 	struct device_driver *drv = dev->driver;
895 	int ret = 0;
896 
897 	if (!drv)
898 		return 0;
899 
900 	if (drv->pm) {
901 		if (drv->pm->suspend)
902 			ret = drv->pm->suspend(dev);
903 	} else {
904 		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
905 	}
906 
907 	return ret;
908 }
909 
910 int platform_pm_resume(struct device *dev)
911 {
912 	struct device_driver *drv = dev->driver;
913 	int ret = 0;
914 
915 	if (!drv)
916 		return 0;
917 
918 	if (drv->pm) {
919 		if (drv->pm->resume)
920 			ret = drv->pm->resume(dev);
921 	} else {
922 		ret = platform_legacy_resume(dev);
923 	}
924 
925 	return ret;
926 }
927 
928 #endif /* CONFIG_SUSPEND */
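
/*
 * Example (sketch): rather than the legacy suspend/resume methods handled
 * above, drivers normally supply dev_pm_ops, which platform_pm_suspend() and
 * friends prefer when present.  The "foo" callbacks are illustrative only.
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.pm	= &foo_pm_ops,
 *		},
 *	};
 */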
929 
930 #ifdef CONFIG_HIBERNATE_CALLBACKS
931 
932 int platform_pm_freeze(struct device *dev)
933 {
934 	struct device_driver *drv = dev->driver;
935 	int ret = 0;
936 
937 	if (!drv)
938 		return 0;
939 
940 	if (drv->pm) {
941 		if (drv->pm->freeze)
942 			ret = drv->pm->freeze(dev);
943 	} else {
944 		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
945 	}
946 
947 	return ret;
948 }
949 
950 int platform_pm_thaw(struct device *dev)
951 {
952 	struct device_driver *drv = dev->driver;
953 	int ret = 0;
954 
955 	if (!drv)
956 		return 0;
957 
958 	if (drv->pm) {
959 		if (drv->pm->thaw)
960 			ret = drv->pm->thaw(dev);
961 	} else {
962 		ret = platform_legacy_resume(dev);
963 	}
964 
965 	return ret;
966 }
967 
968 int platform_pm_poweroff(struct device *dev)
969 {
970 	struct device_driver *drv = dev->driver;
971 	int ret = 0;
972 
973 	if (!drv)
974 		return 0;
975 
976 	if (drv->pm) {
977 		if (drv->pm->poweroff)
978 			ret = drv->pm->poweroff(dev);
979 	} else {
980 		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
981 	}
982 
983 	return ret;
984 }
985 
986 int platform_pm_restore(struct device *dev)
987 {
988 	struct device_driver *drv = dev->driver;
989 	int ret = 0;
990 
991 	if (!drv)
992 		return 0;
993 
994 	if (drv->pm) {
995 		if (drv->pm->restore)
996 			ret = drv->pm->restore(dev);
997 	} else {
998 		ret = platform_legacy_resume(dev);
999 	}
1000 
1001 	return ret;
1002 }
1003 
1004 #endif /* CONFIG_HIBERNATE_CALLBACKS */
1005 
1006 static const struct dev_pm_ops platform_dev_pm_ops = {
1007 	.runtime_suspend = pm_generic_runtime_suspend,
1008 	.runtime_resume = pm_generic_runtime_resume,
1009 	USE_PLATFORM_PM_SLEEP_OPS
1010 };
1011 
1012 struct bus_type platform_bus_type = {
1013 	.name		= "platform",
1014 	.dev_groups	= platform_dev_groups,
1015 	.match		= platform_match,
1016 	.uevent		= platform_uevent,
1017 	.pm		= &platform_dev_pm_ops,
1018 };
1019 EXPORT_SYMBOL_GPL(platform_bus_type);
1020 
1021 int __init platform_bus_init(void)
1022 {
1023 	int error;
1024 
1025 	early_platform_cleanup();
1026 
1027 	error = device_register(&platform_bus);
1028 	if (error)
1029 		return error;
1030 	error =  bus_register(&platform_bus_type);
1031 	if (error)
1032 		device_unregister(&platform_bus);
1033 	of_platform_register_reconfig_notifier();
1034 	return error;
1035 }
1036 
1037 #ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
1038 u64 dma_get_required_mask(struct device *dev)
1039 {
1040 	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
1041 	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
1042 	u64 mask;
1043 
1044 	if (!high_totalram) {
1045 		/* convert to mask just covering totalram */
1046 		low_totalram = (1 << (fls(low_totalram) - 1));
1047 		low_totalram += low_totalram - 1;
1048 		mask = low_totalram;
1049 	} else {
1050 		high_totalram = (1 << (fls(high_totalram) - 1));
1051 		high_totalram += high_totalram - 1;
1052 		mask = (((u64)high_totalram) << 32) + 0xffffffff;
1053 	}
1054 	return mask;
1055 }
1056 EXPORT_SYMBOL_GPL(dma_get_required_mask);
1057 #endif
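
/*
 * Example (sketch): with 1 GiB of RAM and 4 KiB pages, max_pfn is 0x40000,
 * so low_totalram is 0x3ffff000, high_totalram is 0 and the reported mask
 * becomes 0x3fffffff, i.e. the smallest power-of-two-sized mask covering
 * all of memory.
 */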
1058 
1059 static __initdata LIST_HEAD(early_platform_driver_list);
1060 static __initdata LIST_HEAD(early_platform_device_list);
1061 
1062 /**
1063  * early_platform_driver_register - register early platform driver
1064  * @epdrv: early_platform driver structure
1065  * @buf: string passed from early_param()
1066  *
1067  * Helper function for early_platform_init() / early_platform_init_buffer()
1068  */
1069 int __init early_platform_driver_register(struct early_platform_driver *epdrv,
1070 					  char *buf)
1071 {
1072 	char *tmp;
1073 	int n;
1074 
1075 	/* Simply add the driver to the end of the global list.
1076 	 * Drivers will by default be put on the list in compiled-in order.
1077 	 */
1078 	if (!epdrv->list.next) {
1079 		INIT_LIST_HEAD(&epdrv->list);
1080 		list_add_tail(&epdrv->list, &early_platform_driver_list);
1081 	}
1082 
1083 	/* If the user has specified a device then make sure the driver
1084 	 * gets prioritized. The driver of the last device specified on
1085 	 * the command line will be put first on the list.
1086 	 */
1087 	n = strlen(epdrv->pdrv->driver.name);
1088 	if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
1089 		list_move(&epdrv->list, &early_platform_driver_list);
1090 
1091 		/* Allow passing parameters after device name */
1092 		if (buf[n] == '\0' || buf[n] == ',')
1093 			epdrv->requested_id = -1;
1094 		else {
1095 			epdrv->requested_id = simple_strtoul(&buf[n + 1],
1096 							     &tmp, 10);
1097 
1098 			if (buf[n] != '.' || (tmp == &buf[n + 1])) {
1099 				epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
1100 				n = 0;
1101 			} else
1102 				n += strcspn(&buf[n + 1], ",") + 1;
1103 		}
1104 
1105 		if (buf[n] == ',')
1106 			n++;
1107 
1108 		if (epdrv->bufsize) {
1109 			memcpy(epdrv->buffer, &buf[n],
1110 			       min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
1111 			epdrv->buffer[epdrv->bufsize - 1] = '\0';
1112 		}
1113 	}
1114 
1115 	return 0;
1116 }
1117 
1118 /**
1119  * early_platform_add_devices - adds a number of early platform devices
1120  * @devs: array of early platform devices to add
1121  * @num: number of early platform devices in array
1122  *
1123  * Used by early architecture code to register early platform devices and
1124  * their platform data.
1125  */
1126 void __init early_platform_add_devices(struct platform_device **devs, int num)
1127 {
1128 	struct device *dev;
1129 	int i;
1130 
1131 	/* simply add the devices to list */
1132 	for (i = 0; i < num; i++) {
1133 		dev = &devs[i]->dev;
1134 
1135 		if (!dev->devres_head.next) {
1136 			pm_runtime_early_init(dev);
1137 			INIT_LIST_HEAD(&dev->devres_head);
1138 			list_add_tail(&dev->devres_head,
1139 				      &early_platform_device_list);
1140 		}
1141 	}
1142 }
1143 
1144 /**
1145  * early_platform_driver_register_all - register early platform drivers
1146  * @class_str: string to identify early platform driver class
1147  *
1148  * Used by architecture code to register all early platform drivers
1149  * for a certain class. If this is not called, only early platform drivers
1150  * with matching kernel command line class parameters will be registered.
1151  */
1152 void __init early_platform_driver_register_all(char *class_str)
1153 {
1154 	/* The "class_str" parameter may or may not be present on the kernel
1155 	 * command line. If it is present then there may be more than one
1156 	 * matching parameter.
1157 	 *
1158 	 * Since we register our early platform drivers using early_param()
1159 	 * we need to make sure that they also get registered in the case
1160 	 * when the parameter is missing from the kernel command line.
1161 	 *
1162 	 * We use parse_early_options() to make sure the early_param() gets
1163 	 * called at least once. The early_param() may be called more than
1164 	 * once since the name of the preferred device may be specified on
1165 	 * the kernel command line. early_platform_driver_register() handles
1166 	 * this case for us.
1167 	 */
1168 	parse_early_options(class_str);
1169 }
1170 
1171 /**
1172  * early_platform_match - find early platform device matching driver
1173  * @epdrv: early platform driver structure
1174  * @id: id to match against
1175  */
1176 static struct platform_device * __init
1177 early_platform_match(struct early_platform_driver *epdrv, int id)
1178 {
1179 	struct platform_device *pd;
1180 
1181 	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
1182 		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
1183 			if (pd->id == id)
1184 				return pd;
1185 
1186 	return NULL;
1187 }
1188 
1189 /**
1190  * early_platform_left - check if early platform driver has matching devices
1191  * @epdrv: early platform driver structure
1192  * @id: return true if id or above exists
1193  */
1194 static int __init early_platform_left(struct early_platform_driver *epdrv,
1195 				       int id)
1196 {
1197 	struct platform_device *pd;
1198 
1199 	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
1200 		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
1201 			if (pd->id >= id)
1202 				return 1;
1203 
1204 	return 0;
1205 }
1206 
1207 /**
1208  * early_platform_driver_probe_id - probe drivers matching class_str and id
1209  * @class_str: string to identify early platform driver class
1210  * @id: id to match against
1211  * @nr_probe: number of platform devices to successfully probe before exiting
1212  */
1213 static int __init early_platform_driver_probe_id(char *class_str,
1214 						 int id,
1215 						 int nr_probe)
1216 {
1217 	struct early_platform_driver *epdrv;
1218 	struct platform_device *match;
1219 	int match_id;
1220 	int n = 0;
1221 	int left = 0;
1222 
1223 	list_for_each_entry(epdrv, &early_platform_driver_list, list) {
1224 		/* only use drivers matching our class_str */
1225 		if (strcmp(class_str, epdrv->class_str))
1226 			continue;
1227 
1228 		if (id == -2) {
1229 			match_id = epdrv->requested_id;
1230 			left = 1;
1231 
1232 		} else {
1233 			match_id = id;
1234 			left += early_platform_left(epdrv, id);
1235 
1236 			/* skip requested id */
1237 			switch (epdrv->requested_id) {
1238 			case EARLY_PLATFORM_ID_ERROR:
1239 			case EARLY_PLATFORM_ID_UNSET:
1240 				break;
1241 			default:
1242 				if (epdrv->requested_id == id)
1243 					match_id = EARLY_PLATFORM_ID_UNSET;
1244 			}
1245 		}
1246 
1247 		switch (match_id) {
1248 		case EARLY_PLATFORM_ID_ERROR:
1249 			pr_warn("%s: unable to parse %s parameter\n",
1250 				class_str, epdrv->pdrv->driver.name);
1251 			/* fall-through */
1252 		case EARLY_PLATFORM_ID_UNSET:
1253 			match = NULL;
1254 			break;
1255 		default:
1256 			match = early_platform_match(epdrv, match_id);
1257 		}
1258 
1259 		if (match) {
1260 			/*
1261 			 * Set up a sensible init_name to enable
1262 			 * dev_name() and others to be used before the
1263 			 * rest of the driver core is initialized.
1264 			 */
1265 			if (!match->dev.init_name && slab_is_available()) {
1266 				if (match->id != -1)
1267 					match->dev.init_name =
1268 						kasprintf(GFP_KERNEL, "%s.%d",
1269 							  match->name,
1270 							  match->id);
1271 				else
1272 					match->dev.init_name =
1273 						kasprintf(GFP_KERNEL, "%s",
1274 							  match->name);
1275 
1276 				if (!match->dev.init_name)
1277 					return -ENOMEM;
1278 			}
1279 
1280 			if (epdrv->pdrv->probe(match))
1281 				pr_warn("%s: unable to probe %s early.\n",
1282 					class_str, match->name);
1283 			else
1284 				n++;
1285 		}
1286 
1287 		if (n >= nr_probe)
1288 			break;
1289 	}
1290 
1291 	if (left)
1292 		return n;
1293 	else
1294 		return -ENODEV;
1295 }
1296 
1297 /**
1298  * early_platform_driver_probe - probe a class of registered drivers
1299  * @class_str: string to identify early platform driver class
1300  * @nr_probe: number of platform devices to successfully probe before exiting
1301  * @user_only: only probe user specified early platform devices
1302  *
1303  * Used by architecture code to probe registered early platform drivers
1304  * within a certain class. For probe to happen a registered early platform
1305  * device matching a registered early platform driver is needed.
1306  */
1307 int __init early_platform_driver_probe(char *class_str,
1308 				       int nr_probe,
1309 				       int user_only)
1310 {
1311 	int k, n, i;
1312 
1313 	n = 0;
1314 	for (i = -2; n < nr_probe; i++) {
1315 		k = early_platform_driver_probe_id(class_str, i, nr_probe - n);
1316 
1317 		if (k < 0)
1318 			break;
1319 
1320 		n += k;
1321 
1322 		if (user_only)
1323 			break;
1324 	}
1325 
1326 	return n;
1327 }
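
/*
 * Example (sketch): how architecture code typically ties the early platform
 * pieces together for, say, an "earlytimer" class.  The driver and device
 * table names are illustrative only.
 *
 *	In the driver:
 *
 *	static struct platform_driver foo_timer_driver = { ... };
 *	early_platform_init("earlytimer", &foo_timer_driver);
 *
 *	In early architecture setup, before the driver core is up:
 *
 *	early_platform_add_devices(board_early_devices,
 *				   ARRAY_SIZE(board_early_devices));
 *	early_platform_driver_register_all("earlytimer");
 *	early_platform_driver_probe("earlytimer", 1, 0);
 */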
1328 
1329 /**
1330  * early_platform_cleanup - clean up early platform code
1331  */
1332 void __init early_platform_cleanup(void)
1333 {
1334 	struct platform_device *pd, *pd2;
1335 
1336 	/* clean up the devres list used to chain devices */
1337 	list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
1338 				 dev.devres_head) {
1339 		list_del(&pd->dev.devres_head);
1340 		memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
1341 	}
1342 }
1343 
1344