/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A set of lists separate from the global subsystem list is used to
 * keep track of power info, because we use different lists to hold
 * devices based on what stage of the power management process they
 * are in. The power domain dependencies may also differ from the
 * ancestral dependencies that the subsystem list maintains.
 */
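
/*
 * Rough registration path, for reference (a sketch; the calls below live
 * in drivers/base/core.c, not in this file):
 *
 *	device_register(dev)
 *	    device_add(dev)
 *		...
 *		device_pm_add(dev)	<-- dev goes on the dpm_active list
 */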

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/resume-trace.h>
#include <linux/rwsem.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_active list are in a depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * All the other lists are kept in the same order, for consistency.
 * However, the lists aren't always traversed in the same order.
 * Semaphores must be acquired from the top (i.e., front) down
 * and released in the opposite order.  Devices must be suspended
 * from the bottom (i.e., end) up and resumed in the opposite order.
 * That way no parent will be suspended while it still has an active
 * child.
 *
 * Since device_pm_add() may be called with a device semaphore held,
 * we must never try to acquire a device semaphore while holding
 * dpm_list_mtx.
 */
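
/*
 * Illustrative sketch of the required ordering (see lock_all_devices()
 * below for the real code): a device semaphore must be taken before
 * dpm_list_mtx, so the list lock has to be dropped around the down():
 *
 *	get_device(dev);
 *	mutex_unlock(&dpm_list_mtx);
 *	down(&dev->sem);
 *	mutex_lock(&dpm_list_mtx);
 */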

LIST_HEAD(dpm_active);
static LIST_HEAD(dpm_locked);
static LIST_HEAD(dpm_off);
static LIST_HEAD(dpm_off_irq);
static LIST_HEAD(dpm_destroy);

static DEFINE_MUTEX(dpm_list_mtx);

static DECLARE_RWSEM(pm_sleep_rwsem);

int (*platform_enable_wakeup)(struct device *dev, int is_on);

/**
 *	device_pm_add - add a device to the list of active devices
 *	@dev:	Device to be added to the list
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	list_add_tail(&dev->power.entry, &dpm_active);
	mutex_unlock(&dpm_list_mtx);
}

/**
 *	device_pm_remove - remove a device from the list of active devices
 *	@dev:	Device to be removed from the list
 *
 *	This function also removes the device's PM-related sysfs attributes.
 */
void device_pm_remove(struct device *dev)
{
	/*
	 * If this function is called during a suspend, it will be blocked,
	 * because we're holding the device's semaphore at that time, which may
	 * lead to a deadlock.  In that case we want to print a warning.
	 * However, it may also be called by unregister_dropped_devices() with
	 * the device's semaphore released, in which case the warning should
	 * not be printed.
	 */
	if (down_trylock(&dev->sem)) {
		if (down_read_trylock(&pm_sleep_rwsem)) {
			/* No suspend in progress, wait on dev->sem */
			down(&dev->sem);
			up_read(&pm_sleep_rwsem);
		} else {
			/* Suspend in progress, we may deadlock */
			dev_warn(dev, "Suspicious %s during suspend\n",
				__FUNCTION__);
			dump_stack();
			/* The user has been warned ... */
			down(&dev->sem);
		}
	}
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	dpm_sysfs_remove(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	up(&dev->sem);
}

/**
 *	device_pm_schedule_removal - schedule the removal of a suspended device
 *	@dev:	Device to destroy
 *
 *	Moves the device to the dpm_destroy list for further processing by
 *	unregister_dropped_devices().
 */
void device_pm_schedule_removal(struct device *dev)
{
	pr_debug("PM: Preparing for removal: %s:%s\n",
		dev->bus ? dev->bus->name : "No Bus",
		kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	list_move_tail(&dev->power.entry, &dpm_destroy);
	mutex_unlock(&dpm_list_mtx);
}
EXPORT_SYMBOL_GPL(device_pm_schedule_removal);
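
/*
 * Typical use, as a sketch (foo_suspend() and foo_device_gone() are
 * hypothetical names, not taken from this file): a driver that finds
 * out during suspend that its device must go away cannot unregister it
 * right there, since the PM core is holding the device locks, so it
 * schedules a deferred removal instead:
 *
 *	static int foo_suspend(struct device *dev, pm_message_t state)
 *	{
 *		if (foo_device_gone(dev))
 *			device_pm_schedule_removal(dev);
 *		return 0;
 *	}
 *
 * The device is then unregistered by unregister_dropped_devices() when
 * the system resumes.
 */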

/**
 *	pm_sleep_lock - mutual exclusion for registration and suspend
 *
 *	Returns 0 if no suspend is underway and device registration
 *	may proceed, otherwise -EBUSY.
 */
int pm_sleep_lock(void)
{
	if (down_read_trylock(&pm_sleep_rwsem))
		return 0;

	return -EBUSY;
}

/**
 *	pm_sleep_unlock - mutual exclusion for registration and suspend
 *
 *	This routine undoes the effect of pm_sleep_lock()
 *	when a device's registration is complete.
 */
void pm_sleep_unlock(void)
{
	up_read(&pm_sleep_rwsem);
}
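
/*
 * Illustrative caller, as a sketch (not code from this file): a
 * registration path can use the pair above to keep device registration
 * and system sleep transitions mutually exclusive:
 *
 *	if (pm_sleep_lock())
 *		return -EBUSY;		<-- a suspend is in progress
 *	... register the device; device_pm_add() ends up being called ...
 *	pm_sleep_unlock();
 */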


/*------------------------- Resume routines -------------------------*/

/**
 *	resume_device_early - Power on one device (early resume).
 *	@dev:	Device.
 *
 *	Must be called with interrupts disabled.
 */
static int resume_device_early(struct device *dev)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->resume_early) {
		dev_dbg(dev, "EARLY resume\n");
		error = dev->bus->resume_early(dev);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 *	dpm_power_up - Power on all regular (non-sysdev) devices.
 *
 *	Walk the dpm_off_irq list and power each device up. This
 *	is used for devices that had to be powered down with
 *	interrupts disabled. As devices are powered on, they are moved
 *	to the dpm_off list.
 *
 *	Must be called with interrupts disabled and only one CPU running.
 */
static void dpm_power_up(void)
{
	while (!list_empty(&dpm_off_irq)) {
		struct list_head *entry = dpm_off_irq.next;
		struct device *dev = to_device(entry);

		list_move_tail(entry, &dpm_off);
		resume_device_early(dev);
	}
}

/**
 *	device_power_up - Turn on all devices that need special attention.
 *
 *	Power on system devices, then devices that had to be shut down
 *	with interrupts disabled.
 *
 *	Must be called with interrupts disabled.
 */
void device_power_up(void)
{
	sysdev_resume();
	dpm_power_up();
}
EXPORT_SYMBOL_GPL(device_power_up);

/**
 *	resume_device - Restore state for one device.
 *	@dev:	Device.
 *
 */
static int resume_device(struct device *dev)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->resume) {
		dev_dbg(dev, "resuming\n");
		error = dev->bus->resume(dev);
	}

	if (!error && dev->type && dev->type->resume) {
		dev_dbg(dev, "resuming\n");
		error = dev->type->resume(dev);
	}

	if (!error && dev->class && dev->class->resume) {
		dev_dbg(dev, "class resume\n");
		error = dev->class->resume(dev);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 *	dpm_resume - Resume every device.
 *
 *	Resume the devices that have either not gone through
 *	the late suspend, or that did go through it but also
 *	went through the early resume.
 *
 *	Take devices from the dpm_off list, resume them,
 *	and put them on the dpm_locked list.
 */
static void dpm_resume(void)
{
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_off)) {
		struct list_head *entry = dpm_off.next;
		struct device *dev = to_device(entry);

		list_move_tail(entry, &dpm_locked);
		mutex_unlock(&dpm_list_mtx);
		resume_device(dev);
		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
}

/**
 *	unlock_all_devices - Release each device's semaphore
 *
 *	Go through the dpm_locked list.  Put each device on the dpm_active
 *	list and unlock it.
 */
static void unlock_all_devices(void)
{
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_locked)) {
		struct list_head *entry = dpm_locked.prev;
		struct device *dev = to_device(entry);

		list_move(entry, &dpm_active);
		up(&dev->sem);
	}
	mutex_unlock(&dpm_list_mtx);
}

/**
 *	unregister_dropped_devices - Unregister devices scheduled for removal
 *
 *	Unregister all devices on the dpm_destroy list.
 */
static void unregister_dropped_devices(void)
{
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_destroy)) {
		struct list_head *entry = dpm_destroy.next;
		struct device *dev = to_device(entry);

		up(&dev->sem);
		mutex_unlock(&dpm_list_mtx);
		/* This also removes the device from the list */
		device_unregister(dev);
		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
}

/**
 *	device_resume - Restore state of each device in system.
 *
 *	Resume all the devices, unlock them all, and allow new
 *	devices to be registered once again.
 */
void device_resume(void)
{
	might_sleep();
	dpm_resume();
	unlock_all_devices();
	unregister_dropped_devices();
	up_write(&pm_sleep_rwsem);
}
EXPORT_SYMBOL_GPL(device_resume);


/*------------------------- Suspend routines -------------------------*/

static inline char *suspend_verb(u32 event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:	return "suspend";
	case PM_EVENT_FREEZE:	return "freeze";
	case PM_EVENT_PRETHAW:	return "prethaw";
	default:		return "(unknown suspend event)";
	}
}

static void
suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
		((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

/**
 *	suspend_device_late - Shut down one device (late suspend).
 *	@dev:	Device.
 *	@state:	Power state device is entering.
 *
 *	This is called with interrupts off and only a single CPU running.
 */
static int suspend_device_late(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus && dev->bus->suspend_late) {
		suspend_device_dbg(dev, state, "LATE ");
		error = dev->bus->suspend_late(dev, state);
		suspend_report_result(dev->bus->suspend_late, error);
	}
	return error;
}

/**
 *	device_power_down - Shut down special devices.
 *	@state:		Power state to enter.
 *
 *	Power down devices that require interrupts to be disabled
 *	and move them from the dpm_off list to the dpm_off_irq list.
 *	Then power down system devices.
 *
 *	Must be called with interrupts disabled and only one CPU running.
 */
int device_power_down(pm_message_t state)
{
	int error = 0;

	while (!list_empty(&dpm_off)) {
		struct list_head *entry = dpm_off.prev;
		struct device *dev = to_device(entry);

		list_del_init(&dev->power.entry);
		error = suspend_device_late(dev, state);
		if (error) {
			printk(KERN_ERR "Could not power down device %s: "
					"error %d\n",
					kobject_name(&dev->kobj), error);
			if (list_empty(&dev->power.entry))
				list_add(&dev->power.entry, &dpm_off);
			break;
		}
		if (list_empty(&dev->power.entry))
			list_add(&dev->power.entry, &dpm_off_irq);
	}

	if (!error)
		error = sysdev_suspend(state);
	if (error)
		dpm_power_up();
	return error;
}
EXPORT_SYMBOL_GPL(device_power_down);

/**
 *	suspend_device - Save state of one device.
 *	@dev:	Device.
 *	@state:	Power state device is entering.
 */
int suspend_device(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->power.power_state.event) {
		dev_dbg(dev, "PM: suspend %d-->%d\n",
			dev->power.power_state.event, state.event);
	}

	if (dev->class && dev->class->suspend) {
		suspend_device_dbg(dev, state, "class ");
		error = dev->class->suspend(dev, state);
		suspend_report_result(dev->class->suspend, error);
	}

	if (!error && dev->type && dev->type->suspend) {
		suspend_device_dbg(dev, state, "type ");
		error = dev->type->suspend(dev, state);
		suspend_report_result(dev->type->suspend, error);
	}

	if (!error && dev->bus && dev->bus->suspend) {
		suspend_device_dbg(dev, state, "");
		error = dev->bus->suspend(dev, state);
		suspend_report_result(dev->bus->suspend, error);
	}
	return error;
}

/**
 *	dpm_suspend - Suspend every device.
 *	@state:	Power state to put each device in.
 *
 *	Walk the dpm_locked list.  Suspend each device and move it
 *	to the dpm_off list.
 *
 *	(For historical reasons, if a device returns -EAGAIN, that used to
 *	mean it wanted to be called again with interrupts disabled.  These
 *	days we use the suspend_late() callback for that, so we print a
 *	warning and treat -EAGAIN as an error.)
 */
static int dpm_suspend(pm_message_t state)
{
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_locked)) {
		struct list_head *entry = dpm_locked.prev;
		struct device *dev = to_device(entry);

		list_del_init(&dev->power.entry);
		mutex_unlock(&dpm_list_mtx);
		error = suspend_device(dev, state);
		if (error) {
			printk(KERN_ERR "Could not suspend device %s: "
					"error %d%s\n",
					kobject_name(&dev->kobj),
					error,
					(error == -EAGAIN ?
					" (please convert to suspend_late)" :
					""));
			mutex_lock(&dpm_list_mtx);
			if (list_empty(&dev->power.entry))
				list_add(&dev->power.entry, &dpm_locked);
			mutex_unlock(&dpm_list_mtx);
			break;
		}
		mutex_lock(&dpm_list_mtx);
		if (list_empty(&dev->power.entry))
			list_add(&dev->power.entry, &dpm_off);
	}
	mutex_unlock(&dpm_list_mtx);

	return error;
}

/**
 *	lock_all_devices - Acquire every device's semaphore
 *
 *	Go through the dpm_active list. Carefully lock each device's
 *	semaphore and put it on the dpm_locked list.
 */
static void lock_all_devices(void)
{
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_active)) {
		struct list_head *entry = dpm_active.next;
		struct device *dev = to_device(entry);

		/* Required locking order is dev->sem first,
		 * then dpm_list_mtx.  Hence this awkward code.
		 */
		get_device(dev);
		mutex_unlock(&dpm_list_mtx);
		down(&dev->sem);
		mutex_lock(&dpm_list_mtx);

		if (list_empty(entry))
			up(&dev->sem);		/* Device was removed */
		else
			list_move_tail(entry, &dpm_locked);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
}

/**
 *	device_suspend - Save state and stop all devices in system.
 *	@state:	Power state to put each device in.
 *
 *	Prevent new devices from being registered, then lock all devices
 *	and suspend them.
 */
int device_suspend(pm_message_t state)
{
	int error;

	might_sleep();
	down_write(&pm_sleep_rwsem);
	lock_all_devices();
	error = dpm_suspend(state);
	if (error)
		device_resume();
	return error;
}
EXPORT_SYMBOL_GPL(device_suspend);
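
/*
 * Overall ordering as used by the system sleep core, shown here only as
 * a sketch (the real callers live in kernel/power/, not in this file):
 *
 *	device_suspend(PMSG_SUSPEND);		with interrupts enabled
 *	device_power_down(PMSG_SUSPEND);	interrupts disabled
 *	... the system sleep state is entered ...
 *	device_power_up();			interrupts still disabled
 *	device_resume();			interrupts enabled again
 */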

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret) {
		printk(KERN_ERR "%s(): ", function);
		print_fn_descriptor_symbol("%s() returns ", (unsigned long)fn);
		printk("%d\n", ret);
	}
}
EXPORT_SYMBOL_GPL(__suspend_report_result);