xref: /linux/drivers/base/power/main.c (revision 7f3edee81fbd49114c28057512906f169caa0bed)
1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A different set of lists than the global subsystem list are used to
16  * keep track of power info because we use different lists to hold
17  * devices based on what stage of the power management process they
18  * are in. The power domain dependencies may also differ from the
19  * ancestral dependencies that the subsystem list maintains.
20  */
21 
22 #include <linux/device.h>
23 #include <linux/kallsyms.h>
24 #include <linux/mutex.h>
25 #include <linux/pm.h>
26 #include <linux/resume-trace.h>
27 #include <linux/rwsem.h>
28 
29 #include "../base.h"
30 #include "power.h"
31 
32 /*
33  * The entries in the dpm_active list are in a depth first order, simply
34  * because children are guaranteed to be discovered after parents, and
35  * are inserted at the back of the list on discovery.
36  *
37  * All the other lists are kept in the same order, for consistency.
38  * However the lists aren't always traversed in the same order.
39  * Semaphores must be acquired from the top (i.e., front) down
40  * and released in the opposite order.  Devices must be suspended
41  * from the bottom (i.e., end) up and resumed in the opposite order.
42  * That way no parent will be suspended while it still has an active
43  * child.
44  *
45  * Since device_pm_add() may be called with a device semaphore held,
46  * we must never try to acquire a device semaphore while holding
47  * dpm_list_mutex.
48  */
49 
50 LIST_HEAD(dpm_active);
51 static LIST_HEAD(dpm_locked);
52 static LIST_HEAD(dpm_off);
53 static LIST_HEAD(dpm_off_irq);
54 static LIST_HEAD(dpm_destroy);
55 
56 static DEFINE_MUTEX(dpm_list_mtx);
57 
58 static DECLARE_RWSEM(pm_sleep_rwsem);
59 
60 int (*platform_enable_wakeup)(struct device *dev, int is_on);
61 
62 /**
63  *	device_pm_add - add a device to the list of active devices
64  *	@dev:	Device to be added to the list
65  */
66 void device_pm_add(struct device *dev)
67 {
68 	pr_debug("PM: Adding info for %s:%s\n",
69 		 dev->bus ? dev->bus->name : "No Bus",
70 		 kobject_name(&dev->kobj));
71 	mutex_lock(&dpm_list_mtx);
72 	list_add_tail(&dev->power.entry, &dpm_active);
73 	mutex_unlock(&dpm_list_mtx);
74 }
75 
76 /**
77  *	device_pm_remove - remove a device from the list of active devices
78  *	@dev:	Device to be removed from the list
79  *
80  *	This function also removes the device's PM-related sysfs attributes.
81  */
82 void device_pm_remove(struct device *dev)
83 {
84 	/*
85 	 * If this function is called during a suspend, it will be blocked,
86 	 * because we're holding the device's semaphore at that time, which may
87 	 * lead to a deadlock.  In that case we want to print a warning.
88 	 * However, it may also be called by unregister_dropped_devices() with
89 	 * the device's semaphore released, in which case the warning should
90 	 * not be printed.
91 	 */
92 	if (down_trylock(&dev->sem)) {
93 		if (down_read_trylock(&pm_sleep_rwsem)) {
94 			/* No suspend in progress, wait on dev->sem */
95 			down(&dev->sem);
96 			up_read(&pm_sleep_rwsem);
97 		} else {
98 			/* Suspend in progress, we may deadlock */
99 			dev_warn(dev, "Suspicious %s during suspend\n",
100 				__FUNCTION__);
101 			dump_stack();
102 			/* The user has been warned ... */
103 			down(&dev->sem);
104 		}
105 	}
106 	pr_debug("PM: Removing info for %s:%s\n",
107 		 dev->bus ? dev->bus->name : "No Bus",
108 		 kobject_name(&dev->kobj));
109 	mutex_lock(&dpm_list_mtx);
110 	dpm_sysfs_remove(dev);
111 	list_del_init(&dev->power.entry);
112 	mutex_unlock(&dpm_list_mtx);
113 	up(&dev->sem);
114 }
115 
116 /**
117  *	device_pm_schedule_removal - schedule the removal of a suspended device
118  *	@dev:	Device to destroy
119  *
120  *	Moves the device to the dpm_destroy list for further processing by
121  *	unregister_dropped_devices().
122  */
123 void device_pm_schedule_removal(struct device *dev)
124 {
125 	pr_debug("PM: Preparing for removal: %s:%s\n",
126 		dev->bus ? dev->bus->name : "No Bus",
127 		kobject_name(&dev->kobj));
128 	mutex_lock(&dpm_list_mtx);
129 	list_move_tail(&dev->power.entry, &dpm_destroy);
130 	mutex_unlock(&dpm_list_mtx);
131 }
132 
133 /**
134  *	pm_sleep_lock - mutual exclusion for registration and suspend
135  *
136  *	Returns 0 if no suspend is underway and device registration
137  *	may proceed, otherwise -EBUSY.
138  */
139 int pm_sleep_lock(void)
140 {
141 	if (down_read_trylock(&pm_sleep_rwsem))
142 		return 0;
143 
144 	return -EBUSY;
145 }
146 
/**
 *	pm_sleep_unlock - mutual exclusion for registration and suspend
 *
 *	This routine undoes the effect of pm_sleep_lock()
 *	when a device's registration is complete.
 */
void pm_sleep_unlock(void)
{
	up_read(&pm_sleep_rwsem);
}
157 
158 
159 /*------------------------- Resume routines -------------------------*/
160 
/**
 *	resume_device_early - Power on one device (early resume).
 *	@dev:	Device.
 *
 *	Must be called with interrupts disabled.
 *
 *	Invokes the bus-level ->resume_early() callback, if any; a device
 *	without one is left untouched and 0 is returned.
 */
static int resume_device_early(struct device *dev)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Only the bus type has an "early" resume method at this stage. */
	if (dev->bus && dev->bus->resume_early) {
		dev_dbg(dev, "EARLY resume\n");
		error = dev->bus->resume_early(dev);
	}

	TRACE_RESUME(error);
	return error;
}
182 
183 /**
184  *	dpm_power_up - Power on all regular (non-sysdev) devices.
185  *
186  *	Walk the dpm_off_irq list and power each device up. This
187  *	is used for devices that required they be powered down with
188  *	interrupts disabled. As devices are powered on, they are moved
189  *	to the dpm_off list.
190  *
191  *	Must be called with interrupts disabled and only one CPU running.
192  */
193 static void dpm_power_up(void)
194 {
195 
196 	while (!list_empty(&dpm_off_irq)) {
197 		struct list_head *entry = dpm_off_irq.next;
198 		struct device *dev = to_device(entry);
199 
200 		list_move_tail(entry, &dpm_off);
201 		resume_device_early(dev);
202 	}
203 }
204 
/**
 *	device_power_up - Turn on all devices that need special attention.
 *
 *	Power on system devices, then devices that required we shut them down
 *	with interrupts disabled.
 *
 *	Must be called with interrupts disabled.
 */
void device_power_up(void)
{
	/* sysdevs are suspended last (see device_power_down), so resume
	 * them first, before the regular dpm_off_irq devices. */
	sysdev_resume();
	dpm_power_up();
}
EXPORT_SYMBOL_GPL(device_power_up);
219 
/**
 *	resume_device - Restore state for one device.
 *	@dev:	Device.
 *
 *	Calls the bus, type and class ->resume() methods, in that order
 *	(the reverse of the order used by suspend_device()), stopping at
 *	the first one that returns an error.
 */
static int resume_device(struct device *dev)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->resume) {
		dev_dbg(dev,"resuming\n");
		error = dev->bus->resume(dev);
	}

	if (!error && dev->type && dev->type->resume) {
		dev_dbg(dev,"resuming\n");
		error = dev->type->resume(dev);
	}

	if (!error && dev->class && dev->class->resume) {
		dev_dbg(dev,"class resume\n");
		error = dev->class->resume(dev);
	}

	TRACE_RESUME(error);
	return error;
}
250 
/**
 *	dpm_resume - Resume every device.
 *
 *	Resume the devices that have either not gone through
 *	the late suspend, or that did go through it but also
 *	went through the early resume.
 *
 *	Take devices from the dpm_off_list, resume them,
 *	and put them on the dpm_locked list.
 */
static void dpm_resume(void)
{
	mutex_lock(&dpm_list_mtx);
	while(!list_empty(&dpm_off)) {
		struct list_head *entry = dpm_off.next;
		struct device *dev = to_device(entry);

		/*
		 * Move the device to dpm_locked before dropping the list
		 * lock, so it is never observable on two lists at once.
		 * The lock is released around resume_device() because the
		 * resume callbacks may block and may touch the PM lists
		 * themselves.
		 */
		list_move_tail(entry, &dpm_locked);
		mutex_unlock(&dpm_list_mtx);
		resume_device(dev);
		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
}
275 
/**
 *	unlock_all_devices - Release each device's semaphore
 *
 *	Go through the dpm_locked list.  Put each device on the dpm_active
 *	list and unlock it.
 */
static void unlock_all_devices(void)
{
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_locked)) {
		/* Take from the tail; list_move() inserts at the head of
		 * dpm_active, so the original ordering is preserved. */
		struct list_head *entry = dpm_locked.prev;
		struct device *dev = to_device(entry);

		list_move(entry, &dpm_active);
		up(&dev->sem);
	}
	mutex_unlock(&dpm_list_mtx);
}
294 
/**
 *	unregister_dropped_devices - Unregister devices scheduled for removal
 *
 *	Unregister all devices on the dpm_destroy list.
 */
static void unregister_dropped_devices(void)
{
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_destroy)) {
		struct list_head *entry = dpm_destroy.next;
		struct device *dev = to_device(entry);

		/* Release dev->sem first so that device_pm_remove(), which
		 * runs from device_unregister(), does not print its
		 * "Suspicious" warning (see the comment there). */
		up(&dev->sem);
		mutex_unlock(&dpm_list_mtx);
		/* This also removes the device from the list */
		device_unregister(dev);
		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
}
315 
/**
 *	device_resume - Restore state of each device in system.
 *
 *	Resume all the devices, unlock them all, and allow new
 *	devices to be registered once again.
 */
void device_resume(void)
{
	might_sleep();
	dpm_resume();
	unlock_all_devices();
	unregister_dropped_devices();
	/* Drop the writer lock taken by device_suspend(), re-enabling
	 * device registration (pm_sleep_lock() readers). */
	up_write(&pm_sleep_rwsem);
}
EXPORT_SYMBOL_GPL(device_resume);
331 
332 
333 /*------------------------- Suspend routines -------------------------*/
334 
335 static inline char *suspend_verb(u32 event)
336 {
337 	switch (event) {
338 	case PM_EVENT_SUSPEND:	return "suspend";
339 	case PM_EVENT_FREEZE:	return "freeze";
340 	case PM_EVENT_PRETHAW:	return "prethaw";
341 	default:		return "(unknown suspend event)";
342 	}
343 }
344 
345 static void
346 suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
347 {
348 	dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
349 		((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
350 		", may wakeup" : "");
351 }
352 
/**
 *	suspend_device_late - Shut down one device (late suspend).
 *	@dev:	Device.
 *	@state:	Power state device is entering.
 *
 *	This is called with interrupts off and only a single CPU running.
 *	Only the bus-level ->suspend_late() callback is invoked; a device
 *	without one is skipped and 0 is returned.
 */
static int suspend_device_late(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus && dev->bus->suspend_late) {
		suspend_device_dbg(dev, state, "LATE ");
		error = dev->bus->suspend_late(dev, state);
		suspend_report_result(dev->bus->suspend_late, error);
	}
	return error;
}
371 
/**
 *	device_power_down - Shut down special devices.
 *	@state:		Power state to enter.
 *
 *	Power down devices that require interrupts to be disabled
 *	and move them from the dpm_off list to the dpm_off_irq list.
 *	Then power down system devices.
 *
 *	Must be called with interrupts disabled and only one CPU running.
 *
 *	Returns 0 on success; on error, devices suspended so far are
 *	powered back up via dpm_power_up() before returning.
 */
int device_power_down(pm_message_t state)
{
	int error = 0;

	while (!list_empty(&dpm_off)) {
		/* Take from the tail: children before parents. */
		struct list_head *entry = dpm_off.prev;
		struct device *dev = to_device(entry);

		list_del_init(&dev->power.entry);
		error = suspend_device_late(dev, state);
		if (error) {
			printk(KERN_ERR "Could not power down device %s: "
					"error %d\n",
					kobject_name(&dev->kobj), error);
			/* The callback may have re-listed the device (e.g.
			 * device_pm_schedule_removal()); only re-add if not. */
			if (list_empty(&dev->power.entry))
				list_add(&dev->power.entry, &dpm_off);
			break;
		}
		if (list_empty(&dev->power.entry))
			list_add(&dev->power.entry, &dpm_off_irq);
	}

	if (!error)
		error = sysdev_suspend(state);
	if (error)
		dpm_power_up();	/* Undo the late suspends done above */
	return error;
}
EXPORT_SYMBOL_GPL(device_power_down);
411 
/**
 *	suspend_device - Save state of one device.
 *	@dev:	Device.
 *	@state:	Power state device is entering.
 *
 *	Calls the class, type and bus ->suspend() methods, in that order
 *	(the reverse of resume_device()), stopping at the first error.
 */
int suspend_device(struct device *dev, pm_message_t state)
{
	int error = 0;

	/* Note a transition from an already-nonzero power state. */
	if (dev->power.power_state.event) {
		dev_dbg(dev, "PM: suspend %d-->%d\n",
			dev->power.power_state.event, state.event);
	}

	if (dev->class && dev->class->suspend) {
		suspend_device_dbg(dev, state, "class ");
		error = dev->class->suspend(dev, state);
		suspend_report_result(dev->class->suspend, error);
	}

	if (!error && dev->type && dev->type->suspend) {
		suspend_device_dbg(dev, state, "type ");
		error = dev->type->suspend(dev, state);
		suspend_report_result(dev->type->suspend, error);
	}

	if (!error && dev->bus && dev->bus->suspend) {
		suspend_device_dbg(dev, state, "");
		error = dev->bus->suspend(dev, state);
		suspend_report_result(dev->bus->suspend, error);
	}
	return error;
}
445 
/**
 *	dpm_suspend - Suspend every device.
 *	@state:	Power state to put each device in.
 *
 *	Walk the dpm_locked list.  Suspend each device and move it
 *	to the dpm_off list.
 *
 *	(For historical reasons, if it returns -EAGAIN, that used to mean
 *	that the device would be called again with interrupts disabled.
 *	These days, we use the "suspend_late()" callback for that, so we
 *	print a warning and consider it an error).
 */
static int dpm_suspend(pm_message_t state)
{
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_locked)) {
		/* Take from the tail: children are suspended before
		 * their parents (see the comment at the top of the file). */
		struct list_head *entry = dpm_locked.prev;
		struct device *dev = to_device(entry);

		/* Unlink the device and drop the list lock before calling
		 * the (possibly blocking) suspend method. */
		list_del_init(&dev->power.entry);
		mutex_unlock(&dpm_list_mtx);
		error = suspend_device(dev, state);
		if (error) {
			printk(KERN_ERR "Could not suspend device %s: "
					"error %d%s\n",
					kobject_name(&dev->kobj),
					error,
					(error == -EAGAIN ?
					" (please convert to suspend_late)" :
					""));
			/* Put the device back on dpm_locked unless the
			 * suspend method itself re-listed it somewhere. */
			mutex_lock(&dpm_list_mtx);
			if (list_empty(&dev->power.entry))
				list_add(&dev->power.entry, &dpm_locked);
			mutex_unlock(&dpm_list_mtx);
			break;
		}
		mutex_lock(&dpm_list_mtx);
		if (list_empty(&dev->power.entry))
			list_add(&dev->power.entry, &dpm_off);
	}
	mutex_unlock(&dpm_list_mtx);

	return error;
}
492 
/**
 *	lock_all_devices - Acquire every device's semaphore
 *
 *	Go through the dpm_active list. Carefully lock each device's
 *	semaphore and put it in on the dpm_locked list.
 */
static void lock_all_devices(void)
{
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_active)) {
		struct list_head *entry = dpm_active.next;
		struct device *dev = to_device(entry);

		/* Required locking order is dev->sem first,
		 * then dpm_list_mutex.  Hence this awkward code.
		 */
		get_device(dev);	/* keep dev alive while unlocked */
		mutex_unlock(&dpm_list_mtx);
		down(&dev->sem);
		mutex_lock(&dpm_list_mtx);

		/* While unlocked, the device may have been unlinked. */
		if (list_empty(entry))
			up(&dev->sem);		/* Device was removed */
		else
			list_move_tail(entry, &dpm_locked);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
}
522 
/**
 *	device_suspend - Save state and stop all devices in system.
 *	@state:	Power state to put each device in.
 *
 *	Prevent new devices from being registered, then lock all devices
 *	and suspend them.
 *
 *	Returns 0 on success; on failure everything is resumed again
 *	(which also re-enables registration) before returning the error.
 */
int device_suspend(pm_message_t state)
{
	int error;

	might_sleep();
	/* Writer lock: makes pm_sleep_lock() fail, blocking registration */
	down_write(&pm_sleep_rwsem);
	lock_all_devices();
	error = dpm_suspend(state);
	if (error)
		device_resume();	/* also releases pm_sleep_rwsem */
	return error;
}
EXPORT_SYMBOL_GPL(device_suspend);
541 EXPORT_SYMBOL_GPL(device_suspend);
542 
/**
 *	__suspend_report_result - Log a failing suspend/resume callback.
 *	@function:	Name of the calling function, for the log prefix.
 *	@fn:	Address of the callback that was invoked.
 *	@ret:	Value the callback returned; nothing is printed when 0.
 */
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret) {
		printk(KERN_ERR "%s(): ", function);
		/* Print the callback's symbol name via kallsyms. */
		print_fn_descriptor_symbol("%s() returns ", (unsigned long)fn);
		printk("%d\n", ret);
	}
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
552