/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A different set of lists than the global subsystem list is used to
 * keep track of power info, because we use different lists to hold
 * devices based on which stage of the power management process they
 * are in. The power domain dependencies may also differ from the
 * ancestral dependencies that the subsystem list maintains.
 */
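
/*
 * Overview of how a device moves between the lists declared below over a
 * full suspend/resume cycle, as implemented by the functions in this file:
 *
 *	dpm_active  --device_suspend()-->     dpm_off
 *	dpm_off     --device_power_down()-->  dpm_off_irq
 *	dpm_off_irq --device_power_up()-->    dpm_off
 *	dpm_off     --device_resume()-->      dpm_active
 */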

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/resume-trace.h>

#include "../base.h"
#include "power.h"

LIST_HEAD(dpm_active);
static LIST_HEAD(dpm_off);
static LIST_HEAD(dpm_off_irq);

static DEFINE_MUTEX(dpm_mtx);
static DEFINE_MUTEX(dpm_list_mtx);

int (*platform_enable_wakeup)(struct device *dev, int is_on);

int device_pm_add(struct device *dev)
{
	int error;

	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	list_add_tail(&dev->power.entry, &dpm_active);
	error = dpm_sysfs_add(dev);
	if (error)
		list_del(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	dpm_sysfs_remove(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
}


/*------------------------- Resume routines -------------------------*/

/**
 *	resume_device - Restore state for one device.
 *	@dev:	Device.
 *
 */

static int resume_device(struct device *dev)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	down(&dev->sem);

	if (dev->bus && dev->bus->resume) {
		dev_dbg(dev, "resuming\n");
		error = dev->bus->resume(dev);
	}

	if (!error && dev->type && dev->type->resume) {
		dev_dbg(dev, "resuming\n");
		error = dev->type->resume(dev);
	}

	if (!error && dev->class && dev->class->resume) {
		dev_dbg(dev, "class resume\n");
		error = dev->class->resume(dev);
	}

	up(&dev->sem);

	TRACE_RESUME(error);
	return error;
}

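/**
 *	resume_device_early - Power on one device (early resume).
 *	@dev:	Device.
 *
 *	Must be called with interrupts disabled.
 */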
static int resume_device_early(struct device *dev)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);
	if (dev->bus && dev->bus->resume_early) {
		dev_dbg(dev, "EARLY resume\n");
		error = dev->bus->resume_early(dev);
	}
	TRACE_RESUME(error);
	return error;
}

/*
 * Resume the devices that have either not gone through
 * the late suspend, or that did go through it but also
 * went through the early resume.
 */
static void dpm_resume(void)
{
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_off)) {
		struct list_head *entry = dpm_off.next;
		struct device *dev = to_device(entry);

		get_device(dev);
		list_move_tail(entry, &dpm_active);

		mutex_unlock(&dpm_list_mtx);
		resume_device(dev);
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
}


/**
 *	device_resume - Restore state of each device in system.
 *
 *	Walk the dpm_off list, move each entry to the dpm_active list
 *	and resume the corresponding device.
 */

void device_resume(void)
{
	might_sleep();
	mutex_lock(&dpm_mtx);
	dpm_resume();
	mutex_unlock(&dpm_mtx);
}

EXPORT_SYMBOL_GPL(device_resume);


/**
 *	dpm_power_up - Power on some devices.
 *
 *	Walk the dpm_off_irq list and power each device up. This
 *	is used for devices that had to be powered down with
 *	interrupts disabled. As devices are powered on, they are moved
 *	to the dpm_off list.
 *
 *	Interrupts must be disabled when calling this.
 */

static void dpm_power_up(void)
{
	while (!list_empty(&dpm_off_irq)) {
		struct list_head *entry = dpm_off_irq.next;
		struct device *dev = to_device(entry);

		list_move_tail(entry, &dpm_off);
		resume_device_early(dev);
	}
}


/**
 *	device_power_up - Turn on all devices that need special attention.
 *
 *	Power on system devices, then the devices that required we shut
 *	them down with interrupts disabled.
 *	Called with interrupts disabled.
 */

void device_power_up(void)
{
	sysdev_resume();
	dpm_power_up();
}

EXPORT_SYMBOL_GPL(device_power_up);


/*------------------------- Suspend routines -------------------------*/

/*
 * The entries in the dpm_active list are in a depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * All lists on the suspend path are walked in reverse order, so we operate
 * on the leaves of the device tree (or forests, depending on how you want
 * to look at it ;) first. As nodes are removed from the back of the list,
 * they are inserted into the front of their destination lists.
 *
 * Things are the reverse on the resume path - iterations are done in
 * forward order, and nodes are inserted at the back of their destination
 * lists. This way, the ancestors will be accessed before their descendants.
 */
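
/*
 * For example, with a parent P registered before its children C1 and C2,
 * dpm_active holds [P, C1, C2]. Suspend walks from the tail (C2, C1, P)
 * and prepends to dpm_off, which therefore ends up as [P, C1, C2] again;
 * resume then walks dpm_off from the head, so P is resumed before C1 and C2.
 */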

static inline char *suspend_verb(u32 event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:	return "suspend";
	case PM_EVENT_FREEZE:	return "freeze";
	case PM_EVENT_PRETHAW:	return "prethaw";
	default:		return "(unknown suspend event)";
	}
}


static void
suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
		((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

/**
 *	suspend_device - Save state of one device.
 *	@dev:	Device.
 *	@state:	Power state device is entering.
 */

static int suspend_device(struct device *dev, pm_message_t state)
{
	int error = 0;

	down(&dev->sem);
	if (dev->power.power_state.event) {
		dev_dbg(dev, "PM: suspend %d-->%d\n",
			dev->power.power_state.event, state.event);
	}

	if (dev->class && dev->class->suspend) {
		suspend_device_dbg(dev, state, "class ");
		error = dev->class->suspend(dev, state);
		suspend_report_result(dev->class->suspend, error);
	}

	if (!error && dev->type && dev->type->suspend) {
		suspend_device_dbg(dev, state, "type ");
		error = dev->type->suspend(dev, state);
		suspend_report_result(dev->type->suspend, error);
	}

	if (!error && dev->bus && dev->bus->suspend) {
		suspend_device_dbg(dev, state, "");
		error = dev->bus->suspend(dev, state);
		suspend_report_result(dev->bus->suspend, error);
	}
	up(&dev->sem);
	return error;
}


/*
 * This is called with interrupts off and only a single CPU
 * running. We can't acquire a mutex or semaphore (and we don't
 * need the protection).
 */
static int suspend_device_late(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus && dev->bus->suspend_late) {
		suspend_device_dbg(dev, state, "LATE ");
		error = dev->bus->suspend_late(dev, state);
		suspend_report_result(dev->bus->suspend_late, error);
	}
	return error;
}

/**
 *	device_suspend - Save state and stop all devices in system.
 *	@state:		Power state to put each device in.
 *
 *	Walk the dpm_active list, call ->suspend() for each device, and move
 *	it to the dpm_off list.
 *
 *	(Historically, a return value of -EAGAIN meant that the device would
 *	be called again with interrupts disabled. These days, we use the
 *	suspend_late() callback for that, so we print a warning and treat
 *	-EAGAIN as an error.)
 *
 *	If we get a different error, try to back out.
 *
 *	If we hit a failure with any of the devices, call device_resume()
 *	above to bring the suspended devices back to life.
 */

int device_suspend(pm_message_t state)
{
	int error = 0;

	might_sleep();
	mutex_lock(&dpm_mtx);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_active) && error == 0) {
		struct list_head *entry = dpm_active.prev;
		struct device *dev = to_device(entry);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = suspend_device(dev, state);

		mutex_lock(&dpm_list_mtx);

		/* Check if the device got removed */
		if (!list_empty(&dev->power.entry)) {
			/* Move it to the dpm_off list */
			if (!error)
				list_move(&dev->power.entry, &dpm_off);
		}
		if (error)
			printk(KERN_ERR "Could not suspend device %s: "
				"error %d%s\n",
				kobject_name(&dev->kobj), error,
				error == -EAGAIN ?
				" (please convert to suspend_late)" : "");
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume();

	mutex_unlock(&dpm_mtx);
	return error;
}

EXPORT_SYMBOL_GPL(device_suspend);
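
/*
 * Note: the suspend core is expected to call the entry points exported in
 * this file roughly in the order sketched below (illustration only; the
 * exact caller and its error handling live outside this file):
 *
 *	error = device_suspend(PMSG_SUSPEND);		(process context)
 *	error = device_power_down(PMSG_SUSPEND);	(interrupts disabled)
 *	... enter the sleep state ...
 *	device_power_up();				(interrupts disabled)
 *	device_resume();				(process context)
 */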

/**
 *	device_power_down - Shut down special devices.
 *	@state:		Power state to enter.
 *
 *	Walk the dpm_off list, call ->suspend_late() for each device that
 *	could not be powered down with interrupts enabled, and move it to
 *	the dpm_off_irq list. When we're done, power down system devices.
 */

int device_power_down(pm_message_t state)
{
	int error = 0;
	struct device *dev;

	while (!list_empty(&dpm_off)) {
		struct list_head *entry = dpm_off.prev;

		dev = to_device(entry);
		error = suspend_device_late(dev, state);
		if (error)
			goto Error;
		list_move(&dev->power.entry, &dpm_off_irq);
	}

	error = sysdev_suspend(state);
 Done:
	return error;
 Error:
	printk(KERN_ERR "Could not power down device %s: "
		"error %d\n", kobject_name(&dev->kobj), error);
	dpm_power_up();
	goto Done;
}

EXPORT_SYMBOL_GPL(device_power_down);

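/**
 *	__suspend_report_result - Report a failing suspend callback.
 *	@function:	Name of the caller (supplied by the
 *			suspend_report_result() macro).
 *	@fn:		The callback that failed.
 *	@ret:		Value returned by @fn.
 *
 *	Print which suspend callback returned a nonzero result, so the
 *	offending driver can be identified. Does nothing if @ret is zero.
 */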
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret) {
		printk(KERN_ERR "%s(): ", function);
		print_fn_descriptor_symbol("%s() returns ", (unsigned long)fn);
		printk("%d\n", ret);
	}
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
400