1 /*
2  * drivers/base/power/runtime.c - Helper functions for device run-time PM
3  *
4  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/sched.h>
10 #include <linux/pm_runtime.h>
11 #include <linux/jiffies.h>
12 
13 static int __pm_runtime_resume(struct device *dev, bool from_wq);
14 static int __pm_request_idle(struct device *dev);
15 static int __pm_request_resume(struct device *dev);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_idle()!\n");

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it.  Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	dev_dbg(dev, "__pm_runtime_idle() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 */
int pm_runtime_idle(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_idle(dev);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_idle);
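
/*
 * Illustrative sketch (not part of this file): pm_runtime_idle() only runs
 * the bus type's ->runtime_idle() callback; it is up to that callback to
 * decide whether the device should actually be suspended.  A hypothetical
 * bus type might simply do:
 *
 *	static int foo_bus_runtime_idle(struct device *dev)
 *	{
 *		return pm_runtime_suspend(dev);
 *	}
 *
 * The "foo" names here are made up for the example.
 */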

/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type.  If another suspend has been started earlier, wait
 * for it to finish.  If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.runtime_status = RPM_SUSPENDING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_ACTIVE;
		pm_runtime_cancel_pending(dev);
		dev->power.deferred_resume = false;

		if (retval == -EAGAIN || retval == -EBUSY) {
			notify = true;
			dev->power.runtime_error = 0;
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 */
int pm_runtime_suspend(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_suspend(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspend);
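
/*
 * Illustrative sketch (hypothetical bus type): a ->runtime_suspend()
 * callback typically quiesces the device and may refuse the suspend by
 * returning -EBUSY or -EAGAIN, in which case __pm_runtime_suspend() above
 * restores RPM_ACTIVE, clears runtime_error and re-runs idle notification.
 *
 *	static int foo_bus_runtime_suspend(struct device *dev)
 *	{
 *		if (foo_device_busy(dev))	// made-up helper
 *			return -EBUSY;
 *		foo_device_power_off(dev);	// made-up helper
 *		return 0;
 *	}
 */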

/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type.  If another resume has been started earlier, wait
 * for it to finish.  If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device.  Cancel any scheduled
 * or pending requests.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock_irq(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock_irq(&parent->power.lock);

		spin_lock_irq(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	dev->power.runtime_status = RPM_RESUMING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_cancel_pending(dev);
	} else {
		dev->power.runtime_status = RPM_ACTIVE;
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 */
int pm_runtime_resume(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_resume(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_resume);
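
/*
 * Illustrative sketch (hypothetical driver code): a driver that needs the
 * hardware immediately resumes it synchronously before starting I/O.  Note
 * that a return value of 1 means "already active" and is not an error.
 *
 *	error = pm_runtime_resume(dev);
 *	if (error < 0)
 *		return error;
 *
 * Most drivers would rather use pm_runtime_get_sync() (a wrapper around
 * __pm_runtime_get() below), so the usage count also blocks suspends while
 * the I/O is in flight.
 */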

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		__pm_runtime_idle(dev);
		break;
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, true);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, true);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status == RPM_SUSPENDED
	    || dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		return retval;

	if (dev->power.request_pending) {
		/* Any requests other than RPM_REQ_IDLE take precedence. */
		if (dev->power.request == RPM_REQ_NONE)
			dev->power.request = RPM_REQ_IDLE;
		else if (dev->power.request != RPM_REQ_IDLE)
			retval = -EAGAIN;
		return retval;
	}

	dev->power.request = RPM_REQ_IDLE;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 */
int pm_request_idle(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_idle(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_idle);
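
/*
 * Illustrative note: unlike pm_runtime_idle(), pm_request_idle() only
 * queues work on pm_wq, so it may be used from contexts that cannot sleep,
 * e.g. a hypothetical transfer-completion handler:
 *
 *	static void foo_xfer_done(struct device *dev)	// made-up function
 *	{
 *		pm_request_idle(dev);
 *	}
 */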

/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
						RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and execute __pm_request_suspend() in that case.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		__pm_request_suspend(dev);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	if (!delay) {
		retval = __pm_request_suspend(dev);
		goto out;
	}

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but any
		 * other pending requests have to be canceled.
		 */
		if (dev->power.request == RPM_REQ_RESUME) {
			retval = -EAGAIN;
			goto out;
		}
		dev->power.request = RPM_REQ_NONE;
	}

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
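
/*
 * Illustrative sketch: a driver can implement a simple inactivity timeout
 * by rescheduling a suspend request whenever the device finishes work; the
 * timer deactivation above makes re-arming safe.  The 500 ms delay below
 * is an arbitrary example value.
 *
 *	pm_schedule_suspend(dev, 500);
 */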

/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING)
		retval = -EINPROGRESS;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/* If a non-resume request is pending, we can overtake it. */
		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_RESUME;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 */
int pm_request_resume(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_resume(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_resume);
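
/*
 * Illustrative sketch: since pm_request_resume() only queues a request and
 * takes dev->power.lock with spin_lock_irqsave(), it is suitable for use
 * in interrupt handlers that detect wake-up events (the handler below is
 * hypothetical).
 *
 *	static irqreturn_t foo_wake_irq(int irq, void *data)
 *	{
 *		pm_request_resume(data);
 *		return IRQ_HANDLED;
 *	}
 */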

/**
 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
 * @dev: Device to handle.
 * @sync: If set and the device is suspended, resume it synchronously.
 *
 * Increment the usage count of the device and if it was zero previously,
 * resume it or submit a resume request for it, depending on the value of @sync.
 */
int __pm_runtime_get(struct device *dev, bool sync)
{
	int retval = 1;

	if (atomic_add_return(1, &dev->power.usage_count) == 1)
		retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_get);

/**
 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
 * @dev: Device to handle.
 * @sync: If the device's bus type is to be notified, do that synchronously.
 *
 * Decrement the usage count of the device and if it reaches zero, carry out a
 * synchronous idle notification or submit an idle notification request for it,
 * depending on the value of @sync.
 */
int __pm_runtime_put(struct device *dev, bool sync)
{
	int retval = 0;

	if (atomic_dec_and_test(&dev->power.usage_count))
		retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_put);
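
/*
 * Illustrative note: __pm_runtime_get() and __pm_runtime_put() are meant
 * to be used in balanced pairs around code that needs the device powered;
 * drivers normally call them through the pm_runtime_get{,_sync}() and
 * pm_runtime_put{,_sync}() wrappers from include/linux/pm_runtime.h.
 *
 *	pm_runtime_get_sync(dev);	// resume now if suspended
 *	foo_do_io(dev);			// made-up I/O routine
 *	pm_runtime_put(dev);		// idle notification via pm_wq
 */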

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent that is not active, with run-time PM
 * enabled and the power.ignore_children flag unset, the device's status cannot
 * be set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It is always possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_irq(&parent->power.lock);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and has the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			error = -EBUSY;
		} else {
			if (dev->power.runtime_status == RPM_SUSPENDED)
				atomic_inc(&parent->power.child_count);
		}

		spin_unlock_irq(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	dev->power.runtime_status = status;
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
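
/*
 * Illustrative sketch: a driver whose device is already powered on at
 * probe time would record that fact before enabling run-time PM, usually
 * via the pm_runtime_set_active() wrapper for this function.  This works
 * in probe because pm_runtime_init() starts devices off with disable_depth
 * equal to 1, which satisfies the check above.
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */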

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, false);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
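
/*
 * Illustrative sketch: code about to tear a device down can use the
 * barrier to make sure no run-time PM callback is running or about to run
 * (the surrounding function is hypothetical).
 *
 *	static void foo_teardown(struct device *dev)
 *	{
 *		pm_runtime_barrier(dev);
 *		// no callbacks in flight now, though new requests may
 *		// still be queued afterwards
 *	}
 */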

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and, if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
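
/*
 * Illustrative sketch: disable/enable calls nest via disable_depth, so a
 * typical pattern brackets a section during which run-time PM transitions
 * must not happen (drivers use the pm_runtime_disable() wrapper, which
 * passes check_resume = true):
 *
 *	pm_runtime_disable(dev);	// flush requests, wait for callbacks
 *	...				// reconfigure the device safely
 *	pm_runtime_enable(dev);		// drop disable_depth back down
 */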

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	spin_lock_init(&dev->power.lock);

	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
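
/*
 * Illustrative note: after pm_runtime_init() the device is regarded as
 * RPM_SUSPENDED with disable_depth == 1, so run-time PM stays inert until
 * it is explicitly enabled.  A hypothetical probe path might do:
 *
 *	foo_power_on(dev);		// made-up helper
 *	pm_runtime_set_active(dev);	// reflect the real state
 *	pm_runtime_enable(dev);
 */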

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}