/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

static int __pm_runtime_resume(struct device *dev, bool from_wq);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it.  Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}

/**
 * pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 */
int pm_runtime_idle(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_idle(dev);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_idle);
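
/*
 * Usage sketch (hypothetical bus-type code, not part of this file): the
 * ->runtime_idle() callback invoked by __pm_runtime_idle() above runs with
 * dev->power.lock released, so it may call the synchronous helpers directly.
 * A bus type that simply wants idle devices suspended could do:
 *
 *	static int foo_bus_runtime_idle(struct device *dev)
 *	{
 *		return pm_runtime_suspend(dev);
 *	}
 *
 * The "foo_bus" name is illustrative only.
 */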

/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type.  If another suspend has been started earlier, wait
 * for it to finish.  If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.runtime_status = RPM_SUSPENDING;
	dev->power.deferred_resume = false;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_ACTIVE;
		pm_runtime_cancel_pending(dev);

		if (retval == -EAGAIN || retval == -EBUSY) {
			notify = true;
			dev->power.runtime_error = 0;
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 */
int pm_runtime_suspend(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_suspend(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspend);
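
/*
 * Usage note with a sketch (illustrative, not part of this file):
 * pm_runtime_suspend() returns 1 when the device is already suspended, so
 * callers that only care about failures should test for negative values:
 *
 *	error = pm_runtime_suspend(dev);
 *	if (error < 0)
 *		dev_warn(dev, "run-time suspend failed: %d\n", error);
 */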

/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type.  If another resume has been started earlier, wait
 * for it to finish.  If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device.  Cancel any scheduled
 * or pending requests.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	dev->power.runtime_status = RPM_RESUMING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_cancel_pending(dev);
	} else {
		dev->power.runtime_status = RPM_ACTIVE;
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}

/**
 * pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 */
int pm_runtime_resume(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_resume(dev, false);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_resume);
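
/*
 * Usage sketch (hypothetical driver code): a driver about to access the
 * hardware can resume the device synchronously; as with pm_runtime_suspend(),
 * a return value of 1 only means the device was already active:
 *
 *	if (pm_runtime_resume(dev) < 0)
 *		return -EIO;
 *
 * The -EIO mapping is an illustrative choice, not mandated by this API.
 */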

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		__pm_runtime_idle(dev);
		break;
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, true);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, true);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status == RPM_SUSPENDED
	    || dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		return retval;

	if (dev->power.request_pending) {
		/* Any requests other than RPM_REQ_IDLE take precedence. */
		if (dev->power.request == RPM_REQ_NONE)
			dev->power.request = RPM_REQ_IDLE;
		else if (dev->power.request != RPM_REQ_IDLE)
			retval = -EAGAIN;
		return retval;
	}

	dev->power.request = RPM_REQ_IDLE;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 */
int pm_request_idle(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_idle(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_idle);
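
/*
 * Usage sketch (hypothetical driver code): unlike pm_runtime_idle(), this
 * function only queues work on pm_wq and takes the lock with irqsave, so it
 * may be used in atomic context, e.g. in a transfer-complete interrupt:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		foo_ack_hardware(dev);
 *		pm_request_idle(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 * The "foo" names are illustrative only.
 */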

/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
						RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and execute __pm_request_suspend() in that case.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		__pm_request_suspend(dev);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	if (!delay) {
		retval = __pm_request_suspend(dev);
		goto out;
	}

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but any
		 * other pending requests have to be canceled.
		 */
		if (dev->power.request == RPM_REQ_RESUME) {
			retval = -EAGAIN;
			goto out;
		}
		dev->power.request = RPM_REQ_NONE;
	}

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	if (!dev->power.timer_expires)
		dev->power.timer_expires = 1;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
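
/*
 * Usage sketch (hypothetical driver code): a driver that wants the device to
 * stay powered for a grace period after its last use can schedule the suspend
 * instead of carrying it out at once; a repeated call re-arms the timer, so
 * calling this after every I/O operation implements an inactivity timeout:
 *
 *	#define FOO_INACTIVITY_MS	100
 *
 *	pm_schedule_suspend(dev, FOO_INACTIVITY_MS);
 *
 * The FOO_INACTIVITY_MS name and value are illustrative choices.
 */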

/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING)
		retval = -EINPROGRESS;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		dev->power.deferred_resume = true;
		return retval;
	}
	if (dev->power.request_pending) {
		/* If a non-resume request is pending, we can overtake it. */
		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
		return retval;
	}
	if (retval)
		return retval;

	dev->power.request = RPM_REQ_RESUME;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 */
int pm_request_resume(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_resume(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_resume);
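
/*
 * Usage sketch (hypothetical driver code): as the asynchronous counterpart of
 * pm_runtime_resume(), this is suitable for remote wake-up signaling from
 * interrupt context:
 *
 *	static irqreturn_t foo_wake_irq(int irq, void *data)
 *	{
 *		pm_request_resume(data);
 *		return IRQ_HANDLED;
 *	}
 *
 * The "foo_wake_irq" name is illustrative only.
 */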

/**
 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
 * @dev: Device to handle.
 * @sync: If set and the device is suspended, resume it synchronously.
 *
 * Increment the usage count of the device and if it was zero previously,
 * resume it or submit a resume request for it, depending on the value of @sync.
 */
int __pm_runtime_get(struct device *dev, bool sync)
{
	int retval = 1;

	if (atomic_add_return(1, &dev->power.usage_count) == 1)
		retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_get);

/**
 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
 * @dev: Device to handle.
 * @sync: If the device's bus type is to be notified, do that synchronously.
 *
 * Decrement the usage count of the device and if it reaches zero, carry out a
 * synchronous idle notification or submit an idle notification request for it,
 * depending on the value of @sync.
 */
int __pm_runtime_put(struct device *dev, bool sync)
{
	int retval = 0;

	if (atomic_dec_and_test(&dev->power.usage_count))
		retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_put);
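
/*
 * Usage sketch: drivers normally call __pm_runtime_get() and
 * __pm_runtime_put() through the static inline wrappers in
 * include/linux/pm_runtime.h, such as pm_runtime_get_sync() and
 * pm_runtime_put(), which select the @sync variant.  A typical I/O path
 * brackets hardware access with a get/put pair (illustrative code):
 *
 *	pm_runtime_get_sync(dev);
 *	foo_do_io(dev);
 *	pm_runtime_put(dev);
 *
 * The "foo_do_io" name is illustrative only.
 */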

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It is always possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	dev->power.runtime_status = status;
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
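
/*
 * Usage sketch: this function is normally used through the
 * pm_runtime_set_active() and pm_runtime_set_suspended() wrappers from
 * include/linux/pm_runtime.h, typically at probe time while run-time PM is
 * still disabled, to make the recorded status match the hardware state
 * (illustrative probe fragment):
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */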

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, false);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
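
/*
 * Usage sketch (illustrative): code that is about to assume full control of
 * a device, such as a system sleep transition or driver unbinding, can use
 * the barrier to make sure no run-time PM activity is pending or running:
 *
 *	if (pm_runtime_barrier(dev))
 *		dev_dbg(dev, "pending resume request carried out\n");
 */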

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and, if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
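
/*
 * Usage sketch: disable/enable calls must be balanced.  A driver typically
 * enables run-time PM at the end of probe and disables it in remove, using
 * the pm_runtime_disable() wrapper from include/linux/pm_runtime.h, which
 * calls __pm_runtime_disable() with @check_resume set (illustrative code):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		foo_hw_init(&pdev->dev);
 *		pm_runtime_set_active(&pdev->dev);
 *		pm_runtime_enable(&pdev->dev);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		pm_runtime_disable(&pdev->dev);
 *		return 0;
 *	}
 *
 * The "foo" names are illustrative only.
 */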

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	spin_lock_init(&dev->power.lock);

	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}
1011