/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account for the time spent in the old
 * state correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}
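
/*
 * Editor's illustration (not part of the original file): the counters
 * maintained above are what the runtime PM sysfs attributes report to
 * userspace.  A minimal sketch of such a reader, loosely modeled on the
 * runtime_active_time attribute; the function name and attribute wiring
 * here are assumptions, not the actual sysfs code.
 */
#if 0
static ssize_t foo_active_time_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	ssize_t ret;

	spin_lock_irq(&dev->power.lock);
	/* Fold the time spent in the current state into the counters. */
	update_pm_runtime_accounting(dev);
	ret = sprintf(buf, "%i\n",
		      jiffies_to_msecs(dev->power.active_jiffies));
	spin_unlock_irq(&dev->power.lock);

	return ret;
}
#endif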

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
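
/*
 * Editor's note: power.last_busy, read above, is updated by drivers through
 * the pm_runtime_mark_last_busy() helper from pm_runtime.h, which in this
 * era of the API amounts to roughly the following one-liner:
 */
#if 0
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
	ACCESS_ONCE(dev->power.last_busy) = jiffies;
}
#endif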

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (callback)
		__rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval;
}
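
/*
 * Editor's illustration: a typical ->runtime_idle() callback simply tries to
 * suspend the device.  A hypothetical sketch (foo_runtime_idle is not a real
 * driver function); note that rpm_idle() above ignores the callback's return
 * value in this version of the code.
 */
#if 0
static int foo_runtime_idle(struct device *dev)
{
	/* The device is idle: attempt a synchronous suspend. */
	pm_runtime_suspend(dev);
	return 0;
}
#endif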

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	retval = __rpm_callback(cb, dev);

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If the
 * callback succeeds and a resume was deferred while it was running,
 * carry out that resume; otherwise send an idle notification to the
 * device's parent (provided that neither the parent's
 * power.ignore_children flag nor the device's power.irq_safe flag is set).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = false;
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
		wake_up_all(&dev->power.wait_queue);
		goto out;
	}
 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
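
/*
 * Editor's illustration: a hypothetical ->runtime_suspend() callback.  As the
 * error path above shows, returning -EAGAIN or -EBUSY restores RPM_ACTIVE
 * without latching power.runtime_error, so another suspend may be attempted
 * later; any other error sticks until the status is explicitly reset.  All
 * foo_* names are assumptions.
 */
#if 0
static int foo_runtime_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (priv->transfer_in_progress)
		return -EBUSY;		/* benign: try again later */

	foo_save_context(priv);		/* save device registers */
	clk_disable(priv->clk);		/* gate the device clock */
	return 0;
}
#endif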

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
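
/*
 * Editor's illustration: the matching hypothetical ->runtime_resume()
 * callback.  A nonzero return here leaves the status RPM_SUSPENDED and sets
 * power.runtime_error, as the rpm_callback() error path above shows.
 */
#if 0
static int foo_runtime_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(priv->clk);	/* ungate the device clock */
	if (ret)
		return ret;

	foo_restore_context(priv);	/* restore device registers */
	return 0;
}
#endif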

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in the future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
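
/*
 * Editor's illustration: a driver typically calls this after a burst of
 * activity to request a suspend attempt later, e.g.:
 */
#if 0
	/* Try to suspend the device roughly five seconds from now. */
	pm_schedule_suspend(dev, 5000);
#endif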

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
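
/*
 * Editor's note: the pm_runtime.h inline wrappers funnel into this entry
 * point roughly as follows in this era of the API:
 */
#if 0
static inline int pm_runtime_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, 0);
}

static inline int pm_request_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_ASYNC);
}

static inline int pm_runtime_put(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

static inline int pm_runtime_put_sync(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT);
}
#endif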

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
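
/*
 * Editor's note: as with __pm_runtime_idle(), several pm_runtime.h wrappers
 * map onto this entry point; roughly:
 */
#if 0
static inline int pm_runtime_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, 0);
}

static inline int pm_runtime_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_AUTO);
}

static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}
#endif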

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
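
/*
 * Editor's illustration: the canonical driver pattern built on these entry
 * points wraps hardware access in a get/put pair.  foo_* names are
 * hypothetical.
 */
#if 0
static int foo_do_transfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* __pm_runtime_resume(dev, RPM_GET_PUT) */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop the count taken above */
		return ret;
	}

	ret = foo_hw_transfer(dev);	/* hypothetical hardware access */

	pm_runtime_put(dev);		/* asynchronous idle notification */
	return ret;
}
#endif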

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It is always possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
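
/*
 * Editor's illustration: a driver whose hardware is already powered up at
 * probe time tells the core so before enabling runtime PM, via the
 * pm_runtime_set_active() wrapper around this function.  Sketch only; the
 * foo_probe() name is an assumption.
 */
#if 0
static int foo_probe(struct platform_device *pdev)
{
	/* The firmware left the device powered on. */
	pm_runtime_set_active(&pdev->dev);	/* may fail with -EBUSY, see above */
	pm_runtime_enable(&pdev->dev);

	return 0;
}
#endif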

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and, if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
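
/*
 * Editor's note: these two functions back the device's power/control sysfs
 * attribute (writing "on" maps to pm_runtime_forbid(), "auto" to
 * pm_runtime_allow()).  A driver that wants runtime PM off by default until
 * userspace opts in can do the following (sketch):
 */
#if 0
	pm_runtime_forbid(dev);		/* stay "on" until "auto" is written */
#endif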

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
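
/*
 * Editor's illustration: after pm_runtime_irq_safe(), the synchronous helpers
 * become legal in atomic context.  A hypothetical sketch (assumes the handler
 * was registered with the device pointer as its data and that
 * <linux/interrupt.h> is available):
 */
#if 0
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);	/* OK here: power.irq_safe is set */
	foo_ack_interrupt(dev);		/* hypothetical hardware access */
	pm_runtime_put(dev);

	return IRQ_HANDLED;
}
#endif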

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
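
/*
 * Editor's illustration: the canonical autosuspend setup and use, following
 * the runtime PM documentation.  Names other than the pm_runtime_* calls are
 * hypothetical.
 */
#if 0
static int foo_probe(struct platform_device *pdev)
{
	pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);	/* 2 s */
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;
}

/* In the I/O completion path: */
static void foo_io_complete(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* restart the delay window */
	pm_runtime_put_autosuspend(dev);	/* queue an RPM_AUTO suspend */
}
#endif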

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}