xref: /linux/drivers/base/power/runtime.c (revision d229807f669ba3dea9f64467ee965051c4366aed)
1 /*
2  * drivers/base/power/runtime.c - Helper functions for device runtime PM
3  *
4  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
6  *
7  * This file is released under the GPLv2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <trace/events/rpm.h>
14 #include "power.h"
15 
16 static int rpm_resume(struct device *dev, int rpmflags);
17 static int rpm_suspend(struct device *dev, int rpmflags);
18 
19 /**
20  * update_pm_runtime_accounting - Update the time accounting of power states
21  * @dev: Device to update the accounting for
22  *
23  * In order to be able to have time accounting of the various power states
24  * (as used by programs such as PowerTOP to show the effectiveness of runtime
25  * PM), we need to track the time spent in each state.
26  * update_pm_runtime_accounting must be called each time before the
27  * runtime_status field is updated, to account the time in the old state
28  * correctly.
29  */
30 void update_pm_runtime_accounting(struct device *dev)
31 {
32 	unsigned long now = jiffies;
33 	int delta;
34 
35 	delta = now - dev->power.accounting_timestamp;
36 
37 	if (delta < 0)
38 		delta = 0;
39 
40 	dev->power.accounting_timestamp = now;
41 
42 	if (dev->power.disable_depth > 0)
43 		return;
44 
45 	if (dev->power.runtime_status == RPM_SUSPENDED)
46 		dev->power.suspended_jiffies += delta;
47 	else
48 		dev->power.active_jiffies += delta;
49 }
50 
/*
 * __update_runtime_status - Switch power.runtime_status to @status.
 *
 * The accounting update must happen first so the time spent in the
 * outgoing state is credited correctly.  Caller holds dev->power.lock.
 */
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}
56 
57 /**
58  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
59  * @dev: Device to handle.
60  */
61 static void pm_runtime_deactivate_timer(struct device *dev)
62 {
63 	if (dev->power.timer_expires > 0) {
64 		del_timer(&dev->power.suspend_timer);
65 		dev->power.timer_expires = 0;
66 	}
67 }
68 
69 /**
70  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
71  * @dev: Device to handle.
72  */
73 static void pm_runtime_cancel_pending(struct device *dev)
74 {
75 	pm_runtime_deactivate_timer(dev);
76 	/*
77 	 * In case there's a request pending, make sure its work function will
78 	 * return without doing anything.
79 	 */
80 	dev->power.request = RPM_REQ_NONE;
81 }
82 
/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;	/* 0 means "expired or not applicable". */

	if (!dev->power.use_autosuspend)
		goto out;

	/* Snapshot once: both fields may be changed concurrently. */
	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	/* 0 is the "expired" sentinel; nudge a genuine time of 0 up to 1. */
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
129 
130 /**
131  * rpm_check_suspend_allowed - Test whether a device may be suspended.
132  * @dev: Device to test.
133  */
134 static int rpm_check_suspend_allowed(struct device *dev)
135 {
136 	int retval = 0;
137 
138 	if (dev->power.runtime_error)
139 		retval = -EINVAL;
140 	else if (dev->power.disable_depth > 0)
141 		retval = -EACCES;
142 	else if (atomic_read(&dev->power.usage_count) > 0)
143 		retval = -EAGAIN;
144 	else if (!pm_children_suspended(dev))
145 		retval = -EBUSY;
146 
147 	/* Pending resume requests take precedence over suspends. */
148 	else if ((dev->power.deferred_resume
149 			&& dev->power.runtime_status == RPM_SUSPENDING)
150 	    || (dev->power.request_pending
151 			&& dev->power.request == RPM_REQ_RESUME))
152 		retval = -EAGAIN;
153 	else if (dev->power.runtime_status == RPM_SUSPENDED)
154 		retval = 1;
155 
156 	return retval;
157 }
158 
159 /**
160  * __rpm_callback - Run a given runtime PM callback for a given device.
161  * @cb: Runtime PM callback to run.
162  * @dev: Device to run the callback for.
163  */
164 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
165 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
166 {
167 	int retval;
168 
169 	if (dev->power.irq_safe)
170 		spin_unlock(&dev->power.lock);
171 	else
172 		spin_unlock_irq(&dev->power.lock);
173 
174 	retval = cb(dev);
175 
176 	if (dev->power.irq_safe)
177 		spin_lock(&dev->power.lock);
178 	else
179 		spin_lock_irq(&dev->power.lock);
180 
181 	return retval;
182 }
183 
184 /**
185  * rpm_idle - Notify device bus type if the device can be suspended.
186  * @dev: Device to notify the bus type about.
187  * @rpmflags: Flag bits.
188  *
189  * Check if the device's runtime PM status allows it to be suspended.  If
190  * another idle notification has been started earlier, return immediately.  If
191  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
192  * run the ->runtime_idle() callback directly.
193  *
194  * This function must be called under dev->power.lock with interrupts disabled.
195  */
196 static int rpm_idle(struct device *dev, int rpmflags)
197 {
198 	int (*callback)(struct device *);
199 	int retval;
200 
201 	trace_rpm_idle(dev, rpmflags);
202 	retval = rpm_check_suspend_allowed(dev);
203 	if (retval < 0)
204 		;	/* Conditions are wrong. */
205 
206 	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
207 	else if (dev->power.runtime_status != RPM_ACTIVE)
208 		retval = -EAGAIN;
209 
210 	/*
211 	 * Any pending request other than an idle notification takes
212 	 * precedence over us, except that the timer may be running.
213 	 */
214 	else if (dev->power.request_pending &&
215 	    dev->power.request > RPM_REQ_IDLE)
216 		retval = -EAGAIN;
217 
218 	/* Act as though RPM_NOWAIT is always set. */
219 	else if (dev->power.idle_notification)
220 		retval = -EINPROGRESS;
221 	if (retval)
222 		goto out;
223 
224 	/* Pending requests need to be canceled. */
225 	dev->power.request = RPM_REQ_NONE;
226 
227 	if (dev->power.no_callbacks) {
228 		/* Assume ->runtime_idle() callback would have suspended. */
229 		retval = rpm_suspend(dev, rpmflags);
230 		goto out;
231 	}
232 
233 	/* Carry out an asynchronous or a synchronous idle notification. */
234 	if (rpmflags & RPM_ASYNC) {
235 		dev->power.request = RPM_REQ_IDLE;
236 		if (!dev->power.request_pending) {
237 			dev->power.request_pending = true;
238 			queue_work(pm_wq, &dev->power.work);
239 		}
240 		goto out;
241 	}
242 
243 	dev->power.idle_notification = true;
244 
245 	if (dev->pm_domain)
246 		callback = dev->pm_domain->ops.runtime_idle;
247 	else if (dev->type && dev->type->pm)
248 		callback = dev->type->pm->runtime_idle;
249 	else if (dev->class && dev->class->pm)
250 		callback = dev->class->pm->runtime_idle;
251 	else if (dev->bus && dev->bus->pm)
252 		callback = dev->bus->pm->runtime_idle;
253 	else
254 		callback = NULL;
255 
256 	if (callback)
257 		__rpm_callback(callback, dev);
258 
259 	dev->power.idle_notification = false;
260 	wake_up_all(&dev->power.wait_queue);
261 
262  out:
263 	trace_rpm_return_int(dev, _THIS_IP_, retval);
264 	return retval;
265 }
266 
267 /**
268  * rpm_callback - Run a given runtime PM callback for a given device.
269  * @cb: Runtime PM callback to run.
270  * @dev: Device to run the callback for.
271  */
272 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
273 {
274 	int retval;
275 
276 	if (!cb)
277 		return -ENOSYS;
278 
279 	retval = __rpm_callback(cb, dev);
280 
281 	dev->power.runtime_error = retval;
282 	return retval != -EACCES ? retval : -EIO;
283 }
284 
285 /**
286  * rpm_suspend - Carry out runtime suspend of given device.
287  * @dev: Device to suspend.
288  * @rpmflags: Flag bits.
289  *
290  * Check if the device's runtime PM status allows it to be suspended.
291  * Cancel a pending idle notification, autosuspend or suspend. If
292  * another suspend has been started earlier, either return immediately
293  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
294  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
295  * otherwise run the ->runtime_suspend() callback directly. When
296  * ->runtime_suspend succeeded, if a deferred resume was requested while
297  * the callback was running then carry it out, otherwise send an idle
298  * notification for its parent (if the suspend succeeded and both
299  * ignore_children of parent->power and irq_safe of dev->power are not set).
300  *
301  * This function must be called under dev->power.lock with interrupts disabled.
302  */
303 static int rpm_suspend(struct device *dev, int rpmflags)
304 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
305 {
306 	int (*callback)(struct device *);
307 	struct device *parent = NULL;
308 	int retval;
309 
310 	trace_rpm_suspend(dev, rpmflags);
311 
312  repeat:
313 	retval = rpm_check_suspend_allowed(dev);
314 
315 	if (retval < 0)
316 		;	/* Conditions are wrong. */
317 
318 	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
319 	else if (dev->power.runtime_status == RPM_RESUMING &&
320 	    !(rpmflags & RPM_ASYNC))
321 		retval = -EAGAIN;
322 	if (retval)
323 		goto out;
324 
325 	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
326 	if ((rpmflags & RPM_AUTO)
327 	    && dev->power.runtime_status != RPM_SUSPENDING) {
328 		unsigned long expires = pm_runtime_autosuspend_expiration(dev);
329 
330 		if (expires != 0) {
331 			/* Pending requests need to be canceled. */
332 			dev->power.request = RPM_REQ_NONE;
333 
334 			/*
335 			 * Optimization: If the timer is already running and is
336 			 * set to expire at or before the autosuspend delay,
337 			 * avoid the overhead of resetting it.  Just let it
338 			 * expire; pm_suspend_timer_fn() will take care of the
339 			 * rest.
340 			 */
341 			if (!(dev->power.timer_expires && time_before_eq(
342 			    dev->power.timer_expires, expires))) {
343 				dev->power.timer_expires = expires;
344 				mod_timer(&dev->power.suspend_timer, expires);
345 			}
346 			dev->power.timer_autosuspends = 1;
347 			goto out;
348 		}
349 	}
350 
351 	/* Other scheduled or pending requests need to be canceled. */
352 	pm_runtime_cancel_pending(dev);
353 
354 	if (dev->power.runtime_status == RPM_SUSPENDING) {
355 		DEFINE_WAIT(wait);
356 
357 		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
358 			retval = -EINPROGRESS;
359 			goto out;
360 		}
361 
362 		if (dev->power.irq_safe) {
363 			spin_unlock(&dev->power.lock);
364 
365 			cpu_relax();
366 
367 			spin_lock(&dev->power.lock);
368 			goto repeat;
369 		}
370 
371 		/* Wait for the other suspend running in parallel with us. */
372 		for (;;) {
373 			prepare_to_wait(&dev->power.wait_queue, &wait,
374 					TASK_UNINTERRUPTIBLE);
375 			if (dev->power.runtime_status != RPM_SUSPENDING)
376 				break;
377 
378 			spin_unlock_irq(&dev->power.lock);
379 
380 			schedule();
381 
382 			spin_lock_irq(&dev->power.lock);
383 		}
384 		finish_wait(&dev->power.wait_queue, &wait);
385 		goto repeat;
386 	}
387 
388 	dev->power.deferred_resume = false;
389 	if (dev->power.no_callbacks)
390 		goto no_callback;	/* Assume success. */
391 
392 	/* Carry out an asynchronous or a synchronous suspend. */
393 	if (rpmflags & RPM_ASYNC) {
394 		dev->power.request = (rpmflags & RPM_AUTO) ?
395 		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
396 		if (!dev->power.request_pending) {
397 			dev->power.request_pending = true;
398 			queue_work(pm_wq, &dev->power.work);
399 		}
400 		goto out;
401 	}
402 
403 	__update_runtime_status(dev, RPM_SUSPENDING);
404 
405 	if (dev->pm_domain)
406 		callback = dev->pm_domain->ops.runtime_suspend;
407 	else if (dev->type && dev->type->pm)
408 		callback = dev->type->pm->runtime_suspend;
409 	else if (dev->class && dev->class->pm)
410 		callback = dev->class->pm->runtime_suspend;
411 	else if (dev->bus && dev->bus->pm)
412 		callback = dev->bus->pm->runtime_suspend;
413 	else
414 		callback = NULL;
415 
416 	retval = rpm_callback(callback, dev);
417 	if (retval) {
418 		__update_runtime_status(dev, RPM_ACTIVE);
419 		dev->power.deferred_resume = false;
420 		if (retval == -EAGAIN || retval == -EBUSY)
421 			dev->power.runtime_error = 0;
422 		else
423 			pm_runtime_cancel_pending(dev);
424 		wake_up_all(&dev->power.wait_queue);
425 		goto out;
426 	}
427  no_callback:
428 	__update_runtime_status(dev, RPM_SUSPENDED);
429 	pm_runtime_deactivate_timer(dev);
430 
431 	if (dev->parent) {
432 		parent = dev->parent;
433 		atomic_add_unless(&parent->power.child_count, -1, 0);
434 	}
435 	wake_up_all(&dev->power.wait_queue);
436 
437 	if (dev->power.deferred_resume) {
438 		rpm_resume(dev, 0);
439 		retval = -EAGAIN;
440 		goto out;
441 	}
442 
443 	/* Maybe the parent is now able to suspend. */
444 	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
445 		spin_unlock(&dev->power.lock);
446 
447 		spin_lock(&parent->power.lock);
448 		rpm_idle(parent, RPM_ASYNC);
449 		spin_unlock(&parent->power.lock);
450 
451 		spin_lock(&dev->power.lock);
452 	}
453 
454  out:
455 	trace_rpm_return_int(dev, _THIS_IP_, retval);
456 
457 	return retval;
458 }
459 
460 /**
461  * rpm_resume - Carry out runtime resume of given device.
462  * @dev: Device to resume.
463  * @rpmflags: Flag bits.
464  *
465  * Check if the device's runtime PM status allows it to be resumed.  Cancel
466  * any scheduled or pending requests.  If another resume has been started
467  * earlier, either return immediately or wait for it to finish, depending on the
468  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
469  * parallel with this function, either tell the other process to resume after
470  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
471  * flag is set then queue a resume request; otherwise run the
472  * ->runtime_resume() callback directly.  Queue an idle notification for the
473  * device if the resume succeeded.
474  *
475  * This function must be called under dev->power.lock with interrupts disabled.
476  */
477 static int rpm_resume(struct device *dev, int rpmflags)
478 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
479 {
480 	int (*callback)(struct device *);
481 	struct device *parent = NULL;
482 	int retval = 0;
483 
484 	trace_rpm_resume(dev, rpmflags);
485 
486  repeat:
487 	if (dev->power.runtime_error)
488 		retval = -EINVAL;
489 	else if (dev->power.disable_depth > 0)
490 		retval = -EACCES;
491 	if (retval)
492 		goto out;
493 
494 	/*
495 	 * Other scheduled or pending requests need to be canceled.  Small
496 	 * optimization: If an autosuspend timer is running, leave it running
497 	 * rather than cancelling it now only to restart it again in the near
498 	 * future.
499 	 */
500 	dev->power.request = RPM_REQ_NONE;
501 	if (!dev->power.timer_autosuspends)
502 		pm_runtime_deactivate_timer(dev);
503 
504 	if (dev->power.runtime_status == RPM_ACTIVE) {
505 		retval = 1;
506 		goto out;
507 	}
508 
509 	if (dev->power.runtime_status == RPM_RESUMING
510 	    || dev->power.runtime_status == RPM_SUSPENDING) {
511 		DEFINE_WAIT(wait);
512 
513 		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
514 			if (dev->power.runtime_status == RPM_SUSPENDING)
515 				dev->power.deferred_resume = true;
516 			else
517 				retval = -EINPROGRESS;
518 			goto out;
519 		}
520 
521 		if (dev->power.irq_safe) {
522 			spin_unlock(&dev->power.lock);
523 
524 			cpu_relax();
525 
526 			spin_lock(&dev->power.lock);
527 			goto repeat;
528 		}
529 
530 		/* Wait for the operation carried out in parallel with us. */
531 		for (;;) {
532 			prepare_to_wait(&dev->power.wait_queue, &wait,
533 					TASK_UNINTERRUPTIBLE);
534 			if (dev->power.runtime_status != RPM_RESUMING
535 			    && dev->power.runtime_status != RPM_SUSPENDING)
536 				break;
537 
538 			spin_unlock_irq(&dev->power.lock);
539 
540 			schedule();
541 
542 			spin_lock_irq(&dev->power.lock);
543 		}
544 		finish_wait(&dev->power.wait_queue, &wait);
545 		goto repeat;
546 	}
547 
548 	/*
549 	 * See if we can skip waking up the parent.  This is safe only if
550 	 * power.no_callbacks is set, because otherwise we don't know whether
551 	 * the resume will actually succeed.
552 	 */
553 	if (dev->power.no_callbacks && !parent && dev->parent) {
554 		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
555 		if (dev->parent->power.disable_depth > 0
556 		    || dev->parent->power.ignore_children
557 		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
558 			atomic_inc(&dev->parent->power.child_count);
559 			spin_unlock(&dev->parent->power.lock);
560 			goto no_callback;	/* Assume success. */
561 		}
562 		spin_unlock(&dev->parent->power.lock);
563 	}
564 
565 	/* Carry out an asynchronous or a synchronous resume. */
566 	if (rpmflags & RPM_ASYNC) {
567 		dev->power.request = RPM_REQ_RESUME;
568 		if (!dev->power.request_pending) {
569 			dev->power.request_pending = true;
570 			queue_work(pm_wq, &dev->power.work);
571 		}
572 		retval = 0;
573 		goto out;
574 	}
575 
576 	if (!parent && dev->parent) {
577 		/*
578 		 * Increment the parent's usage counter and resume it if
579 		 * necessary.  Not needed if dev is irq-safe; then the
580 		 * parent is permanently resumed.
581 		 */
582 		parent = dev->parent;
583 		if (dev->power.irq_safe)
584 			goto skip_parent;
585 		spin_unlock(&dev->power.lock);
586 
587 		pm_runtime_get_noresume(parent);
588 
589 		spin_lock(&parent->power.lock);
590 		/*
591 		 * We can resume if the parent's runtime PM is disabled or it
592 		 * is set to ignore children.
593 		 */
594 		if (!parent->power.disable_depth
595 		    && !parent->power.ignore_children) {
596 			rpm_resume(parent, 0);
597 			if (parent->power.runtime_status != RPM_ACTIVE)
598 				retval = -EBUSY;
599 		}
600 		spin_unlock(&parent->power.lock);
601 
602 		spin_lock(&dev->power.lock);
603 		if (retval)
604 			goto out;
605 		goto repeat;
606 	}
607  skip_parent:
608 
609 	if (dev->power.no_callbacks)
610 		goto no_callback;	/* Assume success. */
611 
612 	__update_runtime_status(dev, RPM_RESUMING);
613 
614 	if (dev->pm_domain)
615 		callback = dev->pm_domain->ops.runtime_resume;
616 	else if (dev->type && dev->type->pm)
617 		callback = dev->type->pm->runtime_resume;
618 	else if (dev->class && dev->class->pm)
619 		callback = dev->class->pm->runtime_resume;
620 	else if (dev->bus && dev->bus->pm)
621 		callback = dev->bus->pm->runtime_resume;
622 	else
623 		callback = NULL;
624 
625 	retval = rpm_callback(callback, dev);
626 	if (retval) {
627 		__update_runtime_status(dev, RPM_SUSPENDED);
628 		pm_runtime_cancel_pending(dev);
629 	} else {
630  no_callback:
631 		__update_runtime_status(dev, RPM_ACTIVE);
632 		if (parent)
633 			atomic_inc(&parent->power.child_count);
634 	}
635 	wake_up_all(&dev->power.wait_queue);
636 
637 	if (!retval)
638 		rpm_idle(dev, RPM_ASYNC);
639 
640  out:
641 	if (parent && !dev->power.irq_safe) {
642 		spin_unlock_irq(&dev->power.lock);
643 
644 		pm_runtime_put(parent);
645 
646 		spin_lock_irq(&dev->power.lock);
647 	}
648 
649 	trace_rpm_return_int(dev, _THIS_IP_, retval);
650 
651 	return retval;
652 }
653 
654 /**
655  * pm_runtime_work - Universal runtime PM work function.
656  * @work: Work structure used for scheduling the execution of this function.
657  *
658  * Use @work to get the device object the work is to be done for, determine what
659  * is to be done and execute the appropriate runtime PM function.
660  */
661 static void pm_runtime_work(struct work_struct *work)
662 {
663 	struct device *dev = container_of(work, struct device, power.work);
664 	enum rpm_request req;
665 
666 	spin_lock_irq(&dev->power.lock);
667 
668 	if (!dev->power.request_pending)
669 		goto out;
670 
671 	req = dev->power.request;
672 	dev->power.request = RPM_REQ_NONE;
673 	dev->power.request_pending = false;
674 
675 	switch (req) {
676 	case RPM_REQ_NONE:
677 		break;
678 	case RPM_REQ_IDLE:
679 		rpm_idle(dev, RPM_NOWAIT);
680 		break;
681 	case RPM_REQ_SUSPEND:
682 		rpm_suspend(dev, RPM_NOWAIT);
683 		break;
684 	case RPM_REQ_AUTOSUSPEND:
685 		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
686 		break;
687 	case RPM_REQ_RESUME:
688 		rpm_resume(dev, RPM_NOWAIT);
689 		break;
690 	}
691 
692  out:
693 	spin_unlock_irq(&dev->power.lock);
694 }
695 
696 /**
697  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
698  * @data: Device pointer passed by pm_schedule_suspend().
699  *
700  * Check if the time is right and queue a suspend request.
701  */
702 static void pm_suspend_timer_fn(unsigned long data)
703 {
704 	struct device *dev = (struct device *)data;
705 	unsigned long flags;
706 	unsigned long expires;
707 
708 	spin_lock_irqsave(&dev->power.lock, flags);
709 
710 	expires = dev->power.timer_expires;
711 	/* If 'expire' is after 'jiffies' we've been called too early. */
712 	if (expires > 0 && !time_after(expires, jiffies)) {
713 		dev->power.timer_expires = 0;
714 		rpm_suspend(dev, dev->power.timer_autosuspends ?
715 		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
716 	}
717 
718 	spin_unlock_irqrestore(&dev->power.lock, flags);
719 }
720 
721 /**
722  * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
723  * @dev: Device to suspend.
724  * @delay: Time to wait before submitting a suspend request, in milliseconds.
725  */
726 int pm_schedule_suspend(struct device *dev, unsigned int delay)
727 {
728 	unsigned long flags;
729 	int retval;
730 
731 	spin_lock_irqsave(&dev->power.lock, flags);
732 
733 	if (!delay) {
734 		retval = rpm_suspend(dev, RPM_ASYNC);
735 		goto out;
736 	}
737 
738 	retval = rpm_check_suspend_allowed(dev);
739 	if (retval)
740 		goto out;
741 
742 	/* Other scheduled or pending requests need to be canceled. */
743 	pm_runtime_cancel_pending(dev);
744 
745 	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
746 	dev->power.timer_expires += !dev->power.timer_expires;
747 	dev->power.timer_autosuspends = 0;
748 	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
749 
750  out:
751 	spin_unlock_irqrestore(&dev->power.lock, flags);
752 
753 	return retval;
754 }
755 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
756 
757 /**
758  * __pm_runtime_idle - Entry point for runtime idle operations.
759  * @dev: Device to send idle notification for.
760  * @rpmflags: Flag bits.
761  *
762  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
763  * return immediately if it is larger than zero.  Then carry out an idle
764  * notification, either synchronous or asynchronous.
765  *
766  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
767  * or if pm_runtime_irq_safe() has been called.
768  */
769 int __pm_runtime_idle(struct device *dev, int rpmflags)
770 {
771 	unsigned long flags;
772 	int retval;
773 
774 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
775 
776 	if (rpmflags & RPM_GET_PUT) {
777 		if (!atomic_dec_and_test(&dev->power.usage_count))
778 			return 0;
779 	}
780 
781 	spin_lock_irqsave(&dev->power.lock, flags);
782 	retval = rpm_idle(dev, rpmflags);
783 	spin_unlock_irqrestore(&dev->power.lock, flags);
784 
785 	return retval;
786 }
787 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
788 
789 /**
790  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
791  * @dev: Device to suspend.
792  * @rpmflags: Flag bits.
793  *
794  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
795  * return immediately if it is larger than zero.  Then carry out a suspend,
796  * either synchronous or asynchronous.
797  *
798  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
799  * or if pm_runtime_irq_safe() has been called.
800  */
801 int __pm_runtime_suspend(struct device *dev, int rpmflags)
802 {
803 	unsigned long flags;
804 	int retval;
805 
806 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
807 
808 	if (rpmflags & RPM_GET_PUT) {
809 		if (!atomic_dec_and_test(&dev->power.usage_count))
810 			return 0;
811 	}
812 
813 	spin_lock_irqsave(&dev->power.lock, flags);
814 	retval = rpm_suspend(dev, rpmflags);
815 	spin_unlock_irqrestore(&dev->power.lock, flags);
816 
817 	return retval;
818 }
819 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
820 
821 /**
822  * __pm_runtime_resume - Entry point for runtime resume operations.
823  * @dev: Device to resume.
824  * @rpmflags: Flag bits.
825  *
826  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
827  * carry out a resume, either synchronous or asynchronous.
828  *
829  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
830  * or if pm_runtime_irq_safe() has been called.
831  */
832 int __pm_runtime_resume(struct device *dev, int rpmflags)
833 {
834 	unsigned long flags;
835 	int retval;
836 
837 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
838 
839 	if (rpmflags & RPM_GET_PUT)
840 		atomic_inc(&dev->power.usage_count);
841 
842 	spin_lock_irqsave(&dev->power.lock, flags);
843 	retval = rpm_resume(dev, rpmflags);
844 	spin_unlock_irqrestore(&dev->power.lock, flags);
845 
846 	return retval;
847 }
848 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
849 
850 /**
851  * __pm_runtime_set_status - Set runtime PM status of a device.
852  * @dev: Device to handle.
853  * @status: New runtime PM status of the device.
854  *
855  * If runtime PM of the device is disabled or its power.runtime_error field is
856  * different from zero, the status may be changed either to RPM_ACTIVE, or to
857  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
858  * However, if the device has a parent and the parent is not active, and the
859  * parent's power.ignore_children flag is unset, the device's status cannot be
860  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
861  *
862  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
863  * and the device parent's counter of unsuspended children is modified to
864  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
865  * notification request for the parent is submitted.
866  */
867 int __pm_runtime_set_status(struct device *dev, unsigned int status)
868 {
869 	struct device *parent = dev->parent;
870 	unsigned long flags;
871 	bool notify_parent = false;
872 	int error = 0;
873 
874 	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
875 		return -EINVAL;
876 
877 	spin_lock_irqsave(&dev->power.lock, flags);
878 
879 	if (!dev->power.runtime_error && !dev->power.disable_depth) {
880 		error = -EAGAIN;
881 		goto out;
882 	}
883 
884 	if (dev->power.runtime_status == status)
885 		goto out_set;
886 
887 	if (status == RPM_SUSPENDED) {
888 		/* It always is possible to set the status to 'suspended'. */
889 		if (parent) {
890 			atomic_add_unless(&parent->power.child_count, -1, 0);
891 			notify_parent = !parent->power.ignore_children;
892 		}
893 		goto out_set;
894 	}
895 
896 	if (parent) {
897 		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
898 
899 		/*
900 		 * It is invalid to put an active child under a parent that is
901 		 * not active, has runtime PM enabled and the
902 		 * 'power.ignore_children' flag unset.
903 		 */
904 		if (!parent->power.disable_depth
905 		    && !parent->power.ignore_children
906 		    && parent->power.runtime_status != RPM_ACTIVE)
907 			error = -EBUSY;
908 		else if (dev->power.runtime_status == RPM_SUSPENDED)
909 			atomic_inc(&parent->power.child_count);
910 
911 		spin_unlock(&parent->power.lock);
912 
913 		if (error)
914 			goto out;
915 	}
916 
917  out_set:
918 	__update_runtime_status(dev, status);
919 	dev->power.runtime_error = 0;
920  out:
921 	spin_unlock_irqrestore(&dev->power.lock, flags);
922 
923 	if (notify_parent)
924 		pm_request_idle(parent);
925 
926 	return error;
927 }
928 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
929 
930 /**
931  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
932  * @dev: Device to handle.
933  *
934  * Flush all pending requests for the device from pm_wq and wait for all
935  * runtime PM operations involving the device in progress to complete.
936  *
937  * Should be called under dev->power.lock with interrupts disabled.
938  */
939 static void __pm_runtime_barrier(struct device *dev)
940 {
941 	pm_runtime_deactivate_timer(dev);
942 
943 	if (dev->power.request_pending) {
944 		dev->power.request = RPM_REQ_NONE;
945 		spin_unlock_irq(&dev->power.lock);
946 
947 		cancel_work_sync(&dev->power.work);
948 
949 		spin_lock_irq(&dev->power.lock);
950 		dev->power.request_pending = false;
951 	}
952 
953 	if (dev->power.runtime_status == RPM_SUSPENDING
954 	    || dev->power.runtime_status == RPM_RESUMING
955 	    || dev->power.idle_notification) {
956 		DEFINE_WAIT(wait);
957 
958 		/* Suspend, wake-up or idle notification in progress. */
959 		for (;;) {
960 			prepare_to_wait(&dev->power.wait_queue, &wait,
961 					TASK_UNINTERRUPTIBLE);
962 			if (dev->power.runtime_status != RPM_SUSPENDING
963 			    && dev->power.runtime_status != RPM_RESUMING
964 			    && !dev->power.idle_notification)
965 				break;
966 			spin_unlock_irq(&dev->power.lock);
967 
968 			schedule();
969 
970 			spin_lock_irq(&dev->power.lock);
971 		}
972 		finish_wait(&dev->power.wait_queue, &wait);
973 	}
974 }
975 
976 /**
977  * pm_runtime_barrier - Flush pending requests and wait for completions.
978  * @dev: Device to handle.
979  *
980  * Prevent the device from being suspended by incrementing its usage counter and
981  * if there's a pending resume request for the device, wake the device up.
982  * Next, make sure that all pending requests for the device have been flushed
983  * from pm_wq and wait for all runtime PM operations involving the device in
984  * progress to complete.
985  *
986  * Return value:
987  * 1, if there was a resume request pending and the device had to be woken up,
988  * 0, otherwise
989  */
990 int pm_runtime_barrier(struct device *dev)
991 {
992 	int retval = 0;
993 
994 	pm_runtime_get_noresume(dev);
995 	spin_lock_irq(&dev->power.lock);
996 
997 	if (dev->power.request_pending
998 	    && dev->power.request == RPM_REQ_RESUME) {
999 		rpm_resume(dev, 0);
1000 		retval = 1;
1001 	}
1002 
1003 	__pm_runtime_barrier(dev);
1004 
1005 	spin_unlock_irq(&dev->power.lock);
1006 	pm_runtime_put_noidle(dev);
1007 
1008 	return retval;
1009 }
1010 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1011 
1012 /**
1013  * __pm_runtime_disable - Disable runtime PM of a device.
1014  * @dev: Device to handle.
1015  * @check_resume: If set, check if there's a resume request for the device.
1016  *
1017  * Increment power.disable_depth for the device and if was zero previously,
1018  * cancel all pending runtime PM requests for the device and wait for all
1019  * operations in progress to complete.  The device can be either active or
1020  * suspended after its runtime PM has been disabled.
1021  *
1022  * If @check_resume is set and there's a resume request pending when
1023  * __pm_runtime_disable() is called and power.disable_depth is zero, the
1024  * function will wake up the device before disabling its runtime PM.
1025  */
1026 void __pm_runtime_disable(struct device *dev, bool check_resume)
1027 {
1028 	spin_lock_irq(&dev->power.lock);
1029 
1030 	if (dev->power.disable_depth > 0) {
1031 		dev->power.disable_depth++;
1032 		goto out;
1033 	}
1034 
1035 	/*
1036 	 * Wake up the device if there's a resume request pending, because that
1037 	 * means there probably is some I/O to process and disabling runtime PM
1038 	 * shouldn't prevent the device from processing the I/O.
1039 	 */
1040 	if (check_resume && dev->power.request_pending
1041 	    && dev->power.request == RPM_REQ_RESUME) {
1042 		/*
1043 		 * Prevent suspends and idle notifications from being carried
1044 		 * out after we have woken up the device.
1045 		 */
1046 		pm_runtime_get_noresume(dev);
1047 
1048 		rpm_resume(dev, 0);
1049 
1050 		pm_runtime_put_noidle(dev);
1051 	}
1052 
1053 	if (!dev->power.disable_depth++)
1054 		__pm_runtime_barrier(dev);
1055 
1056  out:
1057 	spin_unlock_irq(&dev->power.lock);
1058 }
1059 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1060 
1061 /**
1062  * pm_runtime_enable - Enable runtime PM of a device.
1063  * @dev: Device to handle.
1064  */
1065 void pm_runtime_enable(struct device *dev)
1066 {
1067 	unsigned long flags;
1068 
1069 	spin_lock_irqsave(&dev->power.lock, flags);
1070 
1071 	if (dev->power.disable_depth > 0)
1072 		dev->power.disable_depth--;
1073 	else
1074 		dev_warn(dev, "Unbalanced %s!\n", __func__);
1075 
1076 	spin_unlock_irqrestore(&dev->power.lock, flags);
1077 }
1078 EXPORT_SYMBOL_GPL(pm_runtime_enable);
1079 
1080 /**
1081  * pm_runtime_forbid - Block runtime PM of a device.
1082  * @dev: Device to handle.
1083  *
1084  * Increase the device's usage count and clear its power.runtime_auto flag,
1085  * so that it cannot be suspended at run time until pm_runtime_allow() is called
1086  * for it.
1087  */
1088 void pm_runtime_forbid(struct device *dev)
1089 {
1090 	spin_lock_irq(&dev->power.lock);
1091 	if (!dev->power.runtime_auto)
1092 		goto out;
1093 
1094 	dev->power.runtime_auto = false;
1095 	atomic_inc(&dev->power.usage_count);
1096 	rpm_resume(dev, 0);
1097 
1098  out:
1099 	spin_unlock_irq(&dev->power.lock);
1100 }
1101 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1102 
1103 /**
1104  * pm_runtime_allow - Unblock runtime PM of a device.
1105  * @dev: Device to handle.
1106  *
1107  * Decrease the device's usage count and set its power.runtime_auto flag.
1108  */
1109 void pm_runtime_allow(struct device *dev)
1110 {
1111 	spin_lock_irq(&dev->power.lock);
1112 	if (dev->power.runtime_auto)
1113 		goto out;
1114 
1115 	dev->power.runtime_auto = true;
1116 	if (atomic_dec_and_test(&dev->power.usage_count))
1117 		rpm_idle(dev, RPM_AUTO);
1118 
1119  out:
1120 	spin_unlock_irq(&dev->power.lock);
1121 }
1122 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1123 
1124 /**
1125  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1126  * @dev: Device to handle.
1127  *
1128  * Set the power.no_callbacks flag, which tells the PM core that this
1129  * device is power-managed through its parent and has no runtime PM
1130  * callbacks of its own.  The runtime sysfs attributes will be removed.
1131  */
1132 void pm_runtime_no_callbacks(struct device *dev)
1133 {
1134 	spin_lock_irq(&dev->power.lock);
1135 	dev->power.no_callbacks = 1;
1136 	spin_unlock_irq(&dev->power.lock);
1137 	if (device_is_registered(dev))
1138 		rpm_sysfs_remove(dev);
1139 }
1140 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1141 
1142 /**
1143  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1144  * @dev: Device to handle
1145  *
1146  * Set the power.irq_safe flag, which tells the PM core that the
1147  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1148  * always be invoked with the spinlock held and interrupts disabled.  It also
1149  * causes the parent's usage counter to be permanently incremented, preventing
1150  * the parent from runtime suspending -- otherwise an irq-safe child might have
1151  * to wait for a non-irq-safe parent.
1152  */
1153 void pm_runtime_irq_safe(struct device *dev)
1154 {
1155 	if (dev->parent)
1156 		pm_runtime_get_sync(dev->parent);
1157 	spin_lock_irq(&dev->power.lock);
1158 	dev->power.irq_safe = 1;
1159 	spin_unlock_irq(&dev->power.lock);
1160 }
1161 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1162 
1163 /**
1164  * update_autosuspend - Handle a change to a device's autosuspend settings.
1165  * @dev: Device to handle.
1166  * @old_delay: The former autosuspend_delay value.
1167  * @old_use: The former use_autosuspend value.
1168  *
1169  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1170  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1171  *
1172  * This function must be called under dev->power.lock with interrupts disabled.
1173  */
1174 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1175 {
1176 	int delay = dev->power.autosuspend_delay;
1177 
1178 	/* Should runtime suspend be prevented now? */
1179 	if (dev->power.use_autosuspend && delay < 0) {
1180 
1181 		/* If it used to be allowed then prevent it. */
1182 		if (!old_use || old_delay >= 0) {
1183 			atomic_inc(&dev->power.usage_count);
1184 			rpm_resume(dev, 0);
1185 		}
1186 	}
1187 
1188 	/* Runtime suspend should be allowed now. */
1189 	else {
1190 
1191 		/* If it used to be prevented then allow it. */
1192 		if (old_use && old_delay < 0)
1193 			atomic_dec(&dev->power.usage_count);
1194 
1195 		/* Maybe we can autosuspend now. */
1196 		rpm_idle(dev, RPM_AUTO);
1197 	}
1198 }
1199 
1200 /**
1201  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1202  * @dev: Device to handle.
1203  * @delay: Value of the new delay in milliseconds.
1204  *
1205  * Set the device's power.autosuspend_delay value.  If it changes to negative
1206  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1207  * changes the other way, allow runtime suspends.
1208  */
1209 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1210 {
1211 	int old_delay, old_use;
1212 
1213 	spin_lock_irq(&dev->power.lock);
1214 	old_delay = dev->power.autosuspend_delay;
1215 	old_use = dev->power.use_autosuspend;
1216 	dev->power.autosuspend_delay = delay;
1217 	update_autosuspend(dev, old_delay, old_use);
1218 	spin_unlock_irq(&dev->power.lock);
1219 }
1220 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1221 
1222 /**
1223  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1224  * @dev: Device to handle.
1225  * @use: New value for use_autosuspend.
1226  *
1227  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1228  * suspends as needed.
1229  */
1230 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1231 {
1232 	int old_delay, old_use;
1233 
1234 	spin_lock_irq(&dev->power.lock);
1235 	old_delay = dev->power.autosuspend_delay;
1236 	old_use = dev->power.use_autosuspend;
1237 	dev->power.use_autosuspend = use;
1238 	update_autosuspend(dev, old_delay, old_use);
1239 	spin_unlock_irq(&dev->power.lock);
1240 }
1241 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1242 
1243 /**
1244  * pm_runtime_init - Initialize runtime PM fields in given device object.
1245  * @dev: Device object to initialize.
1246  */
1247 void pm_runtime_init(struct device *dev)
1248 {
1249 	dev->power.runtime_status = RPM_SUSPENDED;
1250 	dev->power.idle_notification = false;
1251 
1252 	dev->power.disable_depth = 1;
1253 	atomic_set(&dev->power.usage_count, 0);
1254 
1255 	dev->power.runtime_error = 0;
1256 
1257 	atomic_set(&dev->power.child_count, 0);
1258 	pm_suspend_ignore_children(dev, false);
1259 	dev->power.runtime_auto = true;
1260 
1261 	dev->power.request_pending = false;
1262 	dev->power.request = RPM_REQ_NONE;
1263 	dev->power.deferred_resume = false;
1264 	dev->power.accounting_timestamp = jiffies;
1265 	INIT_WORK(&dev->power.work, pm_runtime_work);
1266 
1267 	dev->power.timer_expires = 0;
1268 	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
1269 			(unsigned long)dev);
1270 
1271 	init_waitqueue_head(&dev->power.wait_queue);
1272 }
1273 
1274 /**
1275  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1276  * @dev: Device object being removed from device hierarchy.
1277  */
1278 void pm_runtime_remove(struct device *dev)
1279 {
1280 	__pm_runtime_disable(dev, false);
1281 
1282 	/* Change the status back to 'suspended' to match the initial status. */
1283 	if (dev->power.runtime_status == RPM_ACTIVE)
1284 		pm_runtime_set_suspended(dev);
1285 	if (dev->power.irq_safe && dev->parent)
1286 		pm_runtime_put_sync(dev->parent);
1287 }
1288