xref: /linux/drivers/base/power/runtime.c (revision abdf766d149c51fb256118f73be947d7a82f702e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/runtime.c - Helper functions for device runtime PM
4  *
5  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
7  */
8 #include <linux/sched/mm.h>
9 #include <linux/ktime.h>
10 #include <linux/hrtimer.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_wakeirq.h>
14 #include <linux/rculist.h>
15 #include <trace/events/rpm.h>
16 
17 #include "../base.h"
18 #include "power.h"
19 
20 typedef int (*pm_callback_t)(struct device *);
21 
22 static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
23 {
24 	return *(pm_callback_t *)(start + offset);
25 }
26 
27 static pm_callback_t __rpm_get_driver_callback(struct device *dev,
28 					       size_t cb_offset)
29 {
30 	if (dev->driver && dev->driver->pm)
31 		return get_callback_ptr(dev->driver->pm, cb_offset);
32 
33 	return NULL;
34 }
35 
36 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
37 {
38 	const struct dev_pm_ops *ops;
39 	pm_callback_t cb = NULL;
40 
41 	if (dev->pm_domain)
42 		ops = &dev->pm_domain->ops;
43 	else if (dev->type && dev->type->pm)
44 		ops = dev->type->pm;
45 	else if (dev->class && dev->class->pm)
46 		ops = dev->class->pm;
47 	else if (dev->bus && dev->bus->pm)
48 		ops = dev->bus->pm;
49 	else
50 		ops = NULL;
51 
52 	if (ops)
53 		cb = get_callback_ptr(ops, cb_offset);
54 
55 	if (!cb)
56 		cb = __rpm_get_driver_callback(dev, cb_offset);
57 
58 	return cb;
59 }
60 
61 #define RPM_GET_CALLBACK(dev, callback) \
62 		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
63 
64 static int rpm_resume(struct device *dev, int rpmflags);
65 static int rpm_suspend(struct device *dev, int rpmflags);
66 
67 /**
68  * update_pm_runtime_accounting - Update the time accounting of power states
69  * @dev: Device to update the accounting for
70  *
71  * In order to be able to have time accounting of the various power states
72  * (as used by programs such as PowerTOP to show the effectiveness of runtime
73  * PM), we need to track the time spent in each state.
74  * update_pm_runtime_accounting must be called each time before the
75  * runtime_status field is updated, to account the time in the old state
76  * correctly.
77  */
78 static void update_pm_runtime_accounting(struct device *dev)
79 {
80 	u64 now, last, delta;
81 
82 	if (dev->power.disable_depth > 0)
83 		return;
84 
85 	last = dev->power.accounting_timestamp;
86 
87 	now = ktime_get_mono_fast_ns();
88 	dev->power.accounting_timestamp = now;
89 
90 	/*
91 	 * Because ktime_get_mono_fast_ns() is not monotonic during
92 	 * timekeeping updates, ensure that 'now' is after the last saved
93 	 * timestamp.
94 	 */
95 	if (now < last)
96 		return;
97 
98 	delta = now - last;
99 
100 	if (dev->power.runtime_status == RPM_SUSPENDED)
101 		dev->power.suspended_time += delta;
102 	else
103 		dev->power.active_time += delta;
104 }
105 
106 static void __update_runtime_status(struct device *dev, enum rpm_status status)
107 {
108 	update_pm_runtime_accounting(dev);
109 	trace_rpm_status(dev, status);
110 	dev->power.runtime_status = status;
111 }
112 
113 static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
114 {
115 	u64 time;
116 	unsigned long flags;
117 
118 	spin_lock_irqsave(&dev->power.lock, flags);
119 
120 	update_pm_runtime_accounting(dev);
121 	time = suspended ? dev->power.suspended_time : dev->power.active_time;
122 
123 	spin_unlock_irqrestore(&dev->power.lock, flags);
124 
125 	return time;
126 }
127 
128 u64 pm_runtime_active_time(struct device *dev)
129 {
130 	return rpm_get_accounted_time(dev, false);
131 }
132 
133 u64 pm_runtime_suspended_time(struct device *dev)
134 {
135 	return rpm_get_accounted_time(dev, true);
136 }
137 EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
138 
139 /**
140  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
141  * @dev: Device to handle.
142  */
143 static void pm_runtime_deactivate_timer(struct device *dev)
144 {
145 	if (dev->power.timer_expires > 0) {
146 		hrtimer_try_to_cancel(&dev->power.suspend_timer);
147 		dev->power.timer_expires = 0;
148 	}
149 }
150 
151 /**
152  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
153  * @dev: Device to handle.
154  */
155 static void pm_runtime_cancel_pending(struct device *dev)
156 {
157 	pm_runtime_deactivate_timer(dev);
158 	/*
159 	 * In case there's a request pending, make sure its work function will
160 	 * return without doing anything.
161 	 */
162 	dev->power.request = RPM_REQ_NONE;
163 }
164 
165 /*
166  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
167  * @dev: Device to handle.
168  *
169  * Compute the autosuspend-delay expiration time based on the device's
170  * power.last_busy time.  If the delay has already expired or is disabled
171  * (negative) or the power.use_autosuspend flag isn't set, return 0.
172  * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
173  *
174  * This function may be called either with or without dev->power.lock held.
175  * Either way it can be racy, since power.last_busy may be updated at any time.
176  */
177 u64 pm_runtime_autosuspend_expiration(struct device *dev)
178 {
179 	int autosuspend_delay;
180 	u64 expires;
181 
182 	if (!dev->power.use_autosuspend)
183 		return 0;
184 
185 	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
186 	if (autosuspend_delay < 0)
187 		return 0;
188 
189 	expires  = READ_ONCE(dev->power.last_busy);
190 	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
191 	if (expires > ktime_get_mono_fast_ns())
192 		return expires;	/* Expires in the future */
193 
194 	return 0;
195 }
196 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
197 
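/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * relies on the autosuspend expiration computed above typically opts in to
 * autosuspend during probe and marks the device busy before dropping its
 * last reference.  The 2000 ms delay is an arbitrary example value.
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */
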
198 static int dev_memalloc_noio(struct device *dev, void *data)
199 {
200 	return dev->power.memalloc_noio;
201 }
202 
203 /*
204  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
205  * @dev: Device to handle.
206  * @enable: True for setting the flag and False for clearing the flag.
207  *
208  * Set the flag for all devices in the path from the device to the
209  * root device in the device tree if @enable is true, otherwise clear
210  * the flag for devices in the path whose siblings don't set the flag.
211  *
212  * The function should only be called by block device or network
213  * device drivers to solve the deadlock problem during runtime
214  * resume/suspend:
215  *
216  *     If memory allocation with GFP_KERNEL is called inside the runtime
217  *     resume/suspend callback of any one of its ancestors (or the
218  *     block device itself), a deadlock may be triggered inside the
219  *     memory allocation since it might not complete until the block
220  *     device becomes active and the involved page I/O finishes. The
221  *     situation was first pointed out by Alan Stern. Network devices
222  *     are involved in the analogous iSCSI situation.
223  *
224  * dev_hotplug_mutex is held in the function to handle the hotplug race,
225  * because pm_runtime_set_memalloc_noio() may be called from an async
226  * probe().
227  *
228  * The function should be called between device_add() and device_del()
229  * on the affected (block/network) device.
230  */
231 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
232 {
233 	static DEFINE_MUTEX(dev_hotplug_mutex);
234 
235 	mutex_lock(&dev_hotplug_mutex);
236 	for (;;) {
237 		bool enabled;
238 
239 		/* hold power lock since bitfield is not SMP-safe. */
240 		spin_lock_irq(&dev->power.lock);
241 		enabled = dev->power.memalloc_noio;
242 		dev->power.memalloc_noio = enable;
243 		spin_unlock_irq(&dev->power.lock);
244 
245 		/*
246 		 * No need to enable the ancestors any more if the device
247 		 * has already been enabled.
248 		 */
249 		if (enabled && enable)
250 			break;
251 
252 		dev = dev->parent;
253 
254 		/*
255 		 * Clear the parent device's flag only if none of its
256 		 * children have the flag set, because an ancestor's
257 		 * flag may have been set by any one of its descendants.
258 		 */
259 		if (!dev || (!enable &&
260 		    device_for_each_child(dev, NULL, dev_memalloc_noio)))
261 			break;
262 	}
263 	mutex_unlock(&dev_hotplug_mutex);
264 }
265 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
266 
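/*
 * Illustrative usage sketch (not part of the original file): a block or
 * network device driver sets the flag once the device has been registered
 * and clears it again before removal, as required by the comment above.
 *
 *	device_add(dev);
 *	pm_runtime_set_memalloc_noio(dev, true);
 *
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 */
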
267 /**
268  * rpm_check_suspend_allowed - Test whether a device may be suspended.
269  * @dev: Device to test.
270  */
271 static int rpm_check_suspend_allowed(struct device *dev)
272 {
273 	int retval = 0;
274 
275 	if (dev->power.runtime_error)
276 		retval = -EINVAL;
277 	else if (dev->power.disable_depth > 0)
278 		retval = -EACCES;
279 	else if (atomic_read(&dev->power.usage_count))
280 		retval = -EAGAIN;
281 	else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
282 		retval = -EBUSY;
283 
284 	/* Pending resume requests take precedence over suspends. */
285 	else if ((dev->power.deferred_resume &&
286 	    dev->power.runtime_status == RPM_SUSPENDING) ||
287 	    (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
288 		retval = -EAGAIN;
289 	else if (__dev_pm_qos_resume_latency(dev) == 0)
290 		retval = -EPERM;
291 	else if (dev->power.runtime_status == RPM_SUSPENDED)
292 		retval = 1;
293 
294 	return retval;
295 }
296 
297 static int rpm_get_suppliers(struct device *dev)
298 {
299 	struct device_link *link;
300 
301 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
302 				device_links_read_lock_held()) {
303 		int retval;
304 
305 		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
306 			continue;
307 
308 		retval = pm_runtime_get_sync(link->supplier);
309 		/* Ignore suppliers with disabled runtime PM. */
310 		if (retval < 0 && retval != -EACCES) {
311 			pm_runtime_put_noidle(link->supplier);
312 			return retval;
313 		}
314 		refcount_inc(&link->rpm_active);
315 	}
316 	return 0;
317 }
318 
319 /**
320  * pm_runtime_release_supplier - Drop references to device link's supplier.
321  * @link: Target device link.
322  *
323  * Drop all runtime PM references associated with @link to its supplier device.
324  */
325 void pm_runtime_release_supplier(struct device_link *link)
326 {
327 	struct device *supplier = link->supplier;
328 
329 	/*
330 	 * The additional power.usage_count check is a safety net in case
331 	 * the rpm_active refcount becomes saturated, in which case
332 	 * refcount_dec_not_one() would return true forever, but it is not
333 	 * strictly necessary.
334 	 */
335 	while (refcount_dec_not_one(&link->rpm_active) &&
336 	       atomic_read(&supplier->power.usage_count) > 0)
337 		pm_runtime_put_noidle(supplier);
338 }
339 
340 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
341 {
342 	struct device_link *link;
343 
344 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
345 				device_links_read_lock_held()) {
346 		pm_runtime_release_supplier(link);
347 		if (try_to_suspend)
348 			pm_request_idle(link->supplier);
349 	}
350 }
351 
352 static void rpm_put_suppliers(struct device *dev)
353 {
354 	__rpm_put_suppliers(dev, true);
355 }
356 
357 static void rpm_suspend_suppliers(struct device *dev)
358 {
359 	struct device_link *link;
360 	int idx = device_links_read_lock();
361 
362 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
363 				device_links_read_lock_held())
364 		pm_request_idle(link->supplier);
365 
366 	device_links_read_unlock(idx);
367 }
368 
369 /**
370  * __rpm_callback - Run a given runtime PM callback for a given device.
371  * @cb: Runtime PM callback to run.
372  * @dev: Device to run the callback for.
373  */
374 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
375 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
376 {
377 	int retval = 0, idx;
378 	bool use_links = dev->power.links_count > 0;
379 
380 	if (dev->power.irq_safe) {
381 		spin_unlock(&dev->power.lock);
382 	} else {
383 		spin_unlock_irq(&dev->power.lock);
384 
385 		/*
386 		 * Resume suppliers if necessary.
387 		 *
388 		 * The device's runtime PM status cannot change until this
389 		 * routine returns, so it is safe to read the status outside of
390 		 * the lock.
391 		 */
392 		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
393 			idx = device_links_read_lock();
394 
395 			retval = rpm_get_suppliers(dev);
396 			if (retval) {
397 				rpm_put_suppliers(dev);
398 				goto fail;
399 			}
400 
401 			device_links_read_unlock(idx);
402 		}
403 	}
404 
405 	if (cb)
406 		retval = cb(dev);
407 
408 	if (dev->power.irq_safe) {
409 		spin_lock(&dev->power.lock);
410 	} else {
411 		/*
412 		 * If the device is suspending and the callback has returned
413 		 * success, drop the usage counters of the suppliers that have
414 		 * been reference counted on its resume.
415 		 *
416 		 * Do that if resume fails too.
417 		 */
418 		if (use_links &&
419 		    ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
420 		    (dev->power.runtime_status == RPM_RESUMING && retval))) {
421 			idx = device_links_read_lock();
422 
423 			__rpm_put_suppliers(dev, false);
424 
425 fail:
426 			device_links_read_unlock(idx);
427 		}
428 
429 		spin_lock_irq(&dev->power.lock);
430 	}
431 
432 	return retval;
433 }
434 
435 /**
436  * rpm_callback - Run a given runtime PM callback for a given device.
437  * @cb: Runtime PM callback to run.
438  * @dev: Device to run the callback for.
439  */
440 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
441 {
442 	int retval;
443 
444 	if (dev->power.memalloc_noio) {
445 		unsigned int noio_flag;
446 
447 		/*
448 		 * A deadlock might be caused if a memory allocation with
449 		 * GFP_KERNEL happens inside the runtime_suspend or
450 		 * runtime_resume callback of a block device's
451 		 * ancestor or the block device itself. A network
452 		 * device might be part of an iSCSI block
453 		 * device, so network devices and their ancestors should
454 		 * be marked as memalloc_noio too.
455 		 */
456 		noio_flag = memalloc_noio_save();
457 		retval = __rpm_callback(cb, dev);
458 		memalloc_noio_restore(noio_flag);
459 	} else {
460 		retval = __rpm_callback(cb, dev);
461 	}
462 
463 	/*
464 	 * Since -EACCES means that runtime PM is disabled for the given device,
465 	 * it should not be returned by runtime PM callbacks.  If it is returned
466 	 * nevertheless, assume it to be a transient error and convert it to
467 	 * -EAGAIN.
468 	 */
469 	if (retval == -EACCES)
470 		retval = -EAGAIN;
471 
472 	if (retval != -EAGAIN && retval != -EBUSY)
473 		dev->power.runtime_error = retval;
474 
475 	return retval;
476 }
477 
478 /**
479  * rpm_idle - Notify device bus type if the device can be suspended.
480  * @dev: Device to notify the bus type about.
481  * @rpmflags: Flag bits.
482  *
483  * Check if the device's runtime PM status allows it to be suspended.  If
484  * another idle notification has been started earlier, return immediately.  If
485  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
486  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
487  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
488  *
489  * This function must be called under dev->power.lock with interrupts disabled.
490  */
491 static int rpm_idle(struct device *dev, int rpmflags)
492 {
493 	int (*callback)(struct device *);
494 	int retval;
495 
496 	trace_rpm_idle(dev, rpmflags);
497 	retval = rpm_check_suspend_allowed(dev);
498 	if (retval < 0)
499 		;	/* Conditions are wrong. */
500 
501 	else if ((rpmflags & RPM_GET_PUT) && retval == 1)
502 		;	/* put() is allowed in RPM_SUSPENDED */
503 
504 	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
505 	else if (dev->power.runtime_status != RPM_ACTIVE)
506 		retval = -EAGAIN;
507 
508 	/*
509 	 * Any pending request other than an idle notification takes
510 	 * precedence over us, except that the timer may be running.
511 	 */
512 	else if (dev->power.request_pending &&
513 	    dev->power.request > RPM_REQ_IDLE)
514 		retval = -EAGAIN;
515 
516 	/* Act as though RPM_NOWAIT is always set. */
517 	else if (dev->power.idle_notification)
518 		retval = -EINPROGRESS;
519 
520 	if (retval)
521 		goto out;
522 
523 	/* Pending requests need to be canceled. */
524 	dev->power.request = RPM_REQ_NONE;
525 
526 	callback = RPM_GET_CALLBACK(dev, runtime_idle);
527 
528 	/* If no callback assume success. */
529 	if (!callback || dev->power.no_callbacks)
530 		goto out;
531 
532 	/* Carry out an asynchronous or a synchronous idle notification. */
533 	if (rpmflags & RPM_ASYNC) {
534 		dev->power.request = RPM_REQ_IDLE;
535 		if (!dev->power.request_pending) {
536 			dev->power.request_pending = true;
537 			queue_work(pm_wq, &dev->power.work);
538 		}
539 		trace_rpm_return_int(dev, _THIS_IP_, 0);
540 		return 0;
541 	}
542 
543 	dev->power.idle_notification = true;
544 
545 	if (dev->power.irq_safe)
546 		spin_unlock(&dev->power.lock);
547 	else
548 		spin_unlock_irq(&dev->power.lock);
549 
550 	retval = callback(dev);
551 
552 	if (dev->power.irq_safe)
553 		spin_lock(&dev->power.lock);
554 	else
555 		spin_lock_irq(&dev->power.lock);
556 
557 	dev->power.idle_notification = false;
558 	wake_up_all(&dev->power.wait_queue);
559 
560  out:
561 	trace_rpm_return_int(dev, _THIS_IP_, retval);
562 	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
563 }
564 
565 /**
566  * rpm_suspend - Carry out runtime suspend of given device.
567  * @dev: Device to suspend.
568  * @rpmflags: Flag bits.
569  *
570  * Check if the device's runtime PM status allows it to be suspended.
571  * Cancel a pending idle notification, autosuspend or suspend. If
572  * another suspend has been started earlier, either return immediately
573  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
574  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
575  * otherwise run the ->runtime_suspend() callback directly. If
576  * ->runtime_suspend() succeeds and a deferred resume was requested while
577  * the callback was running, carry out that resume; otherwise send an idle
578  * notification for the device's parent (provided that neither
579  * ignore_children of parent->power nor irq_safe of dev->power is set).
580  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
581  * flag is set and the next autosuspend-delay expiration time is in the
582  * future, schedule another autosuspend attempt.
583  *
584  * This function must be called under dev->power.lock with interrupts disabled.
585  */
586 static int rpm_suspend(struct device *dev, int rpmflags)
587 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
588 {
589 	int (*callback)(struct device *);
590 	struct device *parent = NULL;
591 	int retval;
592 
593 	trace_rpm_suspend(dev, rpmflags);
594 
595  repeat:
596 	retval = rpm_check_suspend_allowed(dev);
597 	if (retval < 0)
598 		goto out;	/* Conditions are wrong. */
599 
600 	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
601 	if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
602 		retval = -EAGAIN;
603 
604 	if (retval)
605 		goto out;
606 
607 	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
608 	if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
609 		u64 expires = pm_runtime_autosuspend_expiration(dev);
610 
611 		if (expires != 0) {
612 			/* Pending requests need to be canceled. */
613 			dev->power.request = RPM_REQ_NONE;
614 
615 			/*
616 			 * Optimization: If the timer is already running and is
617 			 * set to expire at or before the autosuspend delay,
618 			 * avoid the overhead of resetting it.  Just let it
619 			 * expire; pm_suspend_timer_fn() will take care of the
620 			 * rest.
621 			 */
622 			if (!(dev->power.timer_expires &&
623 			    dev->power.timer_expires <= expires)) {
624 				/*
625 				 * We add a slack of 25% to gather wakeups
626 				 * without sacrificing the granularity.
627 				 */
628 				u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
629 						    (NSEC_PER_MSEC >> 2);
630 
631 				dev->power.timer_expires = expires;
632 				hrtimer_start_range_ns(&dev->power.suspend_timer,
633 						       ns_to_ktime(expires),
634 						       slack,
635 						       HRTIMER_MODE_ABS);
636 			}
637 			dev->power.timer_autosuspends = 1;
638 			goto out;
639 		}
640 	}
641 
642 	/* Other scheduled or pending requests need to be canceled. */
643 	pm_runtime_cancel_pending(dev);
644 
645 	if (dev->power.runtime_status == RPM_SUSPENDING) {
646 		DEFINE_WAIT(wait);
647 
648 		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
649 			retval = -EINPROGRESS;
650 			goto out;
651 		}
652 
653 		if (dev->power.irq_safe) {
654 			spin_unlock(&dev->power.lock);
655 
656 			cpu_relax();
657 
658 			spin_lock(&dev->power.lock);
659 			goto repeat;
660 		}
661 
662 		/* Wait for the other suspend running in parallel with us. */
663 		for (;;) {
664 			prepare_to_wait(&dev->power.wait_queue, &wait,
665 					TASK_UNINTERRUPTIBLE);
666 			if (dev->power.runtime_status != RPM_SUSPENDING)
667 				break;
668 
669 			spin_unlock_irq(&dev->power.lock);
670 
671 			schedule();
672 
673 			spin_lock_irq(&dev->power.lock);
674 		}
675 		finish_wait(&dev->power.wait_queue, &wait);
676 		goto repeat;
677 	}
678 
679 	if (dev->power.no_callbacks)
680 		goto no_callback;	/* Assume success. */
681 
682 	/* Carry out an asynchronous or a synchronous suspend. */
683 	if (rpmflags & RPM_ASYNC) {
684 		dev->power.request = (rpmflags & RPM_AUTO) ?
685 		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
686 		if (!dev->power.request_pending) {
687 			dev->power.request_pending = true;
688 			queue_work(pm_wq, &dev->power.work);
689 		}
690 		goto out;
691 	}
692 
693 	__update_runtime_status(dev, RPM_SUSPENDING);
694 
695 	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
696 
697 	dev_pm_enable_wake_irq_check(dev, true);
698 	retval = rpm_callback(callback, dev);
699 	if (retval)
700 		goto fail;
701 
702 	dev_pm_enable_wake_irq_complete(dev);
703 
704  no_callback:
705 	__update_runtime_status(dev, RPM_SUSPENDED);
706 	pm_runtime_deactivate_timer(dev);
707 
708 	if (dev->parent) {
709 		parent = dev->parent;
710 		atomic_add_unless(&parent->power.child_count, -1, 0);
711 	}
712 	wake_up_all(&dev->power.wait_queue);
713 
714 	if (dev->power.deferred_resume) {
715 		dev->power.deferred_resume = false;
716 		rpm_resume(dev, 0);
717 		retval = -EAGAIN;
718 		goto out;
719 	}
720 
721 	if (dev->power.irq_safe)
722 		goto out;
723 
724 	/* Maybe the parent is now able to suspend. */
725 	if (parent && !parent->power.ignore_children) {
726 		spin_unlock(&dev->power.lock);
727 
728 		spin_lock(&parent->power.lock);
729 		rpm_idle(parent, RPM_ASYNC);
730 		spin_unlock(&parent->power.lock);
731 
732 		spin_lock(&dev->power.lock);
733 	}
734 	/* Maybe the suppliers are now able to suspend. */
735 	if (dev->power.links_count > 0) {
736 		spin_unlock_irq(&dev->power.lock);
737 
738 		rpm_suspend_suppliers(dev);
739 
740 		spin_lock_irq(&dev->power.lock);
741 	}
742 
743  out:
744 	trace_rpm_return_int(dev, _THIS_IP_, retval);
745 
746 	return retval;
747 
748  fail:
749 	dev_pm_disable_wake_irq_check(dev, true);
750 	__update_runtime_status(dev, RPM_ACTIVE);
751 	dev->power.deferred_resume = false;
752 	wake_up_all(&dev->power.wait_queue);
753 
754 	/*
755 	 * On transient errors, if the callback routine failed an autosuspend,
756 	 * and if the last_busy time has been updated so that there is a new
757 	 * autosuspend expiration time, automatically reschedule another
758 	 * autosuspend.
759 	 */
760 	if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
761 	    pm_runtime_autosuspend_expiration(dev) != 0)
762 		goto repeat;
763 
764 	pm_runtime_cancel_pending(dev);
765 
766 	goto out;
767 }
768 
769 /**
770  * rpm_resume - Carry out runtime resume of given device.
771  * @dev: Device to resume.
772  * @rpmflags: Flag bits.
773  *
774  * Check if the device's runtime PM status allows it to be resumed.  Cancel
775  * any scheduled or pending requests.  If another resume has been started
776  * earlier, either return immediately or wait for it to finish, depending on the
777  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
778  * parallel with this function, either tell the other process to resume after
779  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
780  * flag is set then queue a resume request; otherwise run the
781  * ->runtime_resume() callback directly.  Queue an idle notification for the
782  * device if the resume succeeded.
783  *
784  * This function must be called under dev->power.lock with interrupts disabled.
785  */
786 static int rpm_resume(struct device *dev, int rpmflags)
787 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
788 {
789 	int (*callback)(struct device *);
790 	struct device *parent = NULL;
791 	int retval = 0;
792 
793 	trace_rpm_resume(dev, rpmflags);
794 
795  repeat:
796 	if (dev->power.runtime_error) {
797 		retval = -EINVAL;
798 	} else if (dev->power.disable_depth > 0) {
799 		if (dev->power.runtime_status == RPM_ACTIVE &&
800 		    dev->power.last_status == RPM_ACTIVE)
801 			retval = 1;
802 		else if (rpmflags & RPM_TRANSPARENT)
803 			goto out;
804 		else
805 			retval = -EACCES;
806 	}
807 	if (retval)
808 		goto out;
809 
810 	/*
811 	 * Other scheduled or pending requests need to be canceled.  Small
812 	 * optimization: If an autosuspend timer is running, leave it running
813 	 * rather than cancelling it now only to restart it again in the near
814 	 * future.
815 	 */
816 	dev->power.request = RPM_REQ_NONE;
817 	if (!dev->power.timer_autosuspends)
818 		pm_runtime_deactivate_timer(dev);
819 
820 	if (dev->power.runtime_status == RPM_ACTIVE) {
821 		retval = 1;
822 		goto out;
823 	}
824 
825 	if (dev->power.runtime_status == RPM_RESUMING ||
826 	    dev->power.runtime_status == RPM_SUSPENDING) {
827 		DEFINE_WAIT(wait);
828 
829 		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
830 			if (dev->power.runtime_status == RPM_SUSPENDING) {
831 				dev->power.deferred_resume = true;
832 				if (rpmflags & RPM_NOWAIT)
833 					retval = -EINPROGRESS;
834 			} else {
835 				retval = -EINPROGRESS;
836 			}
837 			goto out;
838 		}
839 
840 		if (dev->power.irq_safe) {
841 			spin_unlock(&dev->power.lock);
842 
843 			cpu_relax();
844 
845 			spin_lock(&dev->power.lock);
846 			goto repeat;
847 		}
848 
849 		/* Wait for the operation carried out in parallel with us. */
850 		for (;;) {
851 			prepare_to_wait(&dev->power.wait_queue, &wait,
852 					TASK_UNINTERRUPTIBLE);
853 			if (dev->power.runtime_status != RPM_RESUMING &&
854 			    dev->power.runtime_status != RPM_SUSPENDING)
855 				break;
856 
857 			spin_unlock_irq(&dev->power.lock);
858 
859 			schedule();
860 
861 			spin_lock_irq(&dev->power.lock);
862 		}
863 		finish_wait(&dev->power.wait_queue, &wait);
864 		goto repeat;
865 	}
866 
867 	/*
868 	 * See if we can skip waking up the parent.  This is safe only if
869 	 * power.no_callbacks is set, because otherwise we don't know whether
870 	 * the resume will actually succeed.
871 	 */
872 	if (dev->power.no_callbacks && !parent && dev->parent) {
873 		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
874 		if (dev->parent->power.disable_depth > 0 ||
875 		    dev->parent->power.ignore_children ||
876 		    dev->parent->power.runtime_status == RPM_ACTIVE) {
877 			atomic_inc(&dev->parent->power.child_count);
878 			spin_unlock(&dev->parent->power.lock);
879 			retval = 1;
880 			goto no_callback;	/* Assume success. */
881 		}
882 		spin_unlock(&dev->parent->power.lock);
883 	}
884 
885 	/* Carry out an asynchronous or a synchronous resume. */
886 	if (rpmflags & RPM_ASYNC) {
887 		dev->power.request = RPM_REQ_RESUME;
888 		if (!dev->power.request_pending) {
889 			dev->power.request_pending = true;
890 			queue_work(pm_wq, &dev->power.work);
891 		}
892 		retval = 0;
893 		goto out;
894 	}
895 
896 	if (!parent && dev->parent) {
897 		/*
898 		 * Increment the parent's usage counter and resume it if
899 		 * necessary.  Not needed if dev is irq-safe; then the
900 		 * parent is permanently resumed.
901 		 */
902 		parent = dev->parent;
903 		if (dev->power.irq_safe)
904 			goto skip_parent;
905 
906 		spin_unlock(&dev->power.lock);
907 
908 		pm_runtime_get_noresume(parent);
909 
910 		spin_lock(&parent->power.lock);
911 		/*
912 		 * Resume the parent if it has runtime PM enabled and has not
913 		 * been set to ignore its children.
914 		 */
915 		if (!parent->power.disable_depth &&
916 		    !parent->power.ignore_children) {
917 			rpm_resume(parent, 0);
918 			if (parent->power.runtime_status != RPM_ACTIVE)
919 				retval = -EBUSY;
920 		}
921 		spin_unlock(&parent->power.lock);
922 
923 		spin_lock(&dev->power.lock);
924 		if (retval)
925 			goto out;
926 
927 		goto repeat;
928 	}
929  skip_parent:
930 
931 	if (dev->power.no_callbacks)
932 		goto no_callback;	/* Assume success. */
933 
934 	__update_runtime_status(dev, RPM_RESUMING);
935 
936 	callback = RPM_GET_CALLBACK(dev, runtime_resume);
937 
938 	dev_pm_disable_wake_irq_check(dev, false);
939 	retval = rpm_callback(callback, dev);
940 	if (retval) {
941 		__update_runtime_status(dev, RPM_SUSPENDED);
942 		pm_runtime_cancel_pending(dev);
943 		dev_pm_enable_wake_irq_check(dev, false);
944 	} else {
945  no_callback:
946 		__update_runtime_status(dev, RPM_ACTIVE);
947 		pm_runtime_mark_last_busy(dev);
948 		if (parent)
949 			atomic_inc(&parent->power.child_count);
950 	}
951 	wake_up_all(&dev->power.wait_queue);
952 
953 	if (retval >= 0)
954 		rpm_idle(dev, RPM_ASYNC);
955 
956  out:
957 	if (parent && !dev->power.irq_safe) {
958 		spin_unlock_irq(&dev->power.lock);
959 
960 		pm_runtime_put(parent);
961 
962 		spin_lock_irq(&dev->power.lock);
963 	}
964 
965 	trace_rpm_return_int(dev, _THIS_IP_, retval);
966 
967 	return retval;
968 }
969 
970 /**
971  * pm_runtime_work - Universal runtime PM work function.
972  * @work: Work structure used for scheduling the execution of this function.
973  *
974  * Use @work to get the device object the work is to be done for, determine what
975  * is to be done and execute the appropriate runtime PM function.
976  */
977 static void pm_runtime_work(struct work_struct *work)
978 {
979 	struct device *dev = container_of(work, struct device, power.work);
980 	enum rpm_request req;
981 
982 	spin_lock_irq(&dev->power.lock);
983 
984 	if (!dev->power.request_pending)
985 		goto out;
986 
987 	req = dev->power.request;
988 	dev->power.request = RPM_REQ_NONE;
989 	dev->power.request_pending = false;
990 
991 	switch (req) {
992 	case RPM_REQ_NONE:
993 		break;
994 	case RPM_REQ_IDLE:
995 		rpm_idle(dev, RPM_NOWAIT);
996 		break;
997 	case RPM_REQ_SUSPEND:
998 		rpm_suspend(dev, RPM_NOWAIT);
999 		break;
1000 	case RPM_REQ_AUTOSUSPEND:
1001 		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
1002 		break;
1003 	case RPM_REQ_RESUME:
1004 		rpm_resume(dev, RPM_NOWAIT);
1005 		break;
1006 	}
1007 
1008  out:
1009 	spin_unlock_irq(&dev->power.lock);
1010 }
1011 
1012 /**
1013  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
1014  * @timer: hrtimer used by pm_schedule_suspend().
1015  *
1016  * Check if the time is right and queue a suspend request.
1017  */
1018 static enum hrtimer_restart  pm_suspend_timer_fn(struct hrtimer *timer)
1019 {
1020 	struct device *dev = container_of(timer, struct device, power.suspend_timer);
1021 	unsigned long flags;
1022 	u64 expires;
1023 
1024 	spin_lock_irqsave(&dev->power.lock, flags);
1025 
1026 	expires = dev->power.timer_expires;
1027 	/*
1028 	 * If 'expires' is after the current time, we've been called
1029 	 * too early.
1030 	 */
1031 	if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
1032 		dev->power.timer_expires = 0;
1033 		rpm_suspend(dev, dev->power.timer_autosuspends ?
1034 		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
1035 	}
1036 
1037 	spin_unlock_irqrestore(&dev->power.lock, flags);
1038 
1039 	return HRTIMER_NORESTART;
1040 }
1041 
1042 /**
1043  * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
1044  * @dev: Device to suspend.
1045  * @delay: Time to wait before submitting a suspend request, in milliseconds.
1046  */
1047 int pm_schedule_suspend(struct device *dev, unsigned int delay)
1048 {
1049 	unsigned long flags;
1050 	u64 expires;
1051 	int retval;
1052 
1053 	spin_lock_irqsave(&dev->power.lock, flags);
1054 
1055 	if (!delay) {
1056 		retval = rpm_suspend(dev, RPM_ASYNC);
1057 		goto out;
1058 	}
1059 
1060 	retval = rpm_check_suspend_allowed(dev);
1061 	if (retval)
1062 		goto out;
1063 
1064 	/* Other scheduled or pending requests need to be canceled. */
1065 	pm_runtime_cancel_pending(dev);
1066 
1067 	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
1068 	dev->power.timer_expires = expires;
1069 	dev->power.timer_autosuspends = 0;
1070 	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1071 
1072  out:
1073 	spin_unlock_irqrestore(&dev->power.lock, flags);
1074 
1075 	return retval;
1076 }
1077 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
1078 
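/*
 * Illustrative usage sketch (not part of the original file): request an
 * asynchronous suspend roughly half a second from now; the 500 ms delay and
 * the dev_dbg() diagnostic are arbitrary example choices.
 *
 *	ret = pm_schedule_suspend(dev, 500);
 *	if (ret < 0)
 *		dev_dbg(dev, "suspend not scheduled: %d\n", ret);
 */
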
1079 static int rpm_drop_usage_count(struct device *dev)
1080 {
1081 	int ret;
1082 
1083 	ret = atomic_sub_return(1, &dev->power.usage_count);
1084 	if (ret >= 0)
1085 		return ret;
1086 
1087 	/*
1088 	 * Because rpm_resume() does not check the usage counter, it will resume
1089 	 * the device even if the usage counter is 0 or negative, so it is
1090 	 * sufficient to increment the usage counter here to reverse the change
1091 	 * made above.
1092 	 */
1093 	atomic_inc(&dev->power.usage_count);
1094 	dev_warn(dev, "Runtime PM usage count underflow!\n");
1095 	return -EINVAL;
1096 }
1097 
1098 /**
1099  * __pm_runtime_idle - Entry point for runtime idle operations.
1100  * @dev: Device to send idle notification for.
1101  * @rpmflags: Flag bits.
1102  *
1103  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1104  * return immediately if it is larger than zero (if it becomes negative, log a
1105  * warning, increment it, and return an error).  Then carry out an idle
1106  * notification, either synchronous or asynchronous.
1107  *
1108  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1109  * or if pm_runtime_irq_safe() has been called.
1110  */
1111 int __pm_runtime_idle(struct device *dev, int rpmflags)
1112 {
1113 	unsigned long flags;
1114 	int retval;
1115 
1116 	if (rpmflags & RPM_GET_PUT) {
1117 		retval = rpm_drop_usage_count(dev);
1118 		if (retval < 0) {
1119 			return retval;
1120 		} else if (retval > 0) {
1121 			trace_rpm_usage(dev, rpmflags);
1122 			return 0;
1123 		}
1124 	}
1125 
1126 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1127 
1128 	spin_lock_irqsave(&dev->power.lock, flags);
1129 	retval = rpm_idle(dev, rpmflags);
1130 	spin_unlock_irqrestore(&dev->power.lock, flags);
1131 
1132 	return retval;
1133 }
1134 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1135 
1136 /**
1137  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1138  * @dev: Device to suspend.
1139  * @rpmflags: Flag bits.
1140  *
1141  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1142  * return immediately if it is larger than zero (if it becomes negative, log a
1143  * warning, increment it, and return an error).  Then carry out a suspend,
1144  * either synchronous or asynchronous.
1145  *
1146  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1147  * or if pm_runtime_irq_safe() has been called.
1148  */
1149 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1150 {
1151 	unsigned long flags;
1152 	int retval;
1153 
1154 	if (rpmflags & RPM_GET_PUT) {
1155 		retval = rpm_drop_usage_count(dev);
1156 		if (retval < 0) {
1157 			return retval;
1158 		} else if (retval > 0) {
1159 			trace_rpm_usage(dev, rpmflags);
1160 			return 0;
1161 		}
1162 	}
1163 
1164 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1165 
1166 	spin_lock_irqsave(&dev->power.lock, flags);
1167 	retval = rpm_suspend(dev, rpmflags);
1168 	spin_unlock_irqrestore(&dev->power.lock, flags);
1169 
1170 	return retval;
1171 }
1172 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1173 
1174 /**
1175  * __pm_runtime_resume - Entry point for runtime resume operations.
1176  * @dev: Device to resume.
1177  * @rpmflags: Flag bits.
1178  *
1179  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1180  * carry out a resume, either synchronous or asynchronous.
1181  *
1182  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1183  * or if pm_runtime_irq_safe() has been called.
1184  */
1185 int __pm_runtime_resume(struct device *dev, int rpmflags)
1186 {
1187 	unsigned long flags;
1188 	int retval;
1189 
1190 	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1191 			dev->power.runtime_status != RPM_ACTIVE);
1192 
1193 	if (rpmflags & RPM_GET_PUT)
1194 		atomic_inc(&dev->power.usage_count);
1195 
1196 	spin_lock_irqsave(&dev->power.lock, flags);
1197 	retval = rpm_resume(dev, rpmflags);
1198 	spin_unlock_irqrestore(&dev->power.lock, flags);
1199 
1200 	return retval;
1201 }
1202 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
1203 
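/*
 * Illustrative usage sketch (not part of the original file): the helpers in
 * include/linux/pm_runtime.h wrap the three entry points above.  A typical
 * synchronous I/O path in a driver looks roughly like the following, where
 * foo_xfer() and foo_do_io() are hypothetical names.
 *
 *	static int foo_xfer(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = pm_runtime_resume_and_get(dev);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = foo_do_io(dev);
 *
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *		return ret;
 *	}
 */
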
1204 /**
1205  * pm_runtime_get_conditional - Conditionally bump up device usage counter.
1206  * @dev: Device to handle.
1207  * @ign_usage_count: Whether or not to look at the current usage counter value.
1208  *
1209  * Return -EINVAL if runtime PM is disabled for @dev.
1210  *
1211  * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
1212  * is set, or (2) @dev is not ignoring children and its active child count is
1213  * nonero, or (3) the runtime PM usage counter of @dev is not zero, increment
1214  * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
1215  *
1216  * Otherwise, return 0 without changing the usage counter.
1217  *
1218  * If @ign_usage_count is %true, this function can be used to prevent suspending
1219  * the device when its runtime PM status is %RPM_ACTIVE.
1220  *
1221  * If @ign_usage_count is %false, this function can be used to prevent
1222  * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
1223  * runtime PM usage counter is not zero.
1224  *
1225  * The caller is responsible for decrementing the runtime PM usage counter of
1226  * @dev after this function has returned a positive value for it.
1227  */
1228 static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
1229 {
1230 	unsigned long flags;
1231 	int retval;
1232 
1233 	spin_lock_irqsave(&dev->power.lock, flags);
1234 	if (dev->power.disable_depth > 0) {
1235 		retval = -EINVAL;
1236 	} else if (dev->power.runtime_status != RPM_ACTIVE) {
1237 		retval = 0;
1238 	} else if (ign_usage_count || (!dev->power.ignore_children &&
1239 		   atomic_read(&dev->power.child_count) > 0)) {
1240 		retval = 1;
1241 		atomic_inc(&dev->power.usage_count);
1242 	} else {
1243 		retval = atomic_inc_not_zero(&dev->power.usage_count);
1244 	}
1245 	trace_rpm_usage(dev, 0);
1246 	spin_unlock_irqrestore(&dev->power.lock, flags);
1247 
1248 	return retval;
1249 }
1250 
1251 /**
1252  * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
1253  *			      in active state
1254  * @dev: Target device.
1255  *
1256  * Increment the runtime PM usage counter of @dev if its runtime PM status is
1257  * %RPM_ACTIVE, in which case it returns 1. If the device is in a different
1258  * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the
1259  * device, in which case also the usage_count will remain unmodified.
1260  * device, in which case the usage_count will also remain unmodified.
1261 int pm_runtime_get_if_active(struct device *dev)
1262 {
1263 	return pm_runtime_get_conditional(dev, true);
1264 }
1265 EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
1266 
1267 /**
1268  * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
1269  * @dev: Target device.
1270  *
1271  * Increment the runtime PM usage counter of @dev if its runtime PM status is
1272  * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
1273  * ignoring children and its active child count is nonzero.  1 is returned in
1274  * this case.
1275  *
1276  * If @dev is in a different state or it is not in use (that is, its usage
1277  * counter is 0, or it is ignoring children, or its active child count is 0),
1278  * 0 is returned.
1279  *
1280  * -EINVAL is returned if runtime PM is disabled for the device, in which case
1281  * also the usage counter of @dev is not updated.
1282  * the usage counter of @dev is not updated either.
1283 int pm_runtime_get_if_in_use(struct device *dev)
1284 {
1285 	return pm_runtime_get_conditional(dev, false);
1286 }
1287 EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
1288 
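/*
 * Illustrative usage sketch (not part of the original file): a positive
 * return value from either helper above must be balanced with a put once the
 * caller is done touching the hardware; foo_read_counters() is hypothetical.
 *
 *	if (pm_runtime_get_if_in_use(dev) <= 0)
 *		return;
 *
 *	foo_read_counters(dev);
 *	pm_runtime_put(dev);
 */
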
1289 /**
1290  * __pm_runtime_set_status - Set runtime PM status of a device.
1291  * @dev: Device to handle.
1292  * @status: New runtime PM status of the device.
1293  *
1294  * If runtime PM of the device is disabled or its power.runtime_error field is
1295  * different from zero, the status may be changed either to RPM_ACTIVE, or to
1296  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1297  * However, if the device has a parent and the parent is not active, and the
1298  * parent's power.ignore_children flag is unset, the device's status cannot be
1299  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1300  *
1301  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1302  * and the device parent's counter of unsuspended children is modified to
1303  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
1304  * notification request for the parent is submitted.
1305  *
1306  * If @dev has any suppliers (as reflected by device links to them), and @status
1307  * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1308  * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1309  * of the @status value) and the suppliers will be deacticated on exit.  The
1310  * of the @status value) and the suppliers will be deactivated on exit.  The
1311  * case.
1312  */
1313 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1314 {
1315 	struct device *parent = dev->parent;
1316 	bool notify_parent = false;
1317 	unsigned long flags;
1318 	int error = 0;
1319 
1320 	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1321 		return -EINVAL;
1322 
1323 	spin_lock_irqsave(&dev->power.lock, flags);
1324 
1325 	/*
1326 	 * Prevent PM-runtime from being enabled for the device or return an
1327 	 * error if it is enabled already and working.
1328 	 */
1329 	if (dev->power.runtime_error || dev->power.disable_depth)
1330 		dev->power.disable_depth++;
1331 	else
1332 		error = -EAGAIN;
1333 
1334 	spin_unlock_irqrestore(&dev->power.lock, flags);
1335 
1336 	if (error)
1337 		return error;
1338 
1339 	/*
1340 	 * If the new status is RPM_ACTIVE, the suppliers can be activated
1341 	 * upfront regardless of the current status, because next time
1342 	 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1343 	 * involved will be dropped down to one anyway.
1344 	 */
1345 	if (status == RPM_ACTIVE) {
1346 		int idx = device_links_read_lock();
1347 
1348 		error = rpm_get_suppliers(dev);
1349 		if (error)
1350 			status = RPM_SUSPENDED;
1351 
1352 		device_links_read_unlock(idx);
1353 	}
1354 
1355 	spin_lock_irqsave(&dev->power.lock, flags);
1356 
1357 	if (dev->power.runtime_status == status || !parent)
1358 		goto out_set;
1359 
1360 	if (status == RPM_SUSPENDED) {
1361 		atomic_add_unless(&parent->power.child_count, -1, 0);
1362 		notify_parent = !parent->power.ignore_children;
1363 	} else {
1364 		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1365 
1366 		/*
1367 		 * It is invalid to put an active child under a parent that is
1368 		 * not active, has runtime PM enabled and the
1369 		 * 'power.ignore_children' flag unset.
1370 		 */
1371 		if (!parent->power.disable_depth &&
1372 		    !parent->power.ignore_children &&
1373 		    parent->power.runtime_status != RPM_ACTIVE) {
1374 			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1375 				dev_name(dev),
1376 				dev_name(parent));
1377 			error = -EBUSY;
1378 		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
1379 			atomic_inc(&parent->power.child_count);
1380 		}
1381 
1382 		spin_unlock(&parent->power.lock);
1383 
1384 		if (error) {
1385 			status = RPM_SUSPENDED;
1386 			goto out;
1387 		}
1388 	}
1389 
1390  out_set:
1391 	__update_runtime_status(dev, status);
1392 	if (!error)
1393 		dev->power.runtime_error = 0;
1394 
1395  out:
1396 	spin_unlock_irqrestore(&dev->power.lock, flags);
1397 
1398 	if (notify_parent)
1399 		pm_request_idle(parent);
1400 
1401 	if (status == RPM_SUSPENDED) {
1402 		int idx = device_links_read_lock();
1403 
1404 		rpm_put_suppliers(dev);
1405 
1406 		device_links_read_unlock(idx);
1407 	}
1408 
1409 	pm_runtime_enable(dev);
1410 
1411 	return error;
1412 }
1413 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1414 
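/*
 * Illustrative usage sketch (not part of the original file): drivers normally
 * reach this function through the pm_runtime_set_active() and
 * pm_runtime_set_suspended() wrappers.  A probe routine whose hardware is
 * already powered up would typically do:
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */
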
1415 /**
1416  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1417  * @dev: Device to handle.
1418  *
1419  * Flush all pending requests for the device from pm_wq and wait for all
1420  * runtime PM operations involving the device in progress to complete.
1421  *
1422  * Should be called under dev->power.lock with interrupts disabled.
1423  */
1424 static void __pm_runtime_barrier(struct device *dev)
1425 {
1426 	pm_runtime_deactivate_timer(dev);
1427 
1428 	if (dev->power.request_pending) {
1429 		dev->power.request = RPM_REQ_NONE;
1430 		spin_unlock_irq(&dev->power.lock);
1431 
1432 		cancel_work_sync(&dev->power.work);
1433 
1434 		spin_lock_irq(&dev->power.lock);
1435 		dev->power.request_pending = false;
1436 	}
1437 
1438 	if (dev->power.runtime_status == RPM_SUSPENDING ||
1439 	    dev->power.runtime_status == RPM_RESUMING ||
1440 	    dev->power.idle_notification) {
1441 		DEFINE_WAIT(wait);
1442 
1443 		/* Suspend, wake-up or idle notification in progress. */
1444 		for (;;) {
1445 			prepare_to_wait(&dev->power.wait_queue, &wait,
1446 					TASK_UNINTERRUPTIBLE);
1447 			if (dev->power.runtime_status != RPM_SUSPENDING
1448 			    && dev->power.runtime_status != RPM_RESUMING
1449 			    && !dev->power.idle_notification)
1450 				break;
1451 			spin_unlock_irq(&dev->power.lock);
1452 
1453 			schedule();
1454 
1455 			spin_lock_irq(&dev->power.lock);
1456 		}
1457 		finish_wait(&dev->power.wait_queue, &wait);
1458 	}
1459 }
1460 
1461 /**
1462  * pm_runtime_barrier - Flush pending requests and wait for completions.
1463  * @dev: Device to handle.
1464  *
1465  * Prevent the device from being suspended by incrementing its usage counter and
1466  * Prevent the device from being suspended by incrementing its usage counter
1467  * and, if there's a pending resume request for the device, wake the device up.
1468  * from pm_wq and wait for all runtime PM operations involving the device in
1469  * progress to complete.
1470  *
1471  * Return value:
1472  * 1, if there was a resume request pending and the device had to be woken up,
1473  * 0, otherwise
1474  */
1475 int pm_runtime_barrier(struct device *dev)
1476 {
1477 	int retval = 0;
1478 
1479 	pm_runtime_get_noresume(dev);
1480 	spin_lock_irq(&dev->power.lock);
1481 
1482 	if (dev->power.request_pending
1483 	    && dev->power.request == RPM_REQ_RESUME) {
1484 		rpm_resume(dev, 0);
1485 		retval = 1;
1486 	}
1487 
1488 	__pm_runtime_barrier(dev);
1489 
1490 	spin_unlock_irq(&dev->power.lock);
1491 	pm_runtime_put_noidle(dev);
1492 
1493 	return retval;
1494 }
1495 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1496 
1497 bool pm_runtime_block_if_disabled(struct device *dev)
1498 {
1499 	bool ret;
1500 
1501 	spin_lock_irq(&dev->power.lock);
1502 
1503 	ret = !pm_runtime_enabled(dev);
1504 	if (ret && dev->power.last_status == RPM_INVALID)
1505 		dev->power.last_status = RPM_BLOCKED;
1506 
1507 	spin_unlock_irq(&dev->power.lock);
1508 
1509 	return ret;
1510 }
1511 
1512 void pm_runtime_unblock(struct device *dev)
1513 {
1514 	spin_lock_irq(&dev->power.lock);
1515 
1516 	if (dev->power.last_status == RPM_BLOCKED)
1517 		dev->power.last_status = RPM_INVALID;
1518 
1519 	spin_unlock_irq(&dev->power.lock);
1520 }
1521 
1522 void __pm_runtime_disable(struct device *dev, bool check_resume)
1523 {
1524 	spin_lock_irq(&dev->power.lock);
1525 
1526 	if (dev->power.disable_depth > 0) {
1527 		dev->power.disable_depth++;
1528 		goto out;
1529 	}
1530 
1531 	/*
1532 	 * Wake up the device if there's a resume request pending, because that
1533 	 * means there probably is some I/O to process and disabling runtime PM
1534 	 * shouldn't prevent the device from processing the I/O.
1535 	 */
1536 	if (check_resume && dev->power.request_pending &&
1537 	    dev->power.request == RPM_REQ_RESUME) {
1538 		/*
1539 		 * Prevent suspends and idle notifications from being carried
1540 		 * out after we have woken up the device.
1541 		 */
1542 		pm_runtime_get_noresume(dev);
1543 
1544 		rpm_resume(dev, 0);
1545 
1546 		pm_runtime_put_noidle(dev);
1547 	}
1548 
1549 	/* Update time accounting before disabling PM-runtime. */
1550 	update_pm_runtime_accounting(dev);
1551 
1552 	if (!dev->power.disable_depth++) {
1553 		__pm_runtime_barrier(dev);
1554 		dev->power.last_status = dev->power.runtime_status;
1555 	}
1556 
1557  out:
1558 	spin_unlock_irq(&dev->power.lock);
1559 }
1560 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1561 
1562 /**
1563  * pm_runtime_enable - Enable runtime PM of a device.
1564  * @dev: Device to handle.
1565  */
1566 void pm_runtime_enable(struct device *dev)
1567 {
1568 	unsigned long flags;
1569 
1570 	spin_lock_irqsave(&dev->power.lock, flags);
1571 
1572 	if (!dev->power.disable_depth) {
1573 		dev_warn(dev, "Unbalanced %s!\n", __func__);
1574 		goto out;
1575 	}
1576 
1577 	if (--dev->power.disable_depth > 0)
1578 		goto out;
1579 
1580 	if (dev->power.last_status == RPM_BLOCKED) {
1581 		dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
1582 		dump_stack();
1583 	}
1584 	dev->power.last_status = RPM_INVALID;
1585 	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1586 
1587 	if (dev->power.runtime_status == RPM_SUSPENDED &&
1588 	    !dev->power.ignore_children &&
1589 	    atomic_read(&dev->power.child_count) > 0)
1590 		dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
1591 
1592 out:
1593 	spin_unlock_irqrestore(&dev->power.lock, flags);
1594 }
1595 EXPORT_SYMBOL_GPL(pm_runtime_enable);
1596 
1597 static void pm_runtime_set_suspended_action(void *data)
1598 {
1599 	pm_runtime_set_suspended(data);
1600 }
1601 
1602 /**
1603  * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
1604  *
1605  * @dev: Device to handle.
1606  */
1607 int devm_pm_runtime_set_active_enabled(struct device *dev)
1608 {
1609 	int err;
1610 
1611 	err = pm_runtime_set_active(dev);
1612 	if (err)
1613 		return err;
1614 
1615 	err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
1616 	if (err)
1617 		return err;
1618 
1619 	return devm_pm_runtime_enable(dev);
1620 }
1621 EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
1622 
1623 static void pm_runtime_disable_action(void *data)
1624 {
1625 	pm_runtime_dont_use_autosuspend(data);
1626 	pm_runtime_disable(data);
1627 }
1628 
1629 /**
1630  * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
1631  *
1632  * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
1633  * you at driver exit time if needed.
1634  *
1635  * @dev: Device to handle.
1636  */
1637 int devm_pm_runtime_enable(struct device *dev)
1638 {
1639 	pm_runtime_enable(dev);
1640 
1641 	return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
1642 }
1643 EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
1644 
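/*
 * Illustrative usage sketch (not part of the original file): a probe routine
 * using the devres variant needs no matching disable call in its error paths
 * or remove callback; foo_probe() is a hypothetical driver entry point.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = devm_pm_runtime_enable(&pdev->dev);
 *		if (ret)
 *			return ret;
 *
 *		return 0;
 *	}
 */
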
1645 static void pm_runtime_put_noidle_action(void *data)
1646 {
1647 	pm_runtime_put_noidle(data);
1648 }
1649 
1650 /**
1651  * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
1652  *
1653  * @dev: Device to handle.
1654  */
1655 int devm_pm_runtime_get_noresume(struct device *dev)
1656 {
1657 	pm_runtime_get_noresume(dev);
1658 
1659 	return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
1660 }
1661 EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
1662 
1663 /**
1664  * pm_runtime_forbid - Block runtime PM of a device.
1665  * @dev: Device to handle.
1666  *
1667  * Increase the device's usage count and clear its power.runtime_auto flag,
1668  * so that it cannot be suspended at run time until pm_runtime_allow() is called
1669  * for it.
1670  */
1671 void pm_runtime_forbid(struct device *dev)
1672 {
1673 	spin_lock_irq(&dev->power.lock);
1674 	if (!dev->power.runtime_auto)
1675 		goto out;
1676 
1677 	dev->power.runtime_auto = false;
1678 	atomic_inc(&dev->power.usage_count);
1679 	rpm_resume(dev, 0);
1680 
1681  out:
1682 	spin_unlock_irq(&dev->power.lock);
1683 }
1684 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1685 
1686 /**
1687  * pm_runtime_allow - Unblock runtime PM of a device.
1688  * @dev: Device to handle.
1689  *
1690  * Decrease the device's usage count and set its power.runtime_auto flag.
1691  */
1692 void pm_runtime_allow(struct device *dev)
1693 {
1694 	int ret;
1695 
1696 	spin_lock_irq(&dev->power.lock);
1697 	if (dev->power.runtime_auto)
1698 		goto out;
1699 
1700 	dev->power.runtime_auto = true;
1701 	ret = rpm_drop_usage_count(dev);
1702 	if (ret == 0)
1703 		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1704 	else if (ret > 0)
1705 		trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
1706 
1707  out:
1708 	spin_unlock_irq(&dev->power.lock);
1709 }
1710 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1711 
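/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * wants runtime PM to stay off until user space opts in calls
 * pm_runtime_forbid() at probe time; writing "auto" to the device's
 * power/control sysfs attribute later ends up calling pm_runtime_allow().
 *
 *	pm_runtime_enable(dev);
 *	pm_runtime_forbid(dev);
 */
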
1712 /**
1713  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1714  * @dev: Device to handle.
1715  *
1716  * Set the power.no_callbacks flag, which tells the PM core that this
1717  * device is power-managed through its parent and has no runtime PM
1718  * callbacks of its own.  The runtime sysfs attributes will be removed.
1719  */
1720 void pm_runtime_no_callbacks(struct device *dev)
1721 {
1722 	spin_lock_irq(&dev->power.lock);
1723 	dev->power.no_callbacks = 1;
1724 	spin_unlock_irq(&dev->power.lock);
1725 	if (device_is_registered(dev))
1726 		rpm_sysfs_remove(dev);
1727 }
1728 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
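
/*
 * Illustrative note, not part of this file: this is typically called by
 * subsystem code for a purely logical child device that shares its parent's
 * power state, after that child has been registered:
 *
 *	pm_runtime_no_callbacks(child);		// "child" is a hypothetical,
 *						// already registered device
 */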
1729 
1730 /**
1731  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1732  * @dev: Device to handle
1733  *
1734  * Set the power.irq_safe flag, which tells the PM core that the
1735  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1736  * always be invoked with the spinlock held and interrupts disabled.  It also
1737  * causes the parent's usage counter to be permanently incremented, preventing
1738  * the parent from runtime suspending -- otherwise an irq-safe child might have
1739  * to wait for a non-irq-safe parent.
1740  */
1741 void pm_runtime_irq_safe(struct device *dev)
1742 {
1743 	if (dev->parent)
1744 		pm_runtime_get_sync(dev->parent);
1745 
1746 	spin_lock_irq(&dev->power.lock);
1747 	dev->power.irq_safe = 1;
1748 	spin_unlock_irq(&dev->power.lock);
1749 }
1750 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
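
/*
 * Illustrative sketch, not part of this file: a driver whose runtime PM
 * callbacks are fast and never sleep can mark the device irq-safe in
 * ->probe(), which then allows synchronous runtime PM calls from atomic
 * context (the "foo" helpers are hypothetical):
 *
 *	pm_runtime_irq_safe(dev);
 *	...
 *	// e.g. from an interrupt handler or under a spinlock:
 *	pm_runtime_get_sync(dev);
 *	foo_write_registers(foo);
 *	pm_runtime_put(dev);
 */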
1751 
1752 /**
1753  * update_autosuspend - Handle a change to a device's autosuspend settings.
1754  * @dev: Device to handle.
1755  * @old_delay: The former autosuspend_delay value.
1756  * @old_use: The former use_autosuspend value.
1757  *
1758  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1759  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1760  *
1761  * This function must be called under dev->power.lock with interrupts disabled.
1762  */
1763 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1764 {
1765 	int delay = dev->power.autosuspend_delay;
1766 
1767 	/* Should runtime suspend be prevented now? */
1768 	if (dev->power.use_autosuspend && delay < 0) {
1769 
1770 		/* If it used to be allowed then prevent it. */
1771 		if (!old_use || old_delay >= 0) {
1772 			atomic_inc(&dev->power.usage_count);
1773 			rpm_resume(dev, 0);
1774 		} else {
1775 			trace_rpm_usage(dev, 0);
1776 		}
1777 	}
1778 
1779 	/* Runtime suspend should be allowed now. */
1780 	else {
1781 
1782 		/* If it used to be prevented then allow it. */
1783 		if (old_use && old_delay < 0)
1784 			atomic_dec(&dev->power.usage_count);
1785 
1786 		/* Maybe we can autosuspend now. */
1787 		rpm_idle(dev, RPM_AUTO);
1788 	}
1789 }
1790 
1791 /**
1792  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1793  * @dev: Device to handle.
1794  * @delay: Value of the new delay in milliseconds.
1795  *
1796  * Set the device's power.autosuspend_delay value.  If it changes to negative
1797  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1798  * changes the other way, allow runtime suspends.
1799  */
1800 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1801 {
1802 	int old_delay, old_use;
1803 
1804 	spin_lock_irq(&dev->power.lock);
1805 	old_delay = dev->power.autosuspend_delay;
1806 	old_use = dev->power.use_autosuspend;
1807 	dev->power.autosuspend_delay = delay;
1808 	update_autosuspend(dev, old_delay, old_use);
1809 	spin_unlock_irq(&dev->power.lock);
1810 }
1811 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1812 
1813 /**
1814  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1815  * @dev: Device to handle.
1816  * @use: New value for use_autosuspend.
1817  *
1818  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1819  * suspends as needed.
1820  */
1821 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1822 {
1823 	int old_delay, old_use;
1824 
1825 	spin_lock_irq(&dev->power.lock);
1826 	old_delay = dev->power.autosuspend_delay;
1827 	old_use = dev->power.use_autosuspend;
1828 	dev->power.use_autosuspend = use;
1829 	update_autosuspend(dev, old_delay, old_use);
1830 	spin_unlock_irq(&dev->power.lock);
1831 }
1832 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
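
/*
 * Illustrative sketch, not part of this file (hypothetical "foo" helpers):
 * the usual autosuspend pattern is to configure the delay once in ->probe()
 * and then mark the device busy before dropping the usage count in the I/O
 * paths, so that the device is suspended only after it has been idle for the
 * configured delay.
 *
 *	// probe:
 *	pm_runtime_set_autosuspend_delay(dev, 100);	// 100 ms of idleness
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 *	// I/O path:
 *	pm_runtime_get_sync(dev);
 *	foo_do_transfer(foo);
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */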
1833 
1834 /**
1835  * pm_runtime_init - Initialize runtime PM fields in given device object.
1836  * @dev: Device object to initialize.
1837  */
1838 void pm_runtime_init(struct device *dev)
1839 {
1840 	dev->power.runtime_status = RPM_SUSPENDED;
1841 	dev->power.last_status = RPM_INVALID;
1842 	dev->power.idle_notification = false;
1843 
1844 	dev->power.disable_depth = 1;
1845 	atomic_set(&dev->power.usage_count, 0);
1846 
1847 	dev->power.runtime_error = 0;
1848 
1849 	atomic_set(&dev->power.child_count, 0);
1850 	pm_suspend_ignore_children(dev, false);
1851 	dev->power.runtime_auto = true;
1852 
1853 	dev->power.request_pending = false;
1854 	dev->power.request = RPM_REQ_NONE;
1855 	dev->power.deferred_resume = false;
1856 	dev->power.needs_force_resume = false;
1857 	INIT_WORK(&dev->power.work, pm_runtime_work);
1858 
1859 	dev->power.timer_expires = 0;
1860 	hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
1861 		      HRTIMER_MODE_ABS);
1862 
1863 	init_waitqueue_head(&dev->power.wait_queue);
1864 }
1865 
1866 /**
1867  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1868  * @dev: Device object to re-initialize.
1869  */
1870 void pm_runtime_reinit(struct device *dev)
1871 {
1872 	if (!pm_runtime_enabled(dev)) {
1873 		if (dev->power.runtime_status == RPM_ACTIVE)
1874 			pm_runtime_set_suspended(dev);
1875 		if (dev->power.irq_safe) {
1876 			spin_lock_irq(&dev->power.lock);
1877 			dev->power.irq_safe = 0;
1878 			spin_unlock_irq(&dev->power.lock);
1879 			if (dev->parent)
1880 				pm_runtime_put(dev->parent);
1881 		}
1882 	}
1883 	/*
1884 	 * Clear power.needs_force_resume in case it has been set by
1885 	 * pm_runtime_force_suspend() invoked from a driver remove callback.
1886 	 */
1887 	dev->power.needs_force_resume = false;
1888 }
1889 
1890 /**
1891  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1892  * @dev: Device object being removed from device hierarchy.
1893  */
1894 void pm_runtime_remove(struct device *dev)
1895 {
1896 	__pm_runtime_disable(dev, false);
1897 	pm_runtime_reinit(dev);
1898 }
1899 
1900 /**
1901  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1902  * @dev: Consumer device.
1903  */
1904 void pm_runtime_get_suppliers(struct device *dev)
1905 {
1906 	struct device_link *link;
1907 	int idx;
1908 
1909 	idx = device_links_read_lock();
1910 
1911 	dev_for_each_link_to_supplier(link, dev)
1912 		if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
1913 			link->supplier_preactivated = true;
1914 			pm_runtime_get_sync(link->supplier);
1915 		}
1916 
1917 	device_links_read_unlock(idx);
1918 }
1919 
1920 /**
1921  * pm_runtime_put_suppliers - Drop references to supplier devices.
1922  * @dev: Consumer device.
1923  */
1924 void pm_runtime_put_suppliers(struct device *dev)
1925 {
1926 	struct device_link *link;
1927 	int idx;
1928 
1929 	idx = device_links_read_lock();
1930 
1931 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1932 				device_links_read_lock_held())
1933 		if (link->supplier_preactivated) {
1934 			link->supplier_preactivated = false;
1935 			pm_runtime_put(link->supplier);
1936 		}
1937 
1938 	device_links_read_unlock(idx);
1939 }
1940 
1941 void pm_runtime_new_link(struct device *dev)
1942 {
1943 	spin_lock_irq(&dev->power.lock);
1944 	dev->power.links_count++;
1945 	spin_unlock_irq(&dev->power.lock);
1946 }
1947 
1948 static void pm_runtime_drop_link_count(struct device *dev)
1949 {
1950 	spin_lock_irq(&dev->power.lock);
1951 	WARN_ON(dev->power.links_count == 0);
1952 	dev->power.links_count--;
1953 	spin_unlock_irq(&dev->power.lock);
1954 }
1955 
1956 /**
1957  * pm_runtime_drop_link - Prepare for device link removal.
1958  * @link: Device link going away.
1959  *
1960  * Drop the link count of the consumer end of @link and decrement the supplier
1961  * device's runtime PM usage counter as many times as needed to drop all of the
1962  * PM runtime references to it from the consumer.
1963  */
1964 void pm_runtime_drop_link(struct device_link *link)
1965 {
1966 	if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
1967 		return;
1968 
1969 	pm_runtime_drop_link_count(link->consumer);
1970 	pm_runtime_release_supplier(link);
1971 	pm_request_idle(link->supplier);
1972 }
1973 
1974 static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
1975 {
1976 	/*
1977 	 * Setting power.strict_midlayer means that the middle layer
1978 	 * code does not want its runtime PM callbacks to be invoked via
1979 	 * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
1980 	 * return a direct pointer to the driver callback in that case.
1981 	 */
1982 	if (dev_pm_strict_midlayer_is_set(dev))
1983 		return __rpm_get_driver_callback(dev, cb_offset);
1984 
1985 	return __rpm_get_callback(dev, cb_offset);
1986 }
1987 
1988 #define GET_CALLBACK(dev, callback) \
1989 		get_callback(dev, offsetof(struct dev_pm_ops, callback))
1990 
1991 /**
1992  * pm_runtime_force_suspend - Force a device into suspend state if needed.
1993  * @dev: Device to suspend.
1994  *
1995  * Disable runtime PM so that we can safely check the device's runtime PM status
1996  * and, if it is active, invoke its ->runtime_suspend callback to suspend it and
1997  * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
1998  * usage and children counters don't indicate that the device was in use before
1999  * the system-wide transition under way, decrement its parent's children counter
2000  * (if there is a parent).  Keep runtime PM disabled to preserve the state
2001  * unless we encounter errors.
2002  *
2003  * Typically, this function may be invoked from a system suspend callback to
2004  * make sure that the device is put into a low-power state.  It should only be
2005  * used during system-wide PM transitions to sleep states, and it assumes that
2006  * the analogous pm_runtime_force_resume() will be used to resume the device.
2007  */
2008 int pm_runtime_force_suspend(struct device *dev)
2009 {
2010 	int (*callback)(struct device *);
2011 	int ret;
2012 
2013 	pm_runtime_disable(dev);
2014 	if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
2015 		return 0;
2016 
2017 	callback = GET_CALLBACK(dev, runtime_suspend);
2018 
2019 	dev_pm_enable_wake_irq_check(dev, true);
2020 	ret = callback ? callback(dev) : 0;
2021 	if (ret)
2022 		goto err;
2023 
2024 	dev_pm_enable_wake_irq_complete(dev);
2025 
2026 	/*
2027 	 * If the device can stay in suspend after the system-wide transition
2028 	 * to the working state that will follow, drop the children counter of
2029 	 * its parent and the usage counters of its suppliers.  Otherwise, set
2030 	 * power.needs_force_resume to let pm_runtime_force_resume() know that
2031 	 * the device needs to be taken care of and to prevent this function
2032 	 * from handling the device again if it is passed to this function
2033 	 * once more.
2034 	 */
2035 	if (pm_runtime_need_not_resume(dev))
2036 		pm_runtime_set_suspended(dev);
2037 	else
2038 		dev->power.needs_force_resume = true;
2039 
2040 	return 0;
2041 
2042 err:
2043 	dev_pm_disable_wake_irq_check(dev, true);
2044 	pm_runtime_enable(dev);
2045 	return ret;
2046 }
2047 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
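
/*
 * Illustrative sketch, not part of this file: a driver whose runtime PM
 * callbacks are also sufficient for system-wide sleep can reuse them by
 * pointing its system sleep operations at pm_runtime_force_suspend() and
 * pm_runtime_force_resume() (the "foo" callbacks are hypothetical):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 */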
2048 
2049 #ifdef CONFIG_PM_SLEEP
2050 
2051 /**
2052  * pm_runtime_force_resume - Force a device into resume state if needed.
2053  * @dev: Device to resume.
2054  *
2055  * This function expects that either pm_runtime_force_suspend() has put the
2056  * device into a low-power state prior to calling it, or the device had been
2057  * runtime-suspended before the preceding system-wide suspend transition and it
2058  * was left in suspend during that transition.
2059  *
2060  * The actions carried out by pm_runtime_force_suspend(), or by a runtime
2061  * suspend in general, are reversed and the device is brought back into full
2062  * power if it is expected to be used on system resume, which is the case when
2063  * its needs_force_resume flag is set or when its smart_suspend flag is set and
2064  * its runtime PM status is "active".
2065  *
2066  * In other cases, the resume is deferred to be managed via runtime PM.
2067  *
2068  * Typically, this function may be invoked from a system resume callback.
2069  */
2070 int pm_runtime_force_resume(struct device *dev)
2071 {
2072 	int (*callback)(struct device *);
2073 	int ret = 0;
2074 
2075 	if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
2076 	    pm_runtime_status_suspended(dev)))
2077 		goto out;
2078 
2079 	callback = GET_CALLBACK(dev, runtime_resume);
2080 
2081 	dev_pm_disable_wake_irq_check(dev, false);
2082 	ret = callback ? callback(dev) : 0;
2083 	if (ret) {
2084 		pm_runtime_set_suspended(dev);
2085 		dev_pm_enable_wake_irq_check(dev, false);
2086 		goto out;
2087 	}
2088 
2089 	pm_runtime_mark_last_busy(dev);
2090 
2091 out:
2092 	/*
2093 	 * The smart_suspend flag can be cleared here because it is not going
2094 	 * to be necessary until the next system-wide suspend transition that
2095 	 * will update it again.
2096 	 */
2097 	dev->power.smart_suspend = false;
2098 	/*
2099 	 * Also clear needs_force_resume so that this function skips devices
2100 	 * that it has already handled once.
2101 	 */
2102 	dev->power.needs_force_resume = false;
2103 
2104 	pm_runtime_enable(dev);
2105 	return ret;
2106 }
2107 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
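
/*
 * Illustrative note, not part of this file: the pattern sketched after
 * pm_runtime_force_suspend() above is also available through the
 * DEFINE_RUNTIME_DEV_PM_OPS() helper, which wires pm_runtime_force_suspend()
 * and pm_runtime_force_resume() up as the system sleep callbacks (the "foo"
 * names are hypothetical):
 *
 *	static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
 *					 foo_runtime_resume, NULL);
 */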
2108 
2109 bool pm_runtime_need_not_resume(struct device *dev)
2110 {
2111 	return atomic_read(&dev->power.usage_count) <= 1 &&
2112 		(atomic_read(&dev->power.child_count) == 0 ||
2113 		 dev->power.ignore_children);
2114 }
2115 
2116 #endif /* CONFIG_PM_SLEEP */
2117