xref: /linux/drivers/base/power/wakeup.c (revision 005438a8eef063495ac059d128eea71b58de50e5)
1 /*
2  * drivers/base/power/wakeup.c - System wakeup events framework
3  *
4  * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5  *
6  * This file is released under the GPLv2.
7  */
8 
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/sched.h>
12 #include <linux/capability.h>
13 #include <linux/export.h>
14 #include <linux/suspend.h>
15 #include <linux/seq_file.h>
16 #include <linux/debugfs.h>
17 #include <linux/pm_wakeirq.h>
18 #include <trace/events/power.h>
19 
20 #include "power.h"
21 
22 /*
23  * If set, the suspend/hibernate code will abort transitions to a sleep state
24  * if wakeup events are registered during or immediately before the transition.
25  */
26 bool events_check_enabled __read_mostly;
27 
28 /* If set and the system is suspending, terminate the suspend. */
29 static bool pm_abort_suspend __read_mostly;
30 
31 /*
32  * Combined counters of registered wakeup events and wakeup events in progress.
33  * They need to be modified together atomically, so it's better to use one
34  * atomic variable to hold them both.
35  */
36 static atomic_t combined_event_count = ATOMIC_INIT(0);
37 
38 #define IN_PROGRESS_BITS	(sizeof(int) * 4)
39 #define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
40 
41 static void split_counters(unsigned int *cnt, unsigned int *inpr)
42 {
43 	unsigned int comb = atomic_read(&combined_event_count);
44 
45 	*cnt = (comb >> IN_PROGRESS_BITS);
46 	*inpr = comb & MAX_IN_PROGRESS;
47 }
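
/*
 * Worked example (illustrative): with a 32-bit int, IN_PROGRESS_BITS is 16
 * and MAX_IN_PROGRESS is 0xffff, so combined_event_count packs the count of
 * registered events into the upper half and the count of events in progress
 * into the lower half.  A combined value of 0x00030002 thus splits into
 * cnt = 3 and inpr = 2.  atomic_inc_return() in wakeup_source_activate()
 * bumps inpr, while atomic_add_return(MAX_IN_PROGRESS, ...) in
 * wakeup_source_deactivate() adds 0x10000 - 1, moving one event from
 * "in progress" to "registered" in a single atomic operation.
 */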
48 
49 /* A preserved old value of the events counter. */
50 static unsigned int saved_count;
51 
52 static DEFINE_SPINLOCK(events_lock);
53 
54 static void pm_wakeup_timer_fn(unsigned long data);
55 
56 static LIST_HEAD(wakeup_sources);
57 
58 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
59 
60 static struct wakeup_source deleted_ws = {
61 	.name = "deleted",
62 	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
63 };
64 
65 /**
66  * wakeup_source_prepare - Prepare a new wakeup source for initialization.
67  * @ws: Wakeup source to prepare.
68  * @name: Pointer to the name of the new wakeup source.
69  *
70  * Callers must ensure that the @name string won't be freed while @ws is still in
71  * use.
72  */
73 void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
74 {
75 	if (ws) {
76 		memset(ws, 0, sizeof(*ws));
77 		ws->name = name;
78 	}
79 }
80 EXPORT_SYMBOL_GPL(wakeup_source_prepare);
81 
82 /**
83  * wakeup_source_create - Create a struct wakeup_source object.
84  * @name: Name of the new wakeup source.
85  */
86 struct wakeup_source *wakeup_source_create(const char *name)
87 {
88 	struct wakeup_source *ws;
89 
90 	ws = kmalloc(sizeof(*ws), GFP_KERNEL);
91 	if (!ws)
92 		return NULL;
93 
94 	wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
95 	return ws;
96 }
97 EXPORT_SYMBOL_GPL(wakeup_source_create);
98 
99 /**
100  * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
101  * @ws: Wakeup source to prepare for destruction.
102  *
103  * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
104  * be run in parallel with this function for the same wakeup source object.
105  */
106 void wakeup_source_drop(struct wakeup_source *ws)
107 {
108 	if (!ws)
109 		return;
110 
111 	del_timer_sync(&ws->timer);
112 	__pm_relax(ws);
113 }
114 EXPORT_SYMBOL_GPL(wakeup_source_drop);
115 
116 /*
117  * Record the statistics of a wakeup source being deleted into a dummy wakeup_source.
118  */
119 static void wakeup_source_record(struct wakeup_source *ws)
120 {
121 	unsigned long flags;
122 
123 	spin_lock_irqsave(&deleted_ws.lock, flags);
124 
125 	if (ws->event_count) {
126 		deleted_ws.total_time =
127 			ktime_add(deleted_ws.total_time, ws->total_time);
128 		deleted_ws.prevent_sleep_time =
129 			ktime_add(deleted_ws.prevent_sleep_time,
130 				  ws->prevent_sleep_time);
131 		deleted_ws.max_time =
132 			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
133 				deleted_ws.max_time : ws->max_time;
134 		deleted_ws.event_count += ws->event_count;
135 		deleted_ws.active_count += ws->active_count;
136 		deleted_ws.relax_count += ws->relax_count;
137 		deleted_ws.expire_count += ws->expire_count;
138 		deleted_ws.wakeup_count += ws->wakeup_count;
139 	}
140 
141 	spin_unlock_irqrestore(&deleted_ws.lock, flags);
142 }
143 
144 /**
145  * wakeup_source_destroy - Destroy a struct wakeup_source object.
146  * @ws: Wakeup source to destroy.
147  *
148  * Use only for wakeup source objects created with wakeup_source_create().
149  */
150 void wakeup_source_destroy(struct wakeup_source *ws)
151 {
152 	if (!ws)
153 		return;
154 
155 	wakeup_source_drop(ws);
156 	wakeup_source_record(ws);
157 	kfree(ws->name);
158 	kfree(ws);
159 }
160 EXPORT_SYMBOL_GPL(wakeup_source_destroy);
161 
162 /**
163  * wakeup_source_add - Add given object to the list of wakeup sources.
164  * @ws: Wakeup source object to add to the list.
165  */
166 void wakeup_source_add(struct wakeup_source *ws)
167 {
168 	unsigned long flags;
169 
170 	if (WARN_ON(!ws))
171 		return;
172 
173 	spin_lock_init(&ws->lock);
174 	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
175 	ws->active = false;
176 	ws->last_time = ktime_get();
177 
178 	spin_lock_irqsave(&events_lock, flags);
179 	list_add_rcu(&ws->entry, &wakeup_sources);
180 	spin_unlock_irqrestore(&events_lock, flags);
181 }
182 EXPORT_SYMBOL_GPL(wakeup_source_add);
183 
184 /**
185  * wakeup_source_remove - Remove given object from the wakeup sources list.
186  * @ws: Wakeup source object to remove from the list.
187  */
188 void wakeup_source_remove(struct wakeup_source *ws)
189 {
190 	unsigned long flags;
191 
192 	if (WARN_ON(!ws))
193 		return;
194 
195 	spin_lock_irqsave(&events_lock, flags);
196 	list_del_rcu(&ws->entry);
197 	spin_unlock_irqrestore(&events_lock, flags);
198 	synchronize_rcu();
199 }
200 EXPORT_SYMBOL_GPL(wakeup_source_remove);
201 
202 /**
203  * wakeup_source_register - Create wakeup source and add it to the list.
204  * @name: Name of the wakeup source to register.
205  */
206 struct wakeup_source *wakeup_source_register(const char *name)
207 {
208 	struct wakeup_source *ws;
209 
210 	ws = wakeup_source_create(name);
211 	if (ws)
212 		wakeup_source_add(ws);
213 
214 	return ws;
215 }
216 EXPORT_SYMBOL_GPL(wakeup_source_register);
217 
218 /**
219  * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
220  * @ws: Wakeup source object to unregister.
221  */
222 void wakeup_source_unregister(struct wakeup_source *ws)
223 {
224 	if (ws) {
225 		wakeup_source_remove(ws);
226 		wakeup_source_destroy(ws);
227 	}
228 }
229 EXPORT_SYMBOL_GPL(wakeup_source_unregister);
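
/*
 * Example usage (illustrative sketch; "my_ws" and the surrounding driver
 * code are hypothetical): a driver that manages a raw wakeup source not
 * tied to a struct device might combine the calls above as follows.
 *
 *	struct wakeup_source *my_ws;
 *
 *	my_ws = wakeup_source_register("my_driver");
 *	if (!my_ws)
 *		return -ENOMEM;
 *
 *	Open a "no suspend" period, process the event, then close it:
 *
 *	__pm_stay_awake(my_ws);
 *	...
 *	__pm_relax(my_ws);
 *
 *	Finally, remove the source from the list and destroy it:
 *
 *	wakeup_source_unregister(my_ws);
 */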
230 
231 /**
232  * device_wakeup_attach - Attach a wakeup source object to a device object.
233  * @dev: Device to handle.
234  * @ws: Wakeup source object to attach to @dev.
235  *
236  * This causes @dev to be treated as a wakeup device.
237  */
238 static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
239 {
240 	spin_lock_irq(&dev->power.lock);
241 	if (dev->power.wakeup) {
242 		spin_unlock_irq(&dev->power.lock);
243 		return -EEXIST;
244 	}
245 	dev->power.wakeup = ws;
246 	spin_unlock_irq(&dev->power.lock);
247 	return 0;
248 }
249 
250 /**
251  * device_wakeup_enable - Enable given device to be a wakeup source.
252  * @dev: Device to handle.
253  *
254  * Create a wakeup source object, register it and attach it to @dev.
255  */
256 int device_wakeup_enable(struct device *dev)
257 {
258 	struct wakeup_source *ws;
259 	int ret;
260 
261 	if (!dev || !dev->power.can_wakeup)
262 		return -EINVAL;
263 
264 	ws = wakeup_source_register(dev_name(dev));
265 	if (!ws)
266 		return -ENOMEM;
267 
268 	ret = device_wakeup_attach(dev, ws);
269 	if (ret)
270 		wakeup_source_unregister(ws);
271 
272 	return ret;
273 }
274 EXPORT_SYMBOL_GPL(device_wakeup_enable);
275 
276 /**
277  * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
278  * @dev: Device to handle
279  * @wakeirq: Device specific wakeirq entry
280  *
281  * Attach a device wakeirq to the wakeup source so the device
282  * wake IRQ can be configured automatically for suspend and
283  * resume.
284  */
285 int device_wakeup_attach_irq(struct device *dev,
286 			     struct wake_irq *wakeirq)
287 {
288 	struct wakeup_source *ws;
289 	int ret = 0;
290 
291 	spin_lock_irq(&dev->power.lock);
292 	ws = dev->power.wakeup;
293 	if (!ws) {
294 		dev_err(dev, "forgot to call device_init_wakeup?\n");
295 		ret = -EINVAL;
296 		goto unlock;
297 	}
298 
299 	if (ws->wakeirq) {
300 		ret = -EEXIST;
301 		goto unlock;
302 	}
303 
304 	ws->wakeirq = wakeirq;
305 
306 unlock:
307 	spin_unlock_irq(&dev->power.lock);
308 
309 	return ret;
310 }
311 
312 /**
313  * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
314  * @dev: Device to handle
315  *
316  * Removes a device wakeirq from the wakeup source.
317  */
318 void device_wakeup_detach_irq(struct device *dev)
319 {
320 	struct wakeup_source *ws;
321 
322 	spin_lock_irq(&dev->power.lock);
323 	ws = dev->power.wakeup;
324 	if (!ws)
325 		goto unlock;
326 
327 	ws->wakeirq = NULL;
328 
329 unlock:
330 	spin_unlock_irq(&dev->power.lock);
331 }
332 
333 /**
334  * device_wakeup_arm_wake_irqs(void)
335  *
336  * Iterates over the list of device wakeirqs to arm them.
337  */
338 void device_wakeup_arm_wake_irqs(void)
339 {
340 	struct wakeup_source *ws;
341 
342 	rcu_read_lock();
343 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
344 		if (ws->wakeirq)
345 			dev_pm_arm_wake_irq(ws->wakeirq);
346 	}
347 	rcu_read_unlock();
348 }
349 
350 /**
351  * device_wakeup_disarm_wake_irqs(void)
352  *
353  * Iterates over the list of device wakeirqs to disarm them.
354  */
355 void device_wakeup_disarm_wake_irqs(void)
356 {
357 	struct wakeup_source *ws;
358 
359 	rcu_read_lock();
360 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
361 		if (ws->wakeirq)
362 			dev_pm_disarm_wake_irq(ws->wakeirq);
363 	}
364 	rcu_read_unlock();
365 }
366 
367 /**
368  * device_wakeup_detach - Detach a device's wakeup source object from it.
369  * @dev: Device to detach the wakeup source object from.
370  *
371  * After it returns, @dev will not be treated as a wakeup device any more.
372  */
373 static struct wakeup_source *device_wakeup_detach(struct device *dev)
374 {
375 	struct wakeup_source *ws;
376 
377 	spin_lock_irq(&dev->power.lock);
378 	ws = dev->power.wakeup;
379 	dev->power.wakeup = NULL;
380 	spin_unlock_irq(&dev->power.lock);
381 	return ws;
382 }
383 
384 /**
385  * device_wakeup_disable - Do not regard a device as a wakeup source any more.
386  * @dev: Device to handle.
387  *
388  * Detach the @dev's wakeup source object from it, unregister this wakeup source
389  * object and destroy it.
390  */
391 int device_wakeup_disable(struct device *dev)
392 {
393 	struct wakeup_source *ws;
394 
395 	if (!dev || !dev->power.can_wakeup)
396 		return -EINVAL;
397 
398 	ws = device_wakeup_detach(dev);
399 	if (ws)
400 		wakeup_source_unregister(ws);
401 
402 	return 0;
403 }
404 EXPORT_SYMBOL_GPL(device_wakeup_disable);
405 
406 /**
407  * device_set_wakeup_capable - Set/reset device wakeup capability flag.
408  * @dev: Device to handle.
409  * @capable: Whether or not @dev is capable of waking up the system from sleep.
410  *
411  * If @capable is set, set the @dev's power.can_wakeup flag and add its
412  * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
413  * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
414  *
415  * This function may sleep, so it must not be called from any context where
416  * sleeping is not allowed.
417  */
418 void device_set_wakeup_capable(struct device *dev, bool capable)
419 {
420 	if (!!dev->power.can_wakeup == !!capable)
421 		return;
422 
423 	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
424 		if (capable) {
425 			if (wakeup_sysfs_add(dev))
426 				return;
427 		} else {
428 			wakeup_sysfs_remove(dev);
429 		}
430 	}
431 	dev->power.can_wakeup = capable;
432 }
433 EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
434 
435 /**
436  * device_init_wakeup - Device wakeup initialization.
437  * @dev: Device to handle.
438  * @enable: Whether or not to enable @dev as a wakeup device.
439  *
440  * By default, most devices should leave wakeup disabled.  The exceptions are
441  * devices that everyone expects to be wakeup sources: keyboards, power buttons,
442  * possibly network interfaces, etc.  Also, devices that don't generate their
443  * own wakeup requests but merely forward requests from one bus to another
444  * (like PCI bridges) should have wakeup enabled by default.
445  */
446 int device_init_wakeup(struct device *dev, bool enable)
447 {
448 	int ret = 0;
449 
450 	if (!dev)
451 		return -EINVAL;
452 
453 	if (enable) {
454 		device_set_wakeup_capable(dev, true);
455 		ret = device_wakeup_enable(dev);
456 	} else {
457 		if (dev->power.can_wakeup)
458 			device_wakeup_disable(dev);
459 
460 		device_set_wakeup_capable(dev, false);
461 	}
462 
463 	return ret;
464 }
465 EXPORT_SYMBOL_GPL(device_init_wakeup);
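
/*
 * Example usage (illustrative sketch; foo_probe()/foo_remove() and their
 * bodies are hypothetical): a driver typically marks its device as
 * wakeup-capable and enables it in one step from probe, and disables it
 * again on remove.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *		device_init_wakeup(&pdev->dev, true);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		device_init_wakeup(&pdev->dev, false);
 *		return 0;
 *	}
 */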
466 
467 /**
468  * device_set_wakeup_enable - Enable or disable a device to wake up the system.
469  * @dev: Device to handle.
470  */
471 int device_set_wakeup_enable(struct device *dev, bool enable)
472 {
473 	if (!dev || !dev->power.can_wakeup)
474 		return -EINVAL;
475 
476 	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
477 }
478 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
479 
480 /**
481  * wakeup_source_not_registered - validate the given wakeup source.
482  * @ws: Wakeup source to be validated.
483  */
484 static bool wakeup_source_not_registered(struct wakeup_source *ws)
485 {
486 	/*
487 	 * Use the timer struct to check if the given source has been initialized
488 	 * by wakeup_source_add().
489 	 */
490 	return ws->timer.function != pm_wakeup_timer_fn ||
491 		   ws->timer.data != (unsigned long)ws;
492 }
493 
494 /*
495  * The functions below use the observation that each wakeup event starts a
496  * period in which the system should not be suspended.  When this period ends
497  * depends on how the wakeup event is going to be processed after being
498  * detected, and all of the possible cases can be divided into two distinct
499  * groups.
500  *
501  * First, a wakeup event may be detected by the same functional unit that will
502  * carry out the entire processing of it and possibly will pass it to user space
503  * for further processing.  In that case the functional unit that has detected
504  * the event may later "close" the "no suspend" period associated with it
505  * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
506  * pm_relax(), balanced with each other, is supposed to be used in such
507  * situations.
508  *
509  * Second, a wakeup event may be detected by one functional unit and processed
510  * by another one.  In that case the unit that has detected it cannot really
511  * "close" the "no suspend" period associated with it, unless it knows in
512  * advance what's going to happen to the event during processing.  This
513  * knowledge, however, may not be available to it, so it can simply specify time
514  * to wait before the system can be suspended and pass it as the second
515  * argument of pm_wakeup_event().
516  *
517  * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
518  * "no suspend" period will be ended either by the pm_relax(), or by the timer
519  * function executed when the timer expires, whichever comes first.
520  */
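
/*
 * Example usage (illustrative sketch; the device pointer, the IRQ handler
 * and the 100 ms grace period are hypothetical) of the two cases described
 * above.
 *
 * Case 1 - the same unit detects and fully processes the event:
 *
 *	pm_stay_awake(dev);
 *	...handle the event, possibly passing it to user space...
 *	pm_relax(dev);
 *
 * Case 2 - the detecting unit does not know when processing will end, so it
 * only guarantees a grace period before the system may be suspended again:
 *
 *	static irqreturn_t foo_wake_handler(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		pm_wakeup_event(dev, 100);
 *		return IRQ_HANDLED;
 *	}
 */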
521 
522 /**
523  * wakeup_source_activate - Mark given wakeup source as active.
524  * @ws: Wakeup source to handle.
525  *
526  * Update the @ws' statistics and, if @ws has just been activated, notify the PM
527  * core of the event by incrementing the counter of wakeup events being
528  * processed.
529  */
530 static void wakeup_source_activate(struct wakeup_source *ws)
531 {
532 	unsigned int cec;
533 
534 	if (WARN_ONCE(wakeup_source_not_registered(ws),
535 			"unregistered wakeup source\n"))
536 		return;
537 
538 	/*
539 	 * active wakeup source should bring the system
540 	 * out of PM_SUSPEND_FREEZE state
541 	 */
542 	freeze_wake();
543 
544 	ws->active = true;
545 	ws->active_count++;
546 	ws->last_time = ktime_get();
547 	if (ws->autosleep_enabled)
548 		ws->start_prevent_time = ws->last_time;
549 
550 	/* Increment the counter of events in progress. */
551 	cec = atomic_inc_return(&combined_event_count);
552 
553 	trace_wakeup_source_activate(ws->name, cec);
554 }
555 
556 /**
557  * wakeup_source_report_event - Report wakeup event using the given source.
558  * @ws: Wakeup source to report the event for.
559  */
560 static void wakeup_source_report_event(struct wakeup_source *ws)
561 {
562 	ws->event_count++;
563 	/* This is racy, but the counter is approximate anyway. */
564 	if (events_check_enabled)
565 		ws->wakeup_count++;
566 
567 	if (!ws->active)
568 		wakeup_source_activate(ws);
569 }
570 
571 /**
572  * __pm_stay_awake - Notify the PM core of a wakeup event.
573  * @ws: Wakeup source object associated with the source of the event.
574  *
575  * It is safe to call this function from interrupt context.
576  */
577 void __pm_stay_awake(struct wakeup_source *ws)
578 {
579 	unsigned long flags;
580 
581 	if (!ws)
582 		return;
583 
584 	spin_lock_irqsave(&ws->lock, flags);
585 
586 	wakeup_source_report_event(ws);
587 	del_timer(&ws->timer);
588 	ws->timer_expires = 0;
589 
590 	spin_unlock_irqrestore(&ws->lock, flags);
591 }
592 EXPORT_SYMBOL_GPL(__pm_stay_awake);
593 
594 /**
595  * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
596  * @dev: Device the wakeup event is related to.
597  *
598  * Notify the PM core of a wakeup event (signaled by @dev) by calling
599  * __pm_stay_awake for the @dev's wakeup source object.
600  *
601  * Call this function after detecting a wakeup event if pm_relax() is going
602  * to be called directly after processing the event (and possibly passing it to
603  * user space for further processing).
604  */
605 void pm_stay_awake(struct device *dev)
606 {
607 	unsigned long flags;
608 
609 	if (!dev)
610 		return;
611 
612 	spin_lock_irqsave(&dev->power.lock, flags);
613 	__pm_stay_awake(dev->power.wakeup);
614 	spin_unlock_irqrestore(&dev->power.lock, flags);
615 }
616 EXPORT_SYMBOL_GPL(pm_stay_awake);
617 
618 #ifdef CONFIG_PM_AUTOSLEEP
619 static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
620 {
621 	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
622 	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
623 }
624 #else
625 static inline void update_prevent_sleep_time(struct wakeup_source *ws,
626 					     ktime_t now) {}
627 #endif
628 
629 /**
630  * wakeup_source_deactivate - Mark given wakeup source as inactive.
631  * @ws: Wakeup source to handle.
632  *
633  * Update the @ws' statistics and notify the PM core that the wakeup source has
634  * become inactive by decrementing the counter of wakeup events being processed
635  * and incrementing the counter of registered wakeup events.
636  */
637 static void wakeup_source_deactivate(struct wakeup_source *ws)
638 {
639 	unsigned int cnt, inpr, cec;
640 	ktime_t duration;
641 	ktime_t now;
642 
643 	ws->relax_count++;
644 	/*
645 	 * __pm_relax() may be called directly or from a timer function.
646 	 * If it is called directly right after the timer function has been
647 	 * started, but before the timer function calls __pm_relax(), it is
648 	 * possible that __pm_stay_awake() will be called in the meantime and
649 	 * will set ws->active.  Then, ws->active may be cleared immediately
650 	 * by the __pm_relax() called from the timer function, but in such a
651 	 * case ws->relax_count will be different from ws->active_count.
652 	 */
653 	if (ws->relax_count != ws->active_count) {
654 		ws->relax_count--;
655 		return;
656 	}
657 
658 	ws->active = false;
659 
660 	now = ktime_get();
661 	duration = ktime_sub(now, ws->last_time);
662 	ws->total_time = ktime_add(ws->total_time, duration);
663 	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
664 		ws->max_time = duration;
665 
666 	ws->last_time = now;
667 	del_timer(&ws->timer);
668 	ws->timer_expires = 0;
669 
670 	if (ws->autosleep_enabled)
671 		update_prevent_sleep_time(ws, now);
672 
673 	/*
674 	 * Increment the counter of registered wakeup events and decrement the
675 	 * counter of wakeup events in progress simultaneously.
676 	 */
677 	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
678 	trace_wakeup_source_deactivate(ws->name, cec);
679 
680 	split_counters(&cnt, &inpr);
681 	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
682 		wake_up(&wakeup_count_wait_queue);
683 }
684 
685 /**
686  * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
687  * @ws: Wakeup source object associated with the source of the event.
688  *
689  * Call this function for wakeup events whose processing started with calling
690  * __pm_stay_awake().
691  *
692  * It is safe to call it from interrupt context.
693  */
694 void __pm_relax(struct wakeup_source *ws)
695 {
696 	unsigned long flags;
697 
698 	if (!ws)
699 		return;
700 
701 	spin_lock_irqsave(&ws->lock, flags);
702 	if (ws->active)
703 		wakeup_source_deactivate(ws);
704 	spin_unlock_irqrestore(&ws->lock, flags);
705 }
706 EXPORT_SYMBOL_GPL(__pm_relax);
707 
708 /**
709  * pm_relax - Notify the PM core that processing of a wakeup event has ended.
710  * @dev: Device that signaled the event.
711  *
712  * Execute __pm_relax() for the @dev's wakeup source object.
713  */
714 void pm_relax(struct device *dev)
715 {
716 	unsigned long flags;
717 
718 	if (!dev)
719 		return;
720 
721 	spin_lock_irqsave(&dev->power.lock, flags);
722 	__pm_relax(dev->power.wakeup);
723 	spin_unlock_irqrestore(&dev->power.lock, flags);
724 }
725 EXPORT_SYMBOL_GPL(pm_relax);
726 
727 /**
728  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
729  * @data: Address of the wakeup source object associated with the event source.
730  *
731  * Call wakeup_source_deactivate() for the wakeup source whose address is stored
732  * in @data if it is currently active and its timer has not been canceled and
733  * the expiration time of the timer is not in the future.
734  */
735 static void pm_wakeup_timer_fn(unsigned long data)
736 {
737 	struct wakeup_source *ws = (struct wakeup_source *)data;
738 	unsigned long flags;
739 
740 	spin_lock_irqsave(&ws->lock, flags);
741 
742 	if (ws->active && ws->timer_expires
743 	    && time_after_eq(jiffies, ws->timer_expires)) {
744 		wakeup_source_deactivate(ws);
745 		ws->expire_count++;
746 	}
747 
748 	spin_unlock_irqrestore(&ws->lock, flags);
749 }
750 
751 /**
752  * __pm_wakeup_event - Notify the PM core of a wakeup event.
753  * @ws: Wakeup source object associated with the event source.
754  * @msec: Anticipated event processing time (in milliseconds).
755  *
756  * Notify the PM core of a wakeup event whose source is @ws that will take
757  * approximately @msec milliseconds to be processed by the kernel.  If @ws is
758  * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
759  * execute pm_wakeup_timer_fn() in the future.
760  *
761  * It is safe to call this function from interrupt context.
762  */
763 void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
764 {
765 	unsigned long flags;
766 	unsigned long expires;
767 
768 	if (!ws)
769 		return;
770 
771 	spin_lock_irqsave(&ws->lock, flags);
772 
773 	wakeup_source_report_event(ws);
774 
775 	if (!msec) {
776 		wakeup_source_deactivate(ws);
777 		goto unlock;
778 	}
779 
780 	expires = jiffies + msecs_to_jiffies(msec);
781 	if (!expires)
782 		expires = 1;
783 
784 	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
785 		mod_timer(&ws->timer, expires);
786 		ws->timer_expires = expires;
787 	}
788 
789  unlock:
790 	spin_unlock_irqrestore(&ws->lock, flags);
791 }
792 EXPORT_SYMBOL_GPL(__pm_wakeup_event);
793 
794 
795 /**
796  * pm_wakeup_event - Notify the PM core of a wakeup event.
797  * @dev: Device the wakeup event is related to.
798  * @msec: Anticipated event processing time (in milliseconds).
799  *
800  * Call __pm_wakeup_event() for the @dev's wakeup source object.
801  */
802 void pm_wakeup_event(struct device *dev, unsigned int msec)
803 {
804 	unsigned long flags;
805 
806 	if (!dev)
807 		return;
808 
809 	spin_lock_irqsave(&dev->power.lock, flags);
810 	__pm_wakeup_event(dev->power.wakeup, msec);
811 	spin_unlock_irqrestore(&dev->power.lock, flags);
812 }
813 EXPORT_SYMBOL_GPL(pm_wakeup_event);
814 
815 void pm_print_active_wakeup_sources(void)
816 {
817 	struct wakeup_source *ws;
818 	int active = 0;
819 	struct wakeup_source *last_activity_ws = NULL;
820 
821 	rcu_read_lock();
822 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
823 		if (ws->active) {
824 			pr_info("active wakeup source: %s\n", ws->name);
825 			active = 1;
826 		} else if (!active &&
827 			   (!last_activity_ws ||
828 			    ktime_to_ns(ws->last_time) >
829 			    ktime_to_ns(last_activity_ws->last_time))) {
830 			last_activity_ws = ws;
831 		}
832 	}
833 
834 	if (!active && last_activity_ws)
835 		pr_info("last active wakeup source: %s\n",
836 			last_activity_ws->name);
837 	rcu_read_unlock();
838 }
839 EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
840 
841 /**
842  * pm_wakeup_pending - Check if power transition in progress should be aborted.
843  *
844  * Compare the current number of registered wakeup events with its preserved
845  * value from the past and return true if new wakeup events have been registered
846  * since the old value was stored.  Also return true if the current number of
847  * wakeup events being processed is different from zero.
848  */
849 bool pm_wakeup_pending(void)
850 {
851 	unsigned long flags;
852 	bool ret = false;
853 
854 	spin_lock_irqsave(&events_lock, flags);
855 	if (events_check_enabled) {
856 		unsigned int cnt, inpr;
857 
858 		split_counters(&cnt, &inpr);
859 		ret = (cnt != saved_count || inpr > 0);
860 		events_check_enabled = !ret;
861 	}
862 	spin_unlock_irqrestore(&events_lock, flags);
863 
864 	if (ret) {
865 		pr_info("PM: Wakeup pending, aborting suspend\n");
866 		pm_print_active_wakeup_sources();
867 	}
868 
869 	return ret || pm_abort_suspend;
870 }
871 
872 void pm_system_wakeup(void)
873 {
874 	pm_abort_suspend = true;
875 	freeze_wake();
876 }
877 EXPORT_SYMBOL_GPL(pm_system_wakeup);
878 
879 void pm_wakeup_clear(void)
880 {
881 	pm_abort_suspend = false;
882 }
883 
884 /**
885  * pm_get_wakeup_count - Read the number of registered wakeup events.
886  * @count: Address to store the value at.
887  * @block: Whether or not to block.
888  *
889  * Store the number of registered wakeup events at the address in @count.  If
890  * @block is set, block until the current number of wakeup events being
891  * processed is zero.
892  *
893  * Return 'false' if the current number of wakeup events being processed is
894  * nonzero.  Otherwise return 'true'.
895  */
896 bool pm_get_wakeup_count(unsigned int *count, bool block)
897 {
898 	unsigned int cnt, inpr;
899 
900 	if (block) {
901 		DEFINE_WAIT(wait);
902 
903 		for (;;) {
904 			prepare_to_wait(&wakeup_count_wait_queue, &wait,
905 					TASK_INTERRUPTIBLE);
906 			split_counters(&cnt, &inpr);
907 			if (inpr == 0 || signal_pending(current))
908 				break;
909 
910 			schedule();
911 		}
912 		finish_wait(&wakeup_count_wait_queue, &wait);
913 	}
914 
915 	split_counters(&cnt, &inpr);
916 	*count = cnt;
917 	return !inpr;
918 }
919 
920 /**
921  * pm_save_wakeup_count - Save the current number of registered wakeup events.
922  * @count: Value to compare with the current number of registered wakeup events.
923  *
924  * If @count is equal to the current number of registered wakeup events and the
925  * current number of wakeup events being processed is zero, store @count as the
926  * old number of registered wakeup events for pm_wakeup_pending(), enable
927  * wakeup events detection and return 'true'.  Otherwise disable wakeup events
928  * detection and return 'false'.
929  */
930 bool pm_save_wakeup_count(unsigned int count)
931 {
932 	unsigned int cnt, inpr;
933 	unsigned long flags;
934 
935 	events_check_enabled = false;
936 	spin_lock_irqsave(&events_lock, flags);
937 	split_counters(&cnt, &inpr);
938 	if (cnt == count && inpr == 0) {
939 		saved_count = count;
940 		events_check_enabled = true;
941 	}
942 	spin_unlock_irqrestore(&events_lock, flags);
943 	return events_check_enabled;
944 }
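
/*
 * Example usage (illustrative sketch; hypothetical user space code, not part
 * of the kernel): pm_get_wakeup_count() and pm_save_wakeup_count() back the
 * /sys/power/wakeup_count interface.  A user space power manager typically
 * reads the count, writes the same value back and only then initiates
 * suspend; if the write fails, new wakeup events were registered in between
 * and the suspend attempt should be retried.
 *
 *	char buf[32];
 *	int fd = open("/sys/power/wakeup_count", O_RDWR);
 *	ssize_t len = read(fd, buf, sizeof(buf) - 1);
 *
 *	if (len > 0 && write(fd, buf, len) == len)
 *		...safe to write "mem" to /sys/power/state...
 *	else
 *		...new wakeup events arrived, retry later...
 */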
945 
946 #ifdef CONFIG_PM_AUTOSLEEP
947 /**
948  * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
949  * @set: Whether to set or to clear the autosleep_enabled flags.
950  */
951 void pm_wakep_autosleep_enabled(bool set)
952 {
953 	struct wakeup_source *ws;
954 	ktime_t now = ktime_get();
955 
956 	rcu_read_lock();
957 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
958 		spin_lock_irq(&ws->lock);
959 		if (ws->autosleep_enabled != set) {
960 			ws->autosleep_enabled = set;
961 			if (ws->active) {
962 				if (set)
963 					ws->start_prevent_time = now;
964 				else
965 					update_prevent_sleep_time(ws, now);
966 			}
967 		}
968 		spin_unlock_irq(&ws->lock);
969 	}
970 	rcu_read_unlock();
971 }
972 #endif /* CONFIG_PM_AUTOSLEEP */
973 
974 static struct dentry *wakeup_sources_stats_dentry;
975 
976 /**
977  * print_wakeup_source_stats - Print wakeup source statistics information.
978  * @m: seq_file to print the statistics into.
979  * @ws: Wakeup source object to print the statistics for.
980  */
981 static int print_wakeup_source_stats(struct seq_file *m,
982 				     struct wakeup_source *ws)
983 {
984 	unsigned long flags;
985 	ktime_t total_time;
986 	ktime_t max_time;
987 	unsigned long active_count;
988 	ktime_t active_time;
989 	ktime_t prevent_sleep_time;
990 
991 	spin_lock_irqsave(&ws->lock, flags);
992 
993 	total_time = ws->total_time;
994 	max_time = ws->max_time;
995 	prevent_sleep_time = ws->prevent_sleep_time;
996 	active_count = ws->active_count;
997 	if (ws->active) {
998 		ktime_t now = ktime_get();
999 
1000 		active_time = ktime_sub(now, ws->last_time);
1001 		total_time = ktime_add(total_time, active_time);
1002 		if (active_time.tv64 > max_time.tv64)
1003 			max_time = active_time;
1004 
1005 		if (ws->autosleep_enabled)
1006 			prevent_sleep_time = ktime_add(prevent_sleep_time,
1007 				ktime_sub(now, ws->start_prevent_time));
1008 	} else {
1009 		active_time = ktime_set(0, 0);
1010 	}
1011 
1012 	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
1013 		   ws->name, active_count, ws->event_count,
1014 		   ws->wakeup_count, ws->expire_count,
1015 		   ktime_to_ms(active_time), ktime_to_ms(total_time),
1016 		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
1017 		   ktime_to_ms(prevent_sleep_time));
1018 
1019 	spin_unlock_irqrestore(&ws->lock, flags);
1020 
1021 	return 0;
1022 }
1023 
1024 /**
1025  * wakeup_sources_stats_show - Print wakeup sources statistics information.
1026  * @m: seq_file to print the statistics into.
1027  */
1028 static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
1029 {
1030 	struct wakeup_source *ws;
1031 
1032 	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
1033 		"expire_count\tactive_since\ttotal_time\tmax_time\t"
1034 		"last_change\tprevent_suspend_time\n");
1035 
1036 	rcu_read_lock();
1037 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
1038 		print_wakeup_source_stats(m, ws);
1039 	rcu_read_unlock();
1040 
1041 	print_wakeup_source_stats(m, &deleted_ws);
1042 
1043 	return 0;
1044 }
1045 
1046 static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
1047 {
1048 	return single_open(file, wakeup_sources_stats_show, NULL);
1049 }
1050 
1051 static const struct file_operations wakeup_sources_stats_fops = {
1052 	.owner = THIS_MODULE,
1053 	.open = wakeup_sources_stats_open,
1054 	.read = seq_read,
1055 	.llseek = seq_lseek,
1056 	.release = single_release,
1057 };
1058 
1059 static int __init wakeup_sources_debugfs_init(void)
1060 {
1061 	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
1062 			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
1063 	return 0;
1064 }
1065 
1066 postcore_initcall(wakeup_sources_debugfs_init);
1067