// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/power.h>

#include "power.h"

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
		srcu_read_lock_held(&wakeup_srcu))
/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 */
bool events_check_enabled __read_mostly;

/* First wakeup IRQ seen by the kernel in the last cycle. */
static unsigned int wakeup_irq[2] __read_mostly;
static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);

/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;

/*
 * Combined counters of registered wakeup events and wakeup events in progress.
 * They need to be modified together atomically, so it's better to use one
 * atomic variable to hold them both.
 */
static atomic_t combined_event_count = ATOMIC_INIT(0);

#define IN_PROGRESS_BITS	(sizeof(int) * 4)
#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
	unsigned int comb = atomic_read(&combined_event_count);

	*cnt = (comb >> IN_PROGRESS_BITS);
	*inpr = comb & MAX_IN_PROGRESS;
}
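
/*
 * Illustrative sketch (editorial note, not kernel documentation): with a
 * 32-bit int, IN_PROGRESS_BITS is 16, so the combined counter keeps the
 * number of registered wakeup events in its upper half and the number of
 * wakeup events in progress in its lower half:
 *
 *	comb = atomic_read(&combined_event_count);
 *	cnt  = comb >> IN_PROGRESS_BITS;	// registered events
 *	inpr = comb & MAX_IN_PROGRESS;		// events in progress
 *
 * Activating a wakeup source adds 1 (one more event in progress), while
 * deactivating it adds MAX_IN_PROGRESS, which is equivalent to subtracting
 * 1 from the lower half and adding 1 to the upper half in a single atomic
 * operation.
 */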

/* A preserved old value of the events counter. */
static unsigned int saved_count;

static DEFINE_RAW_SPINLOCK(events_lock);

static void pm_wakeup_timer_fn(struct timer_list *t);

static LIST_HEAD(wakeup_sources);

static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);

DEFINE_STATIC_SRCU(wakeup_srcu);

static struct wakeup_source deleted_ws = {
	.name = "deleted",
	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
};

static DEFINE_IDA(wakeup_ida);

/**
 * wakeup_source_create - Create a struct wakeup_source object.
 * @name: Name of the new wakeup source.
 */
static struct wakeup_source *wakeup_source_create(const char *name)
{
	struct wakeup_source *ws;
	const char *ws_name;
	int id;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		goto err_ws;

	ws_name = kstrdup_const(name, GFP_KERNEL);
	if (!ws_name)
		goto err_name;
	ws->name = ws_name;

	id = ida_alloc(&wakeup_ida, GFP_KERNEL);
	if (id < 0)
		goto err_id;
	ws->id = id;

	return ws;

err_id:
	kfree_const(ws->name);
err_name:
	kfree(ws);
err_ws:
	return NULL;
}

/*
 * Record the statistics of a wakeup source being deleted into the dummy
 * "deleted" wakeup source.
 */
static void wakeup_source_record(struct wakeup_source *ws)
{
	unsigned long flags;

	spin_lock_irqsave(&deleted_ws.lock, flags);

	if (ws->event_count) {
		deleted_ws.total_time =
			ktime_add(deleted_ws.total_time, ws->total_time);
		deleted_ws.prevent_sleep_time =
			ktime_add(deleted_ws.prevent_sleep_time,
				  ws->prevent_sleep_time);
		deleted_ws.max_time =
			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
				deleted_ws.max_time : ws->max_time;
		deleted_ws.event_count += ws->event_count;
		deleted_ws.active_count += ws->active_count;
		deleted_ws.relax_count += ws->relax_count;
		deleted_ws.expire_count += ws->expire_count;
		deleted_ws.wakeup_count += ws->wakeup_count;
	}

	spin_unlock_irqrestore(&deleted_ws.lock, flags);
}

static void wakeup_source_free(struct wakeup_source *ws)
{
	ida_free(&wakeup_ida, ws->id);
	kfree_const(ws->name);
	kfree(ws);
}

/**
 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 * @ws: Wakeup source to destroy.
 *
 * Use only for wakeup source objects created with wakeup_source_create().
 */
static void wakeup_source_destroy(struct wakeup_source *ws)
{
	if (!ws)
		return;

	__pm_relax(ws);
	wakeup_source_record(ws);
	wakeup_source_free(ws);
}

/**
 * wakeup_source_add - Add given object to the list of wakeup sources.
 * @ws: Wakeup source object to add to the list.
 */
static void wakeup_source_add(struct wakeup_source *ws)
{
	unsigned long flags;

	if (WARN_ON(!ws))
		return;

	spin_lock_init(&ws->lock);
	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
	ws->active = false;

	raw_spin_lock_irqsave(&events_lock, flags);
	list_add_rcu(&ws->entry, &wakeup_sources);
	raw_spin_unlock_irqrestore(&events_lock, flags);
}

/**
 * wakeup_source_remove - Remove given object from the wakeup sources list.
 * @ws: Wakeup source object to remove from the list.
 */
static void wakeup_source_remove(struct wakeup_source *ws)
{
	unsigned long flags;

	if (WARN_ON(!ws))
		return;

	/*
	 * After shutting down the timer, wakeup_source_activate() will warn if
	 * the given wakeup source is passed to it.
	 */
	timer_shutdown_sync(&ws->timer);

	raw_spin_lock_irqsave(&events_lock, flags);
	list_del_rcu(&ws->entry);
	raw_spin_unlock_irqrestore(&events_lock, flags);
	synchronize_srcu(&wakeup_srcu);
}

/**
 * wakeup_source_register - Create wakeup source and add it to the list.
 * @dev: Device this wakeup source is associated with (or NULL if virtual).
 * @name: Name of the wakeup source to register.
 */
struct wakeup_source *wakeup_source_register(struct device *dev,
					     const char *name)
{
	struct wakeup_source *ws;
	int ret;

	ws = wakeup_source_create(name);
	if (ws) {
		if (!dev || device_is_registered(dev)) {
			ret = wakeup_source_sysfs_add(dev, ws);
			if (ret) {
				wakeup_source_free(ws);
				return NULL;
			}
		}
		wakeup_source_add(ws);
	}
	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);

/**
 * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
 * @ws: Wakeup source object to unregister.
 */
void wakeup_source_unregister(struct wakeup_source *ws)
{
	if (ws) {
		wakeup_source_remove(ws);
		if (ws->dev)
			wakeup_source_sysfs_remove(ws);

		wakeup_source_destroy(ws);
	}
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);
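
/*
 * Illustrative sketch (editorial note; the "foo" names are hypothetical):
 * a user of this API that needs a virtual wakeup source, not tied to any
 * device, pairs the register and unregister calls like this:
 *
 *	struct wakeup_source *foo_ws;
 *
 *	foo_ws = wakeup_source_register(NULL, "foo_events");
 *	if (!foo_ws)
 *		return -ENOMEM;
 *
 *	__pm_stay_awake(foo_ws);	// a wakeup event is being processed
 *	...
 *	__pm_relax(foo_ws);		// processing finished
 *
 *	wakeup_source_unregister(foo_ws);	// folds stats into "deleted"
 */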

/**
 * wakeup_sources_read_lock - Lock wakeup source list for read.
 *
 * Returns an SRCU read-side lock index for the wakeup_srcu domain.
 * This index must be passed to the matching wakeup_sources_read_unlock().
 */
int wakeup_sources_read_lock(void)
{
	return srcu_read_lock(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);

/**
 * wakeup_sources_read_unlock - Unlock wakeup source list.
 * @idx: return value from corresponding wakeup_sources_read_lock()
 */
void wakeup_sources_read_unlock(int idx)
{
	srcu_read_unlock(&wakeup_srcu, idx);
}
EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);

/**
 * wakeup_sources_walk_start - Begin a walk on wakeup source list
 *
 * Returns the first object on the list of wakeup sources.
 *
 * Note that to be safe, the wakeup sources list needs to be locked by calling
 * wakeup_sources_read_lock() for this.
 */
struct wakeup_source *wakeup_sources_walk_start(void)
{
	struct list_head *ws_head = &wakeup_sources;

	return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
}
EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);

/**
 * wakeup_sources_walk_next - Get next wakeup source from the list
 * @ws: Previous wakeup source object
 *
 * Note that to be safe, the wakeup sources list needs to be locked by calling
 * wakeup_sources_read_lock() for this.
 */
struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
{
	struct list_head *ws_head = &wakeup_sources;

	return list_next_or_null_rcu(ws_head, &ws->entry,
				struct wakeup_source, entry);
}
EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
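
/*
 * Illustrative sketch (editorial note): walking the list from outside this
 * file requires holding the SRCU read lock around the whole walk and
 * passing the returned index back when unlocking; the for_each_wakeup_source()
 * helper in <linux/pm_wakeup.h> wraps the walk itself:
 *
 *	struct wakeup_source *ws;
 *	int idx;
 *
 *	idx = wakeup_sources_read_lock();
 *	for_each_wakeup_source(ws)
 *		pr_info("wakeup source: %s\n", ws->name);
 *	wakeup_sources_read_unlock(idx);
 */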

/**
 * device_wakeup_attach - Attach a wakeup source object to a device object.
 * @dev: Device to handle.
 * @ws: Wakeup source object to attach to @dev.
 *
 * This causes @dev to be treated as a wakeup device.
 */
static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		spin_unlock_irq(&dev->power.lock);
		return -EEXIST;
	}
	dev->power.wakeup = ws;
	if (dev->power.wakeirq)
		device_wakeup_attach_irq(dev, dev->power.wakeirq);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * device_wakeup_enable - Enable given device to be a wakeup source.
 * @dev: Device to handle.
 *
 * Create a wakeup source object, register it and attach it to @dev.
 */
int device_wakeup_enable(struct device *dev)
{
	struct wakeup_source *ws;
	int ret;

	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	if (pm_sleep_transition_in_progress())
		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);

	ws = wakeup_source_register(dev, dev_name(dev));
	if (!ws)
		return -ENOMEM;

	ret = device_wakeup_attach(dev, ws);
	if (ret)
		wakeup_source_unregister(ws);

	return ret;
}
EXPORT_SYMBOL_GPL(device_wakeup_enable);

/**
 * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
 * @dev: Device to handle
 * @wakeirq: Device specific wakeirq entry
 *
 * Attach a device wakeirq to the wakeup source so the device
 * wake IRQ can be configured automatically for suspend and
 * resume.
 *
 * Call under the device's power.lock lock.
 */
void device_wakeup_attach_irq(struct device *dev,
			     struct wake_irq *wakeirq)
{
	struct wakeup_source *ws;

	ws = dev->power.wakeup;
	if (!ws)
		return;

	if (ws->wakeirq)
		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");

	ws->wakeirq = wakeirq;
}

/**
 * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
 * @dev: Device to handle
 *
 * Removes a device wakeirq from the wakeup source.
 *
 * Call under the device's power.lock lock.
 */
void device_wakeup_detach_irq(struct device *dev)
{
	struct wakeup_source *ws;

	ws = dev->power.wakeup;
	if (ws)
		ws->wakeirq = NULL;
}

/**
 * device_wakeup_arm_wake_irqs - Arm all device wake IRQs.
 *
 * Iterates over the list of device wakeirqs to arm them.
 */
void device_wakeup_arm_wake_irqs(void)
{
	struct wakeup_source *ws;
	int srcuidx;

	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
		dev_pm_arm_wake_irq(ws->wakeirq);
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}

/**
 * device_wakeup_disarm_wake_irqs - Disarm all device wake IRQs.
 *
 * Iterates over the list of device wakeirqs to disarm them.
 */
void device_wakeup_disarm_wake_irqs(void)
{
	struct wakeup_source *ws;
	int srcuidx;

	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
		dev_pm_disarm_wake_irq(ws->wakeirq);
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}

/**
 * device_wakeup_detach - Detach a device's wakeup source object from it.
 * @dev: Device to detach the wakeup source object from.
 *
 * After it returns, @dev will not be treated as a wakeup device any more.
 */
static struct wakeup_source *device_wakeup_detach(struct device *dev)
{
	struct wakeup_source *ws;

	spin_lock_irq(&dev->power.lock);
	ws = dev->power.wakeup;
	dev->power.wakeup = NULL;
	spin_unlock_irq(&dev->power.lock);
	return ws;
}

/**
 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 * @dev: Device to handle.
 *
 * Detach the @dev's wakeup source object from it, unregister this wakeup source
 * object and destroy it.
 */
void device_wakeup_disable(struct device *dev)
{
	struct wakeup_source *ws;

	if (!dev || !dev->power.can_wakeup)
		return;

	ws = device_wakeup_detach(dev);
	wakeup_source_unregister(ws);
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);

/**
 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
 * @dev: Device to handle.
 * @capable: Whether or not @dev is capable of waking up the system from sleep.
 *
 * If @capable is set, set the @dev's power.can_wakeup flag and add its
 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
 *
 * This function may sleep, so it must not be called from a context where
 * sleeping is not allowed.
 */
void device_set_wakeup_capable(struct device *dev, bool capable)
{
	if (!!dev->power.can_wakeup == !!capable)
		return;

	dev->power.can_wakeup = capable;
	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
		if (capable) {
			int ret = wakeup_sysfs_add(dev);

			if (ret)
				dev_info(dev, "Wakeup sysfs attributes not added\n");
		} else {
			wakeup_sysfs_remove(dev);
		}
	}
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);

/**
 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 * @dev: Device to handle.
 * @enable: enable/disable flag
 */
int device_set_wakeup_enable(struct device *dev, bool enable)
{
	if (enable)
		return device_wakeup_enable(dev);

	device_wakeup_disable(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
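
/*
 * Illustrative sketch (editorial note; foo_probe() is hypothetical): a
 * driver typically declares wakeup capability once and then enables or
 * disables it, e.g. at probe time:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		device_set_wakeup_capable(dev, true);
 *		return device_set_wakeup_enable(dev, true);
 *	}
 *
 * device_wakeup_enable() then creates a wakeup source named after the
 * device and attaches it to dev->power.wakeup, so pm_stay_awake(dev),
 * pm_relax(dev) and pm_wakeup_event() start taking effect for the device.
 */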

/**
 * wakeup_source_not_usable - Check if the given wakeup source must not be used.
 * @ws: Wakeup source to check.
 */
static bool wakeup_source_not_usable(struct wakeup_source *ws)
{
	/*
	 * Use the timer struct to check if the given wakeup source has been
	 * initialized by wakeup_source_add() and it is not going away.
	 */
	return ws->timer.function != pm_wakeup_timer_fn;
}

/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended.  When this period ends
 * depends on how the wakeup event is going to be processed after being
 * detected and all of the possible cases can be divided into two distinct
 * groups.
 *
 * First, a wakeup event may be detected by the same functional unit that will
 * carry out the entire processing of it and possibly will pass it to user space
 * for further processing.  In that case the functional unit that has detected
 * the event may later "close" the "no suspend" period associated with it
 * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
 * pm_relax(), balanced with each other, is supposed to be used in such
 * situations.
 *
 * Second, a wakeup event may be detected by one functional unit and processed
 * by another one.  In that case the unit that has detected it cannot really
 * "close" the "no suspend" period associated with it, unless it knows in
 * advance what's going to happen to the event during processing.  This
 * knowledge, however, may not be available to it, so it can simply specify a
 * time to wait before the system can be suspended and pass it as the second
 * argument of pm_wakeup_event().
 *
 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 * function executed when the timer expires, whichever comes first.
 */
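
/*
 * Illustrative sketch (editorial note; foo_handle_event() is hypothetical)
 * of the two patterns described above:
 *
 *	// 1. The detecting code also completes the processing:
 *	pm_stay_awake(dev);
 *	foo_handle_event(dev);
 *	pm_relax(dev);
 *
 *	// 2. The event is handed off to another unit; allow it up to
 *	//    100 ms before the system may be suspended again:
 *	pm_wakeup_event(dev, 100);
 *
 * In the second case pm_relax() may still be called earlier, and the
 * "no suspend" period ends at whichever of the two comes first.
 */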

/**
 * wakeup_source_activate - Mark given wakeup source as active.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
 * core of the event by incrementing the counter of the wakeup events being
 * processed.
 */
static void wakeup_source_activate(struct wakeup_source *ws)
{
	unsigned int cec;

	if (WARN_ONCE(wakeup_source_not_usable(ws), "unusable wakeup source\n"))
		return;

	ws->active = true;
	ws->active_count++;
	ws->last_time = ktime_get();
	if (ws->autosleep_enabled)
		ws->start_prevent_time = ws->last_time;

	/* Increment the counter of events in progress. */
	cec = atomic_inc_return(&combined_event_count);

	trace_wakeup_source_activate(ws->name, cec);
}

/**
 * wakeup_source_report_event - Report wakeup event using the given source.
 * @ws: Wakeup source to report the event for.
 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 */
static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
{
	ws->event_count++;
	/* This is racy, but the counter is approximate anyway. */
	if (events_check_enabled)
		ws->wakeup_count++;

	if (!ws->active)
		wakeup_source_activate(ws);

	if (hard)
		pm_system_wakeup();
}

/**
 * __pm_stay_awake - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_stay_awake(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);

	wakeup_source_report_event(ws, false);
	timer_delete(&ws->timer);
	ws->timer_expires = 0;

	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);

/**
 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 * @dev: Device the wakeup event is related to.
 *
 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 * __pm_stay_awake for the @dev's wakeup source object.
 *
 * Call this function after detecting a wakeup event if pm_relax() is going
 * to be called directly after processing the event (and possibly passing it to
 * user space for further processing).
 */
void pm_stay_awake(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_stay_awake(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_stay_awake);

#ifdef CONFIG_PM_AUTOSLEEP
static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
{
	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
}
#else
static inline void update_prevent_sleep_time(struct wakeup_source *ws,
					     ktime_t now) {}
#endif

/**
 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and notify the PM core that the wakeup source has
 * become inactive by decrementing the counter of wakeup events being processed
 * and incrementing the counter of registered wakeup events.
 */
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
	unsigned int cnt, inpr, cec;
	ktime_t duration;
	ktime_t now;

	ws->relax_count++;
	/*
	 * __pm_relax() may be called directly or from a timer function.
	 * If it is called directly right after the timer function has been
	 * started, but before the timer function calls __pm_relax(), it is
	 * possible that __pm_stay_awake() will be called in the meantime and
	 * will set ws->active.  Then, ws->active may be cleared immediately
	 * by the __pm_relax() called from the timer function, but in such a
	 * case ws->relax_count will be different from ws->active_count.
	 */
	if (ws->relax_count != ws->active_count) {
		ws->relax_count--;
		return;
	}

	ws->active = false;

	now = ktime_get();
	duration = ktime_sub(now, ws->last_time);
	ws->total_time = ktime_add(ws->total_time, duration);
	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
		ws->max_time = duration;

	ws->last_time = now;
	timer_delete(&ws->timer);
	ws->timer_expires = 0;

	if (ws->autosleep_enabled)
		update_prevent_sleep_time(ws, now);

	/*
	 * Increment the counter of registered wakeup events and decrement the
	 * counter of wakeup events in progress simultaneously.
	 */
	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
	trace_wakeup_source_deactivate(ws->name, cec);

	split_counters(&cnt, &inpr);
	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
		wake_up(&wakeup_count_wait_queue);
}

/**
 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * Call this function for wakeup events whose processing started with calling
 * __pm_stay_awake().
 *
 * It is safe to call it from interrupt context.
 */
void __pm_relax(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);
	if (ws->active)
		wakeup_source_deactivate(ws);
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_relax);

/**
 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @dev: Device that signaled the event.
 *
 * Execute __pm_relax() for the @dev's wakeup source object.
 */
void pm_relax(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_relax(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_relax);

/**
 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 * @t: Timer used to track the wakeup source's timeout.
 *
 * Call wakeup_source_deactivate() for the wakeup source associated with @t
 * if it is currently active, its timer has not been canceled and the
 * expiration time of the timer is not in the future.
 */
static void pm_wakeup_timer_fn(struct timer_list *t)
{
	struct wakeup_source *ws = timer_container_of(ws, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&ws->lock, flags);

	if (ws->active && ws->timer_expires
	    && time_after_eq(jiffies, ws->timer_expires)) {
		wakeup_source_deactivate(ws);
		ws->expire_count++;
	}

	spin_unlock_irqrestore(&ws->lock, flags);
}

/**
 * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the event source.
 * @msec: Anticipated event processing time (in milliseconds).
 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 *
 * Notify the PM core of a wakeup event whose source is @ws that will take
 * approximately @msec milliseconds to be processed by the kernel.  If @ws is
 * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
 * execute pm_wakeup_timer_fn() in the future.
 *
 * It is safe to call this function from interrupt context.
 */
void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
{
	unsigned long flags;
	unsigned long expires;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);

	wakeup_source_report_event(ws, hard);

	if (!msec) {
		wakeup_source_deactivate(ws);
		goto unlock;
	}

	expires = jiffies + msecs_to_jiffies(msec);
	if (!expires)
		expires = 1;

	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
		mod_timer(&ws->timer, expires);
		ws->timer_expires = expires;
	}

 unlock:
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);

/**
 * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
 * @dev: Device the wakeup event is related to.
 * @msec: Anticipated event processing time (in milliseconds).
 * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
 *
 * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
 */
void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);

void pm_print_active_wakeup_sources(void)
{
	struct wakeup_source *ws;
	int srcuidx, active = 0;
	struct wakeup_source *last_activity_ws = NULL;

	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
		if (ws->active) {
			pm_pr_dbg("active wakeup source: %s\n", ws->name);
			active = 1;
		} else if (!active &&
			   (!last_activity_ws ||
			    ktime_to_ns(ws->last_time) >
			    ktime_to_ns(last_activity_ws->last_time))) {
			last_activity_ws = ws;
		}
	}

	if (!active && last_activity_ws)
		pm_pr_dbg("last active wakeup source: %s\n",
			last_activity_ws->name);
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);

/**
 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 *
 * Compare the current number of registered wakeup events with its preserved
 * value from the past and return true if new wakeup events have been registered
 * since the old value was stored.  Also return true if the current number of
 * wakeup events being processed is different from zero.
 */
bool pm_wakeup_pending(void)
{
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&events_lock, flags);
	if (events_check_enabled) {
		unsigned int cnt, inpr;

		split_counters(&cnt, &inpr);
		ret = (cnt != saved_count || inpr > 0);
		events_check_enabled = !ret;
	}
	raw_spin_unlock_irqrestore(&events_lock, flags);

	if (ret) {
		pm_pr_dbg("Wakeup pending, aborting suspend\n");
		pm_print_active_wakeup_sources();
	}

	return ret || atomic_read(&pm_abort_suspend) > 0;
}
EXPORT_SYMBOL_GPL(pm_wakeup_pending);

void pm_system_wakeup(void)
{
	atomic_inc(&pm_abort_suspend);
	s2idle_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);

void pm_system_cancel_wakeup(void)
{
	atomic_dec_if_positive(&pm_abort_suspend);
}

void pm_wakeup_clear(unsigned int irq_number)
{
	raw_spin_lock_irq(&wakeup_irq_lock);

	if (irq_number && wakeup_irq[0] == irq_number)
		wakeup_irq[0] = wakeup_irq[1];
	else
		wakeup_irq[0] = 0;

	wakeup_irq[1] = 0;

	raw_spin_unlock_irq(&wakeup_irq_lock);

	if (!irq_number)
		atomic_set(&pm_abort_suspend, 0);
}

void pm_system_irq_wakeup(unsigned int irq_number)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeup_irq_lock, flags);

	if (wakeup_irq[0] == 0)
		wakeup_irq[0] = irq_number;
	else if (wakeup_irq[1] == 0)
		wakeup_irq[1] = irq_number;
	else
		irq_number = 0;

	pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number);

	raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);

	if (irq_number)
		pm_system_wakeup();
}

unsigned int pm_wakeup_irq(void)
{
	return wakeup_irq[0];
}

/**
 * pm_get_wakeup_count - Read the number of registered wakeup events.
 * @count: Address to store the value at.
 * @block: Whether or not to block.
 *
 * Store the number of registered wakeup events at the address in @count.  If
 * @block is set, block until the current number of wakeup events being
 * processed is zero.
 *
 * Return 'false' if the current number of wakeup events being processed is
 * nonzero.  Otherwise return 'true'.
 */
bool pm_get_wakeup_count(unsigned int *count, bool block)
{
	unsigned int cnt, inpr;

	if (block) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(&wakeup_count_wait_queue, &wait,
					TASK_INTERRUPTIBLE);
			split_counters(&cnt, &inpr);
			if (inpr == 0 || signal_pending(current))
				break;
			pm_print_active_wakeup_sources();
			schedule();
		}
		finish_wait(&wakeup_count_wait_queue, &wait);
	}

	split_counters(&cnt, &inpr);
	*count = cnt;
	return !inpr;
}

/**
 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 * @count: Value to compare with the current number of registered wakeup events.
 *
 * If @count is equal to the current number of registered wakeup events and the
 * current number of wakeup events being processed is zero, store @count as the
 * old number of registered wakeup events for pm_wakeup_pending(), enable
 * wakeup events detection and return 'true'.  Otherwise disable wakeup events
 * detection and return 'false'.
 */
bool pm_save_wakeup_count(unsigned int count)
{
	unsigned int cnt, inpr;
	unsigned long flags;

	events_check_enabled = false;
	raw_spin_lock_irqsave(&events_lock, flags);
	split_counters(&cnt, &inpr);
	if (cnt == count && inpr == 0) {
		saved_count = count;
		events_check_enabled = true;
	}
	raw_spin_unlock_irqrestore(&events_lock, flags);
	return events_check_enabled;
}
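
/*
 * Illustrative sketch (editorial note): these two functions back the
 * /sys/power/wakeup_count handshake used by userspace suspend daemons.
 * A caller such as the wakeup_count sysfs handlers does roughly:
 *
 *	unsigned int cnt;
 *
 *	if (!pm_get_wakeup_count(&cnt, true))	// blocks until no wakeup
 *		return -EINTR;			// events are in progress
 *
 *	// ... cnt is reported to userspace, which writes it back ...
 *
 *	if (!pm_save_wakeup_count(cnt))		// events arrived meanwhile,
 *		return -EINVAL;			// so refuse to suspend
 *
 * After a successful pm_save_wakeup_count(), events_check_enabled is set
 * and pm_wakeup_pending() will abort the transition if any new wakeup
 * event is registered before the system gets fully suspended.
 */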

#ifdef CONFIG_PM_AUTOSLEEP
/**
 * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
 * @set: Whether to set or to clear the autosleep_enabled flags.
 */
void pm_wakep_autosleep_enabled(bool set)
{
	struct wakeup_source *ws;
	ktime_t now = ktime_get();
	int srcuidx;

	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
		spin_lock_irq(&ws->lock);
		if (ws->autosleep_enabled != set) {
			ws->autosleep_enabled = set;
			if (ws->active) {
				if (set)
					ws->start_prevent_time = now;
				else
					update_prevent_sleep_time(ws, now);
			}
		}
		spin_unlock_irq(&ws->lock);
	}
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */

/**
 * print_wakeup_source_stats - Print wakeup source statistics information.
 * @m: seq_file to print the statistics into.
 * @ws: Wakeup source object to print the statistics for.
 */
static int print_wakeup_source_stats(struct seq_file *m,
				     struct wakeup_source *ws)
{
	unsigned long flags;
	ktime_t total_time;
	ktime_t max_time;
	unsigned long active_count;
	ktime_t active_time;
	ktime_t prevent_sleep_time;

	spin_lock_irqsave(&ws->lock, flags);

	total_time = ws->total_time;
	max_time = ws->max_time;
	prevent_sleep_time = ws->prevent_sleep_time;
	active_count = ws->active_count;
	if (ws->active) {
		ktime_t now = ktime_get();

		active_time = ktime_sub(now, ws->last_time);
		total_time = ktime_add(total_time, active_time);
		if (active_time > max_time)
			max_time = active_time;

		if (ws->autosleep_enabled)
			prevent_sleep_time = ktime_add(prevent_sleep_time,
				ktime_sub(now, ws->start_prevent_time));
	} else {
		active_time = 0;
	}

	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
		   ws->name, active_count, ws->event_count,
		   ws->wakeup_count, ws->expire_count,
		   ktime_to_ms(active_time), ktime_to_ms(total_time),
		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
		   ktime_to_ms(prevent_sleep_time));

	spin_unlock_irqrestore(&ws->lock, flags);

	return 0;
}

static void *wakeup_sources_stats_seq_start(struct seq_file *m,
					loff_t *pos)
{
	struct wakeup_source *ws;
	loff_t n = *pos;
	int *srcuidx = m->private;

	if (n == 0) {
		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
			"expire_count\tactive_since\ttotal_time\tmax_time\t"
			"last_change\tprevent_suspend_time\n");
	}

	*srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
		if (n-- <= 0)
			return ws;
	}

	return NULL;
}

static void *wakeup_sources_stats_seq_next(struct seq_file *m,
					void *v, loff_t *pos)
{
	struct wakeup_source *ws = v;
	struct wakeup_source *next_ws = NULL;

	++(*pos);

	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
		next_ws = ws;
		break;
	}

	if (!next_ws)
		print_wakeup_source_stats(m, &deleted_ws);

	return next_ws;
}

static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
{
	int *srcuidx = m->private;

	srcu_read_unlock(&wakeup_srcu, *srcuidx);
}

/**
 * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
 * @m: seq_file to print the statistics into.
 * @v: wakeup_source of each iteration
 */
static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
{
	struct wakeup_source *ws = v;

	print_wakeup_source_stats(m, ws);

	return 0;
}

static const struct seq_operations wakeup_sources_stats_seq_ops = {
	.start = wakeup_sources_stats_seq_start,
	.next  = wakeup_sources_stats_seq_next,
	.stop  = wakeup_sources_stats_seq_stop,
	.show  = wakeup_sources_stats_seq_show,
};

static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
}

static const struct file_operations wakeup_sources_stats_fops = {
	.owner = THIS_MODULE,
	.open = wakeup_sources_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static int __init wakeup_sources_debugfs_init(void)
{
	debugfs_create_file("wakeup_sources", 0444, NULL, NULL,
			    &wakeup_sources_stats_fops);
	return 0;
}

postcore_initcall(wakeup_sources_debugfs_init);