// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/power/wakelock.c
 *
 * User space wakeup sources support.
 *
 * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This code is based on the analogous interface allowing user space to
 * manipulate wakelocks on Android.
 */

#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "power.h"

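/* Serializes all accesses to the wakelocks rbtree, LRU list and counters. */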
static DEFINE_MUTEX(wakelocks_lock);

struct wakelock {
	char			*name;
	struct rb_node		node;
	struct wakeup_source	*ws;
#ifdef CONFIG_PM_WAKELOCKS_GC
	struct list_head	lru;
#endif
};

static struct rb_root wakelocks_tree = RB_ROOT;

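/*
 * pm_show_wakelocks - Emit the names of registered user-space wakelocks.
 * @buf: sysfs output buffer to fill.
 * @show_active: If true, list only currently active wakelocks; otherwise
 *	list only inactive ones.
 *
 * Called from the show() handlers of the wake_lock and wake_unlock sysfs
 * attributes.
 */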
ssize_t pm_show_wakelocks(char *buf, bool show_active)
{
	struct rb_node *node;
	struct wakelock *wl;
	int len = 0;

	mutex_lock(&wakelocks_lock);

	for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
		wl = rb_entry(node, struct wakelock, node);
		if (wl->ws->active == show_active)
			len += sysfs_emit_at(buf, len, "%s ", wl->name);
	}

	len += sysfs_emit_at(buf, len, "\n");

	mutex_unlock(&wakelocks_lock);
	return len;
}

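/*
 * Optional accounting used to cap the number of user-space wakelocks when
 * CONFIG_PM_WAKELOCKS_LIMIT is non-zero; with a limit of zero the helpers
 * below compile down to no-ops.
 */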
#if CONFIG_PM_WAKELOCKS_LIMIT > 0
static unsigned int number_of_wakelocks;

static inline bool wakelocks_limit_exceeded(void)
{
	return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT;
}

static inline void increment_wakelocks_number(void)
{
	number_of_wakelocks++;
}

static inline void decrement_wakelocks_number(void)
{
	number_of_wakelocks--;
}
#else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */
static inline bool wakelocks_limit_exceeded(void) { return false; }
static inline void increment_wakelocks_number(void) {}
static inline void decrement_wakelocks_number(void) {}
#endif /* CONFIG_PM_WAKELOCKS_LIMIT */

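/*
 * Optional garbage collection: once the number of wakelock release
 * operations since the last GC run exceeds WL_GC_COUNT_MAX, a work item
 * frees wakelocks that have been inactive for at least WL_GC_TIME_SEC
 * seconds.
 */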
#ifdef CONFIG_PM_WAKELOCKS_GC
#define WL_GC_COUNT_MAX	100
#define WL_GC_TIME_SEC	300

static void __wakelocks_gc(struct work_struct *work);
static LIST_HEAD(wakelocks_lru_list);
static DECLARE_WORK(wakelock_work, __wakelocks_gc);
static unsigned int wakelocks_gc_count;

static inline void wakelocks_lru_add(struct wakelock *wl)
{
	list_add(&wl->lru, &wakelocks_lru_list);
}

static inline void wakelocks_lru_most_recent(struct wakelock *wl)
{
	list_move(&wl->lru, &wakelocks_lru_list);
}

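/*
 * Walk the LRU list from its least recently used end and drop every
 * inactive wakelock that has not been touched for WL_GC_TIME_SEC seconds.
 * The walk stops at the first entry that is still recent enough, since all
 * entries in front of it are newer.
 */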
static void __wakelocks_gc(struct work_struct *work)
{
	struct wakelock *wl, *aux;
	ktime_t now;

	mutex_lock(&wakelocks_lock);

	now = ktime_get();
	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
		u64 idle_time_ns;
		bool active;

		spin_lock_irq(&wl->ws->lock);
		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws->last_time));
		active = wl->ws->active;
		spin_unlock_irq(&wl->ws->lock);

		if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
			break;

		if (!active) {
			wakeup_source_unregister(wl->ws);
			rb_erase(&wl->node, &wakelocks_tree);
			list_del(&wl->lru);
			kfree(wl->name);
			kfree(wl);
			decrement_wakelocks_number();
		}
	}
	wakelocks_gc_count = 0;

	mutex_unlock(&wakelocks_lock);
}

static void wakelocks_gc(void)
{
	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
		return;

	schedule_work(&wakelock_work);
}
#else /* !CONFIG_PM_WAKELOCKS_GC */
static inline void wakelocks_lru_add(struct wakelock *wl) {}
static inline void wakelocks_lru_most_recent(struct wakelock *wl) {}
static inline void wakelocks_gc(void) {}
#endif /* !CONFIG_PM_WAKELOCKS_GC */

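/*
 * Look up the wakelock whose name matches the first @len characters of
 * @name in the rbtree.  If it is not there and @add_if_not_found is set,
 * allocate a new entry, register a wakeup source for it and insert it into
 * the tree; otherwise return an ERR_PTR() value.  Must be called with
 * wakelocks_lock held.
 */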
static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
					    bool add_if_not_found)
{
	struct rb_node **node = &wakelocks_tree.rb_node;
	struct rb_node *parent = *node;
	struct wakelock *wl;

	while (*node) {
		int diff;

		parent = *node;
		wl = rb_entry(*node, struct wakelock, node);
		diff = strncmp(name, wl->name, len);
		if (diff == 0) {
			if (wl->name[len])
				diff = -1;
			else
				return wl;
		}
		if (diff < 0)
			node = &(*node)->rb_left;
		else
			node = &(*node)->rb_right;
	}
	if (!add_if_not_found)
		return ERR_PTR(-EINVAL);

	if (wakelocks_limit_exceeded())
		return ERR_PTR(-ENOSPC);

	/* Not found, we have to add a new one. */
	wl = kzalloc(sizeof(*wl), GFP_KERNEL);
	if (!wl)
		return ERR_PTR(-ENOMEM);

	wl->name = kstrndup(name, len, GFP_KERNEL);
	if (!wl->name) {
		kfree(wl);
		return ERR_PTR(-ENOMEM);
	}

	wl->ws = wakeup_source_register(NULL, wl->name);
	if (!wl->ws) {
		kfree(wl->name);
		kfree(wl);
		return ERR_PTR(-ENOMEM);
	}
	wl->ws->last_time = ktime_get();

	rb_link_node(&wl->node, parent, node);
	rb_insert_color(&wl->node, &wakelocks_tree);
	wakelocks_lru_add(wl);
	increment_wakelocks_number();
	return wl;
}

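/*
 * pm_wake_lock - Activate the wakelock named in @buf.
 * @buf: Wakelock name, optionally followed by a timeout in nanoseconds
 *	(the format accepted by writes to /sys/power/wake_lock).
 *
 * Requires CAP_BLOCK_SUSPEND.  A missing wakelock is created on the fly;
 * if a timeout is given, it is rounded up to milliseconds and the wakeup
 * source is activated only for that long.
 */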
int pm_wake_lock(const char *buf)
{
	const char *str = buf;
	struct wakelock *wl;
	u64 timeout_ns = 0;
	size_t len;
	int ret = 0;

	if (!capable(CAP_BLOCK_SUSPEND))
		return -EPERM;

	while (*str && !isspace(*str))
		str++;

	len = str - buf;
	if (!len)
		return -EINVAL;

	if (*str && *str != '\n') {
		/* Find out if there's a valid timeout string appended. */
		ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
		if (ret)
			return -EINVAL;
	}

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, true);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	if (timeout_ns) {
		u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;

		do_div(timeout_ms, NSEC_PER_MSEC);
		__pm_wakeup_event(wl->ws, timeout_ms);
	} else {
		__pm_stay_awake(wl->ws);
	}

	wakelocks_lru_most_recent(wl);

 out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}

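/*
 * pm_wake_unlock - Deactivate the wakelock named in @buf.
 * @buf: Wakelock name, with an optional trailing newline (as written to
 *	/sys/power/wake_unlock).
 *
 * Requires CAP_BLOCK_SUSPEND.  Unknown names return -EINVAL; releasing a
 * wakelock also kicks the optional garbage collector.
 */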
int pm_wake_unlock(const char *buf)
{
	struct wakelock *wl;
	size_t len;
	int ret = 0;

	if (!capable(CAP_BLOCK_SUSPEND))
		return -EPERM;

	len = strlen(buf);
	if (!len)
		return -EINVAL;

	if (buf[len-1] == '\n')
		len--;

	if (!len)
		return -EINVAL;

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, false);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	__pm_relax(wl->ws);

	wakelocks_lru_most_recent(wl);
	wakelocks_gc();

 out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}