/*
 * kernel/power/wakelock.c
 *
 * User space wakeup sources support.
 *
 * Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This code is based on the analogous interface allowing user space to
 * manipulate wakelocks on Android.
 */

#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

static DEFINE_MUTEX(wakelocks_lock);

struct wakelock {
	char *name;
	struct rb_node node;
	struct wakeup_source ws;
#ifdef CONFIG_PM_WAKELOCKS_GC
	struct list_head lru;
#endif
};

static struct rb_root wakelocks_tree = RB_ROOT;

ssize_t pm_show_wakelocks(char *buf, bool show_active)
{
	struct rb_node *node;
	struct wakelock *wl;
	char *str = buf;
	char *end = buf + PAGE_SIZE;

	mutex_lock(&wakelocks_lock);

	for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
		wl = rb_entry(node, struct wakelock, node);
		if (wl->ws.active == show_active)
			str += scnprintf(str, end - str, "%s ", wl->name);
	}
	if (str > buf)
		str--;

	str += scnprintf(str, end - str, "\n");

	mutex_unlock(&wakelocks_lock);
	return (str - buf);
}

#if CONFIG_PM_WAKELOCKS_LIMIT > 0
static unsigned int number_of_wakelocks;

static inline bool wakelocks_limit_exceeded(void)
{
	return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT;
}

static inline void increment_wakelocks_number(void)
{
	number_of_wakelocks++;
}

static inline void decrement_wakelocks_number(void)
{
	number_of_wakelocks--;
}
#else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */
static inline bool wakelocks_limit_exceeded(void) { return false; }
static inline void increment_wakelocks_number(void) {}
static inline void decrement_wakelocks_number(void) {}
#endif /* CONFIG_PM_WAKELOCKS_LIMIT */

#ifdef CONFIG_PM_WAKELOCKS_GC
#define WL_GC_COUNT_MAX	100
#define WL_GC_TIME_SEC	300

static LIST_HEAD(wakelocks_lru_list);
static unsigned int wakelocks_gc_count;

static inline void wakelocks_lru_add(struct wakelock *wl)
{
	list_add(&wl->lru, &wakelocks_lru_list);
}

static inline void wakelocks_lru_most_recent(struct wakelock *wl)
{
	list_move(&wl->lru, &wakelocks_lru_list);
}

static void wakelocks_gc(void)
{
	struct wakelock *wl, *aux;
	ktime_t now;

	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
		return;

	now = ktime_get();
	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
		u64 idle_time_ns;
		bool active;

		spin_lock_irq(&wl->ws.lock);
		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time));
		active = wl->ws.active;
		spin_unlock_irq(&wl->ws.lock);

		if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
			break;

		if (!active) {
			wakeup_source_remove(&wl->ws);
			rb_erase(&wl->node, &wakelocks_tree);
			list_del(&wl->lru);
			kfree(wl->name);
			kfree(wl);
			decrement_wakelocks_number();
		}
	}
	wakelocks_gc_count = 0;
}
#else /* !CONFIG_PM_WAKELOCKS_GC */
static inline void wakelocks_lru_add(struct wakelock *wl) {}
static inline void wakelocks_lru_most_recent(struct wakelock *wl) {}
static inline void wakelocks_gc(void) {}
#endif /* !CONFIG_PM_WAKELOCKS_GC */

static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
					    bool add_if_not_found)
{
	struct rb_node **node = &wakelocks_tree.rb_node;
	struct rb_node *parent = *node;
	struct wakelock *wl;

	while (*node) {
		int diff;

		parent = *node;
		wl = rb_entry(*node, struct wakelock, node);
		diff = strncmp(name, wl->name, len);
		if (diff == 0) {
			if (wl->name[len])
				diff = -1;
			else
				return wl;
		}
		if (diff < 0)
			node = &(*node)->rb_left;
		else
			node = &(*node)->rb_right;
	}
	if (!add_if_not_found)
		return ERR_PTR(-EINVAL);

	if (wakelocks_limit_exceeded())
		return ERR_PTR(-ENOSPC);

	/* Not found, we have to add a new one. */
	wl = kzalloc(sizeof(*wl), GFP_KERNEL);
	if (!wl)
		return ERR_PTR(-ENOMEM);

	wl->name = kstrndup(name, len, GFP_KERNEL);
	if (!wl->name) {
		kfree(wl);
		return ERR_PTR(-ENOMEM);
	}
	wl->ws.name = wl->name;
	wakeup_source_add(&wl->ws);
	rb_link_node(&wl->node, parent, node);
	rb_insert_color(&wl->node, &wakelocks_tree);
	wakelocks_lru_add(wl);
	increment_wakelocks_number();
	return wl;
}

int pm_wake_lock(const char *buf)
{
	const char *str = buf;
	struct wakelock *wl;
	u64 timeout_ns = 0;
	size_t len;
	int ret = 0;

	while (*str && !isspace(*str))
		str++;

	len = str - buf;
	if (!len)
		return -EINVAL;

	if (*str && *str != '\n') {
		/* Find out if there's a valid timeout string appended. */
		ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
		if (ret)
			return -EINVAL;
	}

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, true);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	if (timeout_ns) {
		u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;

		do_div(timeout_ms, NSEC_PER_MSEC);
		__pm_wakeup_event(&wl->ws, timeout_ms);
	} else {
		__pm_stay_awake(&wl->ws);
	}

	wakelocks_lru_most_recent(wl);

 out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}

int pm_wake_unlock(const char *buf)
{
	struct wakelock *wl;
	size_t len;
	int ret = 0;

	len = strlen(buf);
	if (!len)
		return -EINVAL;

	if (buf[len-1] == '\n')
		len--;

	if (!len)
		return -EINVAL;

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, false);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	__pm_relax(&wl->ws);

	wakelocks_lru_most_recent(wl);
	wakelocks_gc();

 out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}
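
/*
 * Usage sketch (illustration only, not part of this file): when
 * CONFIG_PM_WAKELOCKS is set, pm_wake_lock() and pm_wake_unlock() back the
 * /sys/power/wake_lock and /sys/power/wake_unlock sysfs attributes (see
 * kernel/power/main.c).  User space acquires a wakeup source by writing its
 * name, optionally followed by a timeout in nanoseconds, to wake_lock, and
 * releases it by writing the name to wake_unlock, roughly as follows
 * (error handling omitted):
 *
 *	int fd = open("/sys/power/wake_lock", O_WRONLY);
 *	write(fd, "mylock 5000000000", 17);	// hold "mylock" for 5 s
 *	close(fd);
 *
 *	fd = open("/sys/power/wake_unlock", O_WRONLY);
 *	write(fd, "mylock", 6);			// release "mylock"
 *	close(fd);
 */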