// SPDX-License-Identifier: GPL-2.0
/* sysfs entries for device PM */
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"

/*
 * control - Report/change current runtime PM setting of the device
 *
 * Runtime power management of a device can be blocked with the help of
 * this attribute. All devices have one of the following two values for
 * the power/control file:
 *
 *	+ "auto\n" to allow the device to be power managed at run time;
 *	+ "on\n" to prevent the device from being power managed at run time;
 *
 * The default for all devices is "auto", which means that devices may be
 * subject to automatic power management, depending on their drivers.
 * Changing this attribute to "on" prevents the driver from power managing
 * the device at run time. Doing that while the device is suspended causes
 * it to be woken up.
 *
 * wakeup - Report/change current wakeup option for device
 *
 * Some devices support "wakeup" events, which are hardware signals
 * used to activate devices from suspended or low power states. Such
 * devices have one of three values for the sysfs power/wakeup file:
 *
 *	+ "enabled\n" to issue the events;
 *	+ "disabled\n" not to do so; or
 *	+ "\n" for temporary or permanent inability to issue wakeup.
 *
 * (For example, unconfigured USB devices can't issue wakeups.)
 *
 * Familiar examples of devices that can issue wakeup events include
 * keyboards and mice (both PS/2 and USB styles), power buttons, modems,
 * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events
 * will wake the entire system from a suspend state; others may just
 * wake up the device (if the system as a whole is already active).
 * Some wakeup events use normal IRQ lines; others use special out
 * of band signaling.
 *
 * It is the responsibility of device drivers to enable (or disable)
 * wakeup signaling as part of changing device power states, respecting
 * the policy choices provided through the driver model.
 *
 * Devices may not be able to generate wakeup events from all power
 * states. Also, the events may be ignored in some configurations;
 * for example, they might need help from other devices that aren't
 * active, or which may have wakeup disabled. Some drivers rely on
 * wakeup events internally (unless they are disabled), keeping
 * their hardware in low power modes whenever they're unused. This
 * saves runtime power, without requiring system-wide sleep states.
 *
 * async - Report/change current async suspend setting for the device
 *
 * Asynchronous suspend and resume of the device during system-wide power
 * state transitions can be enabled by writing "enabled" to this file.
 * Analogously, if "disabled" is written to this file, the device will be
 * suspended and resumed synchronously.
 *
 * All devices have one of the following two values for power/async:
 *
 *	+ "enabled\n" to permit the asynchronous suspend/resume of the device;
 *	+ "disabled\n" to forbid it;
 *
 * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
 * of a device unless it is certain that all of the PM dependencies of the
 * device are known to the PM core. However, for some devices this
 * attribute is set to "enabled" by bus type code or device drivers and in
 * those cases it should be safe to leave the default value.
 *
 * autosuspend_delay_ms - Report/change a device's autosuspend_delay value
 *
 * Some drivers don't want to carry out a runtime suspend as soon as a
 * device becomes idle; they want it always to remain idle for some period
 * of time before suspending it. This period is the autosuspend_delay
 * value (expressed in milliseconds) and it can be controlled by the user.
 * If the value is negative then the device will never be runtime
 * suspended.
 *
 * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
 * value are used only if the driver calls pm_runtime_use_autosuspend()
 * (see the illustrative sketch following this comment).
 *
 * wakeup_count - Report the number of wakeup events related to the device
 */

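/*
 * Illustrative sketch (not part of this file's logic): roughly how a driver
 * probe routine might opt a device into the interfaces documented above.
 * The "dev" pointer and the 1000 ms delay are placeholders chosen for the
 * example; real drivers use values appropriate for their hardware.
 *
 *	pm_runtime_set_autosuspend_delay(dev, 1000);	(initial autosuspend_delay_ms)
 *	pm_runtime_use_autosuspend(dev);		(honor the autosuspend delay)
 *	pm_runtime_allow(dev);				(same effect as writing "auto" to power/control)
 *	device_set_wakeup_capable(dev, true);		(allow power/wakeup to be used)
 *	device_enable_async_suspend(dev);		(same effect as writing "enabled" to power/async)
 */
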
const char power_group_name[] = "power";
EXPORT_SYMBOL_GPL(power_group_name);

static const char ctrl_auto[] = "auto";
static const char ctrl_on[] = "on";

static ssize_t control_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  dev->power.runtime_auto ? ctrl_auto : ctrl_on);
}

static ssize_t control_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t n)
{
	device_lock(dev);
	if (sysfs_streq(buf, ctrl_auto))
		pm_runtime_allow(dev);
	else if (sysfs_streq(buf, ctrl_on))
		pm_runtime_forbid(dev);
	else
		n = -EINVAL;
	device_unlock(dev);
	return n;
}

static DEVICE_ATTR_RW(control);

static ssize_t runtime_active_time_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	u64 tmp = pm_runtime_active_time(dev);

	do_div(tmp, NSEC_PER_MSEC);

	return sysfs_emit(buf, "%llu\n", tmp);
}

static DEVICE_ATTR_RO(runtime_active_time);

static ssize_t runtime_suspended_time_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	u64 tmp = pm_runtime_suspended_time(dev);

	do_div(tmp, NSEC_PER_MSEC);

	return sysfs_emit(buf, "%llu\n", tmp);
}

static DEVICE_ATTR_RO(runtime_suspended_time);

static ssize_t runtime_status_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	const char *output;

	if (dev->power.runtime_error) {
		output = "error";
	} else if (dev->power.disable_depth) {
		output = "unsupported";
	} else {
		switch (dev->power.runtime_status) {
		case RPM_SUSPENDED:
			output = "suspended";
			break;
		case RPM_SUSPENDING:
			output = "suspending";
			break;
		case RPM_RESUMING:
			output = "resuming";
			break;
		case RPM_ACTIVE:
			output = "active";
			break;
		default:
			return -EIO;
		}
	}
	return sysfs_emit(buf, "%s\n", output);
}

static DEVICE_ATTR_RO(runtime_status);

static ssize_t autosuspend_delay_ms_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	if (!dev->power.use_autosuspend)
		return -EIO;

	return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay);
}

static ssize_t autosuspend_delay_ms_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t n)
{
	long delay;

	if (!dev->power.use_autosuspend)
		return -EIO;

	/* Reject malformed input and values that do not fit in an int. */
	if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
		return -EINVAL;

	device_lock(dev);
	pm_runtime_set_autosuspend_delay(dev, delay);
	device_unlock(dev);
	return n;
}

static DEVICE_ATTR_RW(autosuspend_delay_ms);

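/*
 * Mapping used by the pm_qos_resume_latency_us handlers below: the
 * user-visible value 0 is stored internally as
 * PM_QOS_RESUME_LATENCY_NO_CONSTRAINT (and reported back as 0), while the
 * string "n/a" corresponds to an internal request of 0, i.e. no resume
 * latency is to be tolerated at all.
 */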
static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	s32 value = dev_pm_qos_requested_resume_latency(dev);

	if (value == 0)
		return sysfs_emit(buf, "n/a\n");
	if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
		value = 0;

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t n)
{
	s32 value;
	int ret;

	if (!kstrtos32(buf, 0, &value)) {
		/*
		 * Prevent users from writing negative or "no constraint"
		 * values directly.
		 */
		if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
			return -EINVAL;

		if (value == 0)
			value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	} else if (sysfs_streq(buf, "n/a")) {
		value = 0;
	} else {
		return -EINVAL;
	}

	ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
					value);
	return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_resume_latency_us);

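/*
 * For pm_qos_latency_tolerance_us, the handlers below accept the strings
 * "auto" (mapped to PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT) and "any"
 * (mapped to PM_QOS_LATENCY_ANY) in addition to non-negative numbers;
 * negative internal values are reported back as "auto".
 */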
static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	s32 value = dev_pm_qos_get_user_latency_tolerance(dev);

	if (value < 0)
		return sysfs_emit(buf, "%s\n", "auto");
	if (value == PM_QOS_LATENCY_ANY)
		return sysfs_emit(buf, "%s\n", "any");

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t n)
{
	s32 value;
	int ret;

	if (kstrtos32(buf, 0, &value) == 0) {
		/* Users can't write negative values directly */
		if (value < 0)
			return -EINVAL;
	} else {
		if (sysfs_streq(buf, "auto"))
			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
		else if (sysfs_streq(buf, "any"))
			value = PM_QOS_LATENCY_ANY;
		else
			return -EINVAL;
	}
	ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
	return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);

static ssize_t pm_qos_no_power_off_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
					  & PM_QOS_FLAG_NO_POWER_OFF));
}

static ssize_t pm_qos_no_power_off_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t n)
{
	int ret;

	if (kstrtoint(buf, 0, &ret))
		return -EINVAL;

	if (ret != 0 && ret != 1)
		return -EINVAL;

	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
	return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_no_power_off);

#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";

static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sysfs_emit(buf, "%s\n", device_can_wakeup(dev)
			  ? (device_may_wakeup(dev) ? _enabled : _disabled)
			  : "");
}

static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t n)
{
	if (!device_can_wakeup(dev))
		return -EINVAL;

	if (sysfs_streq(buf, _enabled))
		device_set_wakeup_enable(dev, 1);
	else if (sysfs_streq(buf, _disabled))
		device_set_wakeup_enable(dev, 0);
	else
		return -EINVAL;
	return n;
}

static DEVICE_ATTR_RW(wakeup);

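/*
 * The wakeup_* statistics attributes below report fields of the device's
 * struct wakeup_source under dev->power.lock; if no wakeup source is
 * registered for the device, they print an empty line instead.
 */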
static ssize_t wakeup_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long count;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		count = dev->power.wakeup->event_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_count);

static ssize_t wakeup_active_count_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	unsigned long count;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		count = dev->power.wakeup->active_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_active_count);

static ssize_t wakeup_abort_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	unsigned long count;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		count = dev->power.wakeup->wakeup_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_abort_count);

static ssize_t wakeup_expire_count_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	unsigned long count;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		count = dev->power.wakeup->expire_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_expire_count);

static ssize_t wakeup_active_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned int active;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		active = dev->power.wakeup->active;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%u\n", active);
}

static DEVICE_ATTR_RO(wakeup_active);

static ssize_t wakeup_total_time_ms_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	s64 msec;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->total_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lld\n", msec);
}

static DEVICE_ATTR_RO(wakeup_total_time_ms);

static ssize_t wakeup_max_time_ms_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	s64 msec;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->max_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lld\n", msec);
}

static DEVICE_ATTR_RO(wakeup_max_time_ms);

static ssize_t wakeup_last_time_ms_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	s64 msec;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->last_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lld\n", msec);
}

static DEVICE_ATTR_RO(wakeup_last_time_ms);

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	s64 msec;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lld\n", msec);
}

static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */

static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
						kgid_t kgid)
{
	if (dev->power.wakeup && dev->power.wakeup->dev)
		return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
	return 0;
}

#else /* CONFIG_PM_SLEEP */
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
						kgid_t kgid)
{
	return 0;
}
#endif

#ifdef CONFIG_PM_ADVANCED_DEBUG
static ssize_t runtime_usage_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
static DEVICE_ATTR_RO(runtime_usage);

static ssize_t runtime_active_kids_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sysfs_emit(buf, "%d\n", dev->power.ignore_children ?
			  0 : atomic_read(&dev->power.child_count));
}
static DEVICE_ATTR_RO(runtime_active_kids);

static ssize_t runtime_enabled_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const char *output;

	if (dev->power.disable_depth && !dev->power.runtime_auto)
		output = "disabled & forbidden";
	else if (dev->power.disable_depth)
		output = "disabled";
	else if (!dev->power.runtime_auto)
		output = "forbidden";
	else
		output = "enabled";

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(runtime_enabled);

#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  device_async_suspend_enabled(dev) ?
			  _enabled : _disabled);
}

static ssize_t async_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t n)
{
	if (sysfs_streq(buf, _enabled))
		device_enable_async_suspend(dev);
	else if (sysfs_streq(buf, _disabled))
		device_disable_async_suspend(dev);
	else
		return -EINVAL;
	return n;
}

static DEVICE_ATTR_RW(async);

#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */

static struct attribute *power_attrs[] = {
#if defined(CONFIG_PM_ADVANCED_DEBUG) && defined(CONFIG_PM_SLEEP)
	&dev_attr_async.attr,
#endif
	NULL,
};
static const struct attribute_group pm_attr_group = {
	.name = power_group_name,
	.attrs = power_attrs,
};

static struct attribute *wakeup_attrs[] = {
#ifdef CONFIG_PM_SLEEP
	&dev_attr_wakeup.attr,
	&dev_attr_wakeup_count.attr,
	&dev_attr_wakeup_active_count.attr,
	&dev_attr_wakeup_abort_count.attr,
	&dev_attr_wakeup_expire_count.attr,
	&dev_attr_wakeup_active.attr,
	&dev_attr_wakeup_total_time_ms.attr,
	&dev_attr_wakeup_max_time_ms.attr,
	&dev_attr_wakeup_last_time_ms.attr,
#ifdef CONFIG_PM_AUTOSLEEP
	&dev_attr_wakeup_prevent_sleep_time_ms.attr,
#endif
#endif
	NULL,
};
static const struct attribute_group pm_wakeup_attr_group = {
	.name = power_group_name,
	.attrs = wakeup_attrs,
};

static struct attribute *runtime_attrs[] = {
	&dev_attr_runtime_status.attr,
	&dev_attr_control.attr,
	&dev_attr_runtime_suspended_time.attr,
	&dev_attr_runtime_active_time.attr,
	&dev_attr_autosuspend_delay_ms.attr,
#ifdef CONFIG_PM_ADVANCED_DEBUG
	&dev_attr_runtime_usage.attr,
	&dev_attr_runtime_active_kids.attr,
	&dev_attr_runtime_enabled.attr,
#endif
	NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
	.name = power_group_name,
	.attrs = runtime_attrs,
};

static struct attribute *pm_qos_resume_latency_attrs[] = {
	&dev_attr_pm_qos_resume_latency_us.attr,
	NULL,
};
static const struct attribute_group pm_qos_resume_latency_attr_group = {
	.name = power_group_name,
	.attrs = pm_qos_resume_latency_attrs,
};

static struct attribute *pm_qos_latency_tolerance_attrs[] = {
	&dev_attr_pm_qos_latency_tolerance_us.attr,
	NULL,
};
static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
	.name = power_group_name,
	.attrs = pm_qos_latency_tolerance_attrs,
};

static struct attribute *pm_qos_flags_attrs[] = {
	&dev_attr_pm_qos_no_power_off.attr,
	NULL,
};
static const struct attribute_group pm_qos_flags_attr_group = {
	.name = power_group_name,
	.attrs = pm_qos_flags_attrs,
};

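/*
 * dpm_sysfs_add - Create the device's "power" sysfs group, merge the optional
 * runtime PM, wakeup and latency tolerance attribute sets into it depending
 * on the device's capabilities, and call pm_wakeup_source_sysfs_add() for it.
 */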
int dpm_sysfs_add(struct device *dev)
{
	int rc;

	/* No need to create PM sysfs if explicitly disabled. */
	if (device_pm_not_required(dev))
		return 0;

	rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
	if (rc)
		return rc;

	if (!pm_runtime_has_no_callbacks(dev)) {
		rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
		if (rc)
			goto err_out;
	}
	if (device_can_wakeup(dev)) {
		rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
		if (rc)
			goto err_runtime;
	}
	if (dev->power.set_latency_tolerance) {
		rc = sysfs_merge_group(&dev->kobj,
				       &pm_qos_latency_tolerance_attr_group);
		if (rc)
			goto err_wakeup;
	}
	rc = pm_wakeup_source_sysfs_add(dev);
	if (rc)
		goto err_latency;
	return 0;

 err_latency:
	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
 err_wakeup:
	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 err_runtime:
	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
 err_out:
	sysfs_remove_group(&dev->kobj, &pm_attr_group);
	return rc;
}

int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
	int rc;

	if (device_pm_not_required(dev))
		return 0;

	rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid);
	if (rc)
		return rc;

	if (!pm_runtime_has_no_callbacks(dev)) {
		rc = sysfs_group_change_owner(
			&dev->kobj, &pm_runtime_attr_group, kuid, kgid);
		if (rc)
			return rc;
	}

	if (device_can_wakeup(dev)) {
		rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group,
					      kuid, kgid);
		if (rc)
			return rc;

		rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid);
		if (rc)
			return rc;
	}

	if (dev->power.set_latency_tolerance) {
		rc = sysfs_group_change_owner(
			&dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid,
			kgid);
		if (rc)
			return rc;
	}
	return 0;
}

int wakeup_sysfs_add(struct device *dev)
{
	int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);

	if (!ret)
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);

	return ret;
}

void wakeup_sysfs_remove(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
	kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}

int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
	return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

void pm_qos_sysfs_remove_resume_latency(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

int pm_qos_sysfs_add_flags(struct device *dev)
{
	return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

void pm_qos_sysfs_remove_flags(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
{
	return sysfs_merge_group(&dev->kobj,
				 &pm_qos_latency_tolerance_attr_group);
}

void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
}

void rpm_sysfs_remove(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
}

void dpm_sysfs_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;
	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
	dev_pm_qos_constraints_destroy(dev);
	rpm_sysfs_remove(dev);
	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
	sysfs_remove_group(&dev->kobj, &pm_attr_group);
}