1 // SPDX-License-Identifier: GPL-2.0 2 /* sysfs entries for device PM */ 3 #include <linux/device.h> 4 #include <linux/kobject.h> 5 #include <linux/string.h> 6 #include <linux/export.h> 7 #include <linux/pm_qos.h> 8 #include <linux/pm_runtime.h> 9 #include <linux/pm_wakeup.h> 10 #include <linux/atomic.h> 11 #include <linux/jiffies.h> 12 #include "power.h" 13 14 /* 15 * control - Report/change current runtime PM setting of the device 16 * 17 * Runtime power management of a device can be blocked with the help of 18 * this attribute. All devices have one of the following two values for 19 * the power/control file: 20 * 21 * + "auto\n" to allow the device to be power managed at run time; 22 * + "on\n" to prevent the device from being power managed at run time; 23 * 24 * The default for all devices is "auto", which means that devices may be 25 * subject to automatic power management, depending on their drivers. 26 * Changing this attribute to "on" prevents the driver from power managing 27 * the device at run time. Doing that while the device is suspended causes 28 * it to be woken up. 29 * 30 * wakeup - Report/change current wakeup option for device 31 * 32 * Some devices support "wakeup" events, which are hardware signals 33 * used to activate devices from suspended or low power states. Such 34 * devices have one of three values for the sysfs power/wakeup file: 35 * 36 * + "enabled\n" to issue the events; 37 * + "disabled\n" not to do so; or 38 * + "\n" for temporary or permanent inability to issue wakeup. 39 * 40 * (For example, unconfigured USB devices can't issue wakeups.) 41 * 42 * Familiar examples of devices that can issue wakeup events include 43 * keyboards and mice (both PS2 and USB styles), power buttons, modems, 44 * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events 45 * will wake the entire system from a suspend state; others may just 46 * wake up the device (if the system as a whole is already active). 47 * Some wakeup events use normal IRQ lines; other use special out 48 * of band signaling. 49 * 50 * It is the responsibility of device drivers to enable (or disable) 51 * wakeup signaling as part of changing device power states, respecting 52 * the policy choices provided through the driver model. 53 * 54 * Devices may not be able to generate wakeup events from all power 55 * states. Also, the events may be ignored in some configurations; 56 * for example, they might need help from other devices that aren't 57 * active, or which may have wakeup disabled. Some drivers rely on 58 * wakeup events internally (unless they are disabled), keeping 59 * their hardware in low power modes whenever they're unused. This 60 * saves runtime power, without requiring system-wide sleep states. 61 * 62 * async - Report/change current async suspend setting for the device 63 * 64 * Asynchronous suspend and resume of the device during system-wide power 65 * state transitions can be enabled by writing "enabled" to this file. 66 * Analogously, if "disabled" is written to this file, the device will be 67 * suspended and resumed synchronously. 68 * 69 * All devices have one of the following two values for power/async: 70 * 71 * + "enabled\n" to permit the asynchronous suspend/resume of the device; 72 * + "disabled\n" to forbid it; 73 * 74 * NOTE: It generally is unsafe to permit the asynchronous suspend/resume 75 * of a device unless it is certain that all of the PM dependencies of the 76 * device are known to the PM core. However, for some devices this 77 * attribute is set to "enabled" by bus type code or device drivers and in 78 * that cases it should be safe to leave the default value. 79 * 80 * autosuspend_delay_ms - Report/change a device's autosuspend_delay value 81 * 82 * Some drivers don't want to carry out a runtime suspend as soon as a 83 * device becomes idle; they want it always to remain idle for some period 84 * of time before suspending it. This period is the autosuspend_delay 85 * value (expressed in milliseconds) and it can be controlled by the user. 86 * If the value is negative then the device will never be runtime 87 * suspended. 88 * 89 * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay 90 * value are used only if the driver calls pm_runtime_use_autosuspend(). 91 * 92 * wakeup_count - Report the number of wakeup events related to the device 93 */ 94 95 const char power_group_name[] = "power"; 96 EXPORT_SYMBOL_GPL(power_group_name); 97 98 static const char ctrl_auto[] = "auto"; 99 static const char ctrl_on[] = "on"; 100 101 static ssize_t control_show(struct device *dev, struct device_attribute *attr, 102 char *buf) 103 { 104 return sysfs_emit(buf, "%s\n", 105 dev->power.runtime_auto ? ctrl_auto : ctrl_on); 106 } 107 108 static ssize_t control_store(struct device * dev, struct device_attribute *attr, 109 const char * buf, size_t n) 110 { 111 device_lock(dev); 112 if (sysfs_streq(buf, ctrl_auto)) 113 pm_runtime_allow(dev); 114 else if (sysfs_streq(buf, ctrl_on)) 115 pm_runtime_forbid(dev); 116 else 117 n = -EINVAL; 118 device_unlock(dev); 119 return n; 120 } 121 122 static DEVICE_ATTR_RW(control); 123 124 static ssize_t runtime_active_time_show(struct device *dev, 125 struct device_attribute *attr, char *buf) 126 { 127 int ret; 128 u64 tmp = pm_runtime_active_time(dev); 129 do_div(tmp, NSEC_PER_MSEC); 130 ret = sysfs_emit(buf, "%llu\n", tmp); 131 return ret; 132 } 133 134 static DEVICE_ATTR_RO(runtime_active_time); 135 136 static ssize_t runtime_suspended_time_show(struct device *dev, 137 struct device_attribute *attr, char *buf) 138 { 139 int ret; 140 u64 tmp = pm_runtime_suspended_time(dev); 141 do_div(tmp, NSEC_PER_MSEC); 142 ret = sysfs_emit(buf, "%llu\n", tmp); 143 return ret; 144 } 145 146 static DEVICE_ATTR_RO(runtime_suspended_time); 147 148 static ssize_t runtime_status_show(struct device *dev, 149 struct device_attribute *attr, char *buf) 150 { 151 const char *p; 152 153 if (dev->power.runtime_error) { 154 p = "error\n"; 155 } else if (dev->power.disable_depth) { 156 p = "unsupported\n"; 157 } else { 158 switch (dev->power.runtime_status) { 159 case RPM_SUSPENDED: 160 p = "suspended\n"; 161 break; 162 case RPM_SUSPENDING: 163 p = "suspending\n"; 164 break; 165 case RPM_RESUMING: 166 p = "resuming\n"; 167 break; 168 case RPM_ACTIVE: 169 p = "active\n"; 170 break; 171 default: 172 return -EIO; 173 } 174 } 175 return sysfs_emit(buf, p); 176 } 177 178 static DEVICE_ATTR_RO(runtime_status); 179 180 static ssize_t autosuspend_delay_ms_show(struct device *dev, 181 struct device_attribute *attr, char *buf) 182 { 183 if (!dev->power.use_autosuspend) 184 return -EIO; 185 return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay); 186 } 187 188 static ssize_t autosuspend_delay_ms_store(struct device *dev, 189 struct device_attribute *attr, const char *buf, size_t n) 190 { 191 long delay; 192 193 if (!dev->power.use_autosuspend) 194 return -EIO; 195 196 if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay) 197 return -EINVAL; 198 199 device_lock(dev); 200 pm_runtime_set_autosuspend_delay(dev, delay); 201 device_unlock(dev); 202 return n; 203 } 204 205 static DEVICE_ATTR_RW(autosuspend_delay_ms); 206 207 static ssize_t pm_qos_resume_latency_us_show(struct device *dev, 208 struct device_attribute *attr, 209 char *buf) 210 { 211 s32 value = dev_pm_qos_requested_resume_latency(dev); 212 213 if (value == 0) 214 return sysfs_emit(buf, "n/a\n"); 215 if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) 216 value = 0; 217 218 return sysfs_emit(buf, "%d\n", value); 219 } 220 221 static ssize_t pm_qos_resume_latency_us_store(struct device *dev, 222 struct device_attribute *attr, 223 const char *buf, size_t n) 224 { 225 s32 value; 226 int ret; 227 228 if (!kstrtos32(buf, 0, &value)) { 229 /* 230 * Prevent users from writing negative or "no constraint" values 231 * directly. 232 */ 233 if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) 234 return -EINVAL; 235 236 if (value == 0) 237 value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; 238 } else if (sysfs_streq(buf, "n/a")) { 239 value = 0; 240 } else { 241 return -EINVAL; 242 } 243 244 ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, 245 value); 246 return ret < 0 ? ret : n; 247 } 248 249 static DEVICE_ATTR_RW(pm_qos_resume_latency_us); 250 251 static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev, 252 struct device_attribute *attr, 253 char *buf) 254 { 255 s32 value = dev_pm_qos_get_user_latency_tolerance(dev); 256 257 if (value < 0) 258 return sysfs_emit(buf, "%s\n", "auto"); 259 if (value == PM_QOS_LATENCY_ANY) 260 return sysfs_emit(buf, "%s\n", "any"); 261 262 return sysfs_emit(buf, "%d\n", value); 263 } 264 265 static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev, 266 struct device_attribute *attr, 267 const char *buf, size_t n) 268 { 269 s32 value; 270 int ret; 271 272 if (kstrtos32(buf, 0, &value) == 0) { 273 /* Users can't write negative values directly */ 274 if (value < 0) 275 return -EINVAL; 276 } else { 277 if (sysfs_streq(buf, "auto")) 278 value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; 279 else if (sysfs_streq(buf, "any")) 280 value = PM_QOS_LATENCY_ANY; 281 else 282 return -EINVAL; 283 } 284 ret = dev_pm_qos_update_user_latency_tolerance(dev, value); 285 return ret < 0 ? ret : n; 286 } 287 288 static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us); 289 290 static ssize_t pm_qos_no_power_off_show(struct device *dev, 291 struct device_attribute *attr, 292 char *buf) 293 { 294 return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) 295 & PM_QOS_FLAG_NO_POWER_OFF)); 296 } 297 298 static ssize_t pm_qos_no_power_off_store(struct device *dev, 299 struct device_attribute *attr, 300 const char *buf, size_t n) 301 { 302 int ret; 303 304 if (kstrtoint(buf, 0, &ret)) 305 return -EINVAL; 306 307 if (ret != 0 && ret != 1) 308 return -EINVAL; 309 310 ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret); 311 return ret < 0 ? ret : n; 312 } 313 314 static DEVICE_ATTR_RW(pm_qos_no_power_off); 315 316 #ifdef CONFIG_PM_SLEEP 317 static const char _enabled[] = "enabled"; 318 static const char _disabled[] = "disabled"; 319 320 static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr, 321 char *buf) 322 { 323 return sysfs_emit(buf, "%s\n", device_can_wakeup(dev) 324 ? (device_may_wakeup(dev) ? _enabled : _disabled) 325 : ""); 326 } 327 328 static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr, 329 const char *buf, size_t n) 330 { 331 if (!device_can_wakeup(dev)) 332 return -EINVAL; 333 334 if (sysfs_streq(buf, _enabled)) 335 device_set_wakeup_enable(dev, 1); 336 else if (sysfs_streq(buf, _disabled)) 337 device_set_wakeup_enable(dev, 0); 338 else 339 return -EINVAL; 340 return n; 341 } 342 343 static DEVICE_ATTR_RW(wakeup); 344 345 static ssize_t wakeup_count_show(struct device *dev, 346 struct device_attribute *attr, char *buf) 347 { 348 unsigned long count = 0; 349 bool enabled = false; 350 351 spin_lock_irq(&dev->power.lock); 352 if (dev->power.wakeup) { 353 count = dev->power.wakeup->wakeup_count; 354 enabled = true; 355 } 356 spin_unlock_irq(&dev->power.lock); 357 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 358 } 359 360 static DEVICE_ATTR_RO(wakeup_count); 361 362 static ssize_t wakeup_active_count_show(struct device *dev, 363 struct device_attribute *attr, 364 char *buf) 365 { 366 unsigned long count = 0; 367 bool enabled = false; 368 369 spin_lock_irq(&dev->power.lock); 370 if (dev->power.wakeup) { 371 count = dev->power.wakeup->active_count; 372 enabled = true; 373 } 374 spin_unlock_irq(&dev->power.lock); 375 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 376 } 377 378 static DEVICE_ATTR_RO(wakeup_active_count); 379 380 static ssize_t wakeup_abort_count_show(struct device *dev, 381 struct device_attribute *attr, 382 char *buf) 383 { 384 unsigned long count = 0; 385 bool enabled = false; 386 387 spin_lock_irq(&dev->power.lock); 388 if (dev->power.wakeup) { 389 count = dev->power.wakeup->wakeup_count; 390 enabled = true; 391 } 392 spin_unlock_irq(&dev->power.lock); 393 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 394 } 395 396 static DEVICE_ATTR_RO(wakeup_abort_count); 397 398 static ssize_t wakeup_expire_count_show(struct device *dev, 399 struct device_attribute *attr, 400 char *buf) 401 { 402 unsigned long count = 0; 403 bool enabled = false; 404 405 spin_lock_irq(&dev->power.lock); 406 if (dev->power.wakeup) { 407 count = dev->power.wakeup->expire_count; 408 enabled = true; 409 } 410 spin_unlock_irq(&dev->power.lock); 411 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 412 } 413 414 static DEVICE_ATTR_RO(wakeup_expire_count); 415 416 static ssize_t wakeup_active_show(struct device *dev, 417 struct device_attribute *attr, char *buf) 418 { 419 unsigned int active = 0; 420 bool enabled = false; 421 422 spin_lock_irq(&dev->power.lock); 423 if (dev->power.wakeup) { 424 active = dev->power.wakeup->active; 425 enabled = true; 426 } 427 spin_unlock_irq(&dev->power.lock); 428 return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n"); 429 } 430 431 static DEVICE_ATTR_RO(wakeup_active); 432 433 static ssize_t wakeup_total_time_ms_show(struct device *dev, 434 struct device_attribute *attr, 435 char *buf) 436 { 437 s64 msec = 0; 438 bool enabled = false; 439 440 spin_lock_irq(&dev->power.lock); 441 if (dev->power.wakeup) { 442 msec = ktime_to_ms(dev->power.wakeup->total_time); 443 enabled = true; 444 } 445 spin_unlock_irq(&dev->power.lock); 446 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 447 } 448 449 static DEVICE_ATTR_RO(wakeup_total_time_ms); 450 451 static ssize_t wakeup_max_time_ms_show(struct device *dev, 452 struct device_attribute *attr, char *buf) 453 { 454 s64 msec = 0; 455 bool enabled = false; 456 457 spin_lock_irq(&dev->power.lock); 458 if (dev->power.wakeup) { 459 msec = ktime_to_ms(dev->power.wakeup->max_time); 460 enabled = true; 461 } 462 spin_unlock_irq(&dev->power.lock); 463 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 464 } 465 466 static DEVICE_ATTR_RO(wakeup_max_time_ms); 467 468 static ssize_t wakeup_last_time_ms_show(struct device *dev, 469 struct device_attribute *attr, 470 char *buf) 471 { 472 s64 msec = 0; 473 bool enabled = false; 474 475 spin_lock_irq(&dev->power.lock); 476 if (dev->power.wakeup) { 477 msec = ktime_to_ms(dev->power.wakeup->last_time); 478 enabled = true; 479 } 480 spin_unlock_irq(&dev->power.lock); 481 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 482 } 483 484 static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid, 485 kgid_t kgid) 486 { 487 if (dev->power.wakeup && dev->power.wakeup->dev) 488 return device_change_owner(dev->power.wakeup->dev, kuid, kgid); 489 return 0; 490 } 491 492 static DEVICE_ATTR_RO(wakeup_last_time_ms); 493 494 #ifdef CONFIG_PM_AUTOSLEEP 495 static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev, 496 struct device_attribute *attr, 497 char *buf) 498 { 499 s64 msec = 0; 500 bool enabled = false; 501 502 spin_lock_irq(&dev->power.lock); 503 if (dev->power.wakeup) { 504 msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time); 505 enabled = true; 506 } 507 spin_unlock_irq(&dev->power.lock); 508 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 509 } 510 511 static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms); 512 #endif /* CONFIG_PM_AUTOSLEEP */ 513 #else /* CONFIG_PM_SLEEP */ 514 static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid, 515 kgid_t kgid) 516 { 517 return 0; 518 } 519 #endif 520 521 #ifdef CONFIG_PM_ADVANCED_DEBUG 522 static ssize_t runtime_usage_show(struct device *dev, 523 struct device_attribute *attr, char *buf) 524 { 525 return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count)); 526 } 527 static DEVICE_ATTR_RO(runtime_usage); 528 529 static ssize_t runtime_active_kids_show(struct device *dev, 530 struct device_attribute *attr, 531 char *buf) 532 { 533 return sysfs_emit(buf, "%d\n", dev->power.ignore_children ? 534 0 : atomic_read(&dev->power.child_count)); 535 } 536 static DEVICE_ATTR_RO(runtime_active_kids); 537 538 static ssize_t runtime_enabled_show(struct device *dev, 539 struct device_attribute *attr, char *buf) 540 { 541 const char *output; 542 543 if (dev->power.disable_depth && !dev->power.runtime_auto) 544 output = "disabled & forbidden"; 545 else if (dev->power.disable_depth) 546 output = "disabled"; 547 else if (!dev->power.runtime_auto) 548 output = "forbidden"; 549 else 550 output = "enabled"; 551 552 return sysfs_emit(buf, "%s\n", output); 553 } 554 static DEVICE_ATTR_RO(runtime_enabled); 555 556 #ifdef CONFIG_PM_SLEEP 557 static ssize_t async_show(struct device *dev, struct device_attribute *attr, 558 char *buf) 559 { 560 return sysfs_emit(buf, "%s\n", 561 device_async_suspend_enabled(dev) ? 562 _enabled : _disabled); 563 } 564 565 static ssize_t async_store(struct device *dev, struct device_attribute *attr, 566 const char *buf, size_t n) 567 { 568 if (sysfs_streq(buf, _enabled)) 569 device_enable_async_suspend(dev); 570 else if (sysfs_streq(buf, _disabled)) 571 device_disable_async_suspend(dev); 572 else 573 return -EINVAL; 574 return n; 575 } 576 577 static DEVICE_ATTR_RW(async); 578 579 #endif /* CONFIG_PM_SLEEP */ 580 #endif /* CONFIG_PM_ADVANCED_DEBUG */ 581 582 static struct attribute *power_attrs[] = { 583 #ifdef CONFIG_PM_ADVANCED_DEBUG 584 #ifdef CONFIG_PM_SLEEP 585 &dev_attr_async.attr, 586 #endif 587 &dev_attr_runtime_status.attr, 588 &dev_attr_runtime_usage.attr, 589 &dev_attr_runtime_active_kids.attr, 590 &dev_attr_runtime_enabled.attr, 591 #endif /* CONFIG_PM_ADVANCED_DEBUG */ 592 NULL, 593 }; 594 static const struct attribute_group pm_attr_group = { 595 .name = power_group_name, 596 .attrs = power_attrs, 597 }; 598 599 static struct attribute *wakeup_attrs[] = { 600 #ifdef CONFIG_PM_SLEEP 601 &dev_attr_wakeup.attr, 602 &dev_attr_wakeup_count.attr, 603 &dev_attr_wakeup_active_count.attr, 604 &dev_attr_wakeup_abort_count.attr, 605 &dev_attr_wakeup_expire_count.attr, 606 &dev_attr_wakeup_active.attr, 607 &dev_attr_wakeup_total_time_ms.attr, 608 &dev_attr_wakeup_max_time_ms.attr, 609 &dev_attr_wakeup_last_time_ms.attr, 610 #ifdef CONFIG_PM_AUTOSLEEP 611 &dev_attr_wakeup_prevent_sleep_time_ms.attr, 612 #endif 613 #endif 614 NULL, 615 }; 616 static const struct attribute_group pm_wakeup_attr_group = { 617 .name = power_group_name, 618 .attrs = wakeup_attrs, 619 }; 620 621 static struct attribute *runtime_attrs[] = { 622 #ifndef CONFIG_PM_ADVANCED_DEBUG 623 &dev_attr_runtime_status.attr, 624 #endif 625 &dev_attr_control.attr, 626 &dev_attr_runtime_suspended_time.attr, 627 &dev_attr_runtime_active_time.attr, 628 &dev_attr_autosuspend_delay_ms.attr, 629 NULL, 630 }; 631 static const struct attribute_group pm_runtime_attr_group = { 632 .name = power_group_name, 633 .attrs = runtime_attrs, 634 }; 635 636 static struct attribute *pm_qos_resume_latency_attrs[] = { 637 &dev_attr_pm_qos_resume_latency_us.attr, 638 NULL, 639 }; 640 static const struct attribute_group pm_qos_resume_latency_attr_group = { 641 .name = power_group_name, 642 .attrs = pm_qos_resume_latency_attrs, 643 }; 644 645 static struct attribute *pm_qos_latency_tolerance_attrs[] = { 646 &dev_attr_pm_qos_latency_tolerance_us.attr, 647 NULL, 648 }; 649 static const struct attribute_group pm_qos_latency_tolerance_attr_group = { 650 .name = power_group_name, 651 .attrs = pm_qos_latency_tolerance_attrs, 652 }; 653 654 static struct attribute *pm_qos_flags_attrs[] = { 655 &dev_attr_pm_qos_no_power_off.attr, 656 NULL, 657 }; 658 static const struct attribute_group pm_qos_flags_attr_group = { 659 .name = power_group_name, 660 .attrs = pm_qos_flags_attrs, 661 }; 662 663 int dpm_sysfs_add(struct device *dev) 664 { 665 int rc; 666 667 /* No need to create PM sysfs if explicitly disabled. */ 668 if (device_pm_not_required(dev)) 669 return 0; 670 671 rc = sysfs_create_group(&dev->kobj, &pm_attr_group); 672 if (rc) 673 return rc; 674 675 if (!pm_runtime_has_no_callbacks(dev)) { 676 rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); 677 if (rc) 678 goto err_out; 679 } 680 if (device_can_wakeup(dev)) { 681 rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); 682 if (rc) 683 goto err_runtime; 684 } 685 if (dev->power.set_latency_tolerance) { 686 rc = sysfs_merge_group(&dev->kobj, 687 &pm_qos_latency_tolerance_attr_group); 688 if (rc) 689 goto err_wakeup; 690 } 691 rc = pm_wakeup_source_sysfs_add(dev); 692 if (rc) 693 goto err_latency; 694 return 0; 695 696 err_latency: 697 sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); 698 err_wakeup: 699 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 700 err_runtime: 701 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); 702 err_out: 703 sysfs_remove_group(&dev->kobj, &pm_attr_group); 704 return rc; 705 } 706 707 int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) 708 { 709 int rc; 710 711 if (device_pm_not_required(dev)) 712 return 0; 713 714 rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid); 715 if (rc) 716 return rc; 717 718 if (!pm_runtime_has_no_callbacks(dev)) { 719 rc = sysfs_group_change_owner( 720 &dev->kobj, &pm_runtime_attr_group, kuid, kgid); 721 if (rc) 722 return rc; 723 } 724 725 if (device_can_wakeup(dev)) { 726 rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group, 727 kuid, kgid); 728 if (rc) 729 return rc; 730 731 rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid); 732 if (rc) 733 return rc; 734 } 735 736 if (dev->power.set_latency_tolerance) { 737 rc = sysfs_group_change_owner( 738 &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid, 739 kgid); 740 if (rc) 741 return rc; 742 } 743 return 0; 744 } 745 746 int wakeup_sysfs_add(struct device *dev) 747 { 748 int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); 749 750 if (!ret) 751 kobject_uevent(&dev->kobj, KOBJ_CHANGE); 752 753 return ret; 754 } 755 756 void wakeup_sysfs_remove(struct device *dev) 757 { 758 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 759 kobject_uevent(&dev->kobj, KOBJ_CHANGE); 760 } 761 762 int pm_qos_sysfs_add_resume_latency(struct device *dev) 763 { 764 return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); 765 } 766 767 void pm_qos_sysfs_remove_resume_latency(struct device *dev) 768 { 769 sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); 770 } 771 772 int pm_qos_sysfs_add_flags(struct device *dev) 773 { 774 return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group); 775 } 776 777 void pm_qos_sysfs_remove_flags(struct device *dev) 778 { 779 sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); 780 } 781 782 int pm_qos_sysfs_add_latency_tolerance(struct device *dev) 783 { 784 return sysfs_merge_group(&dev->kobj, 785 &pm_qos_latency_tolerance_attr_group); 786 } 787 788 void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) 789 { 790 sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); 791 } 792 793 void rpm_sysfs_remove(struct device *dev) 794 { 795 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); 796 } 797 798 void dpm_sysfs_remove(struct device *dev) 799 { 800 if (device_pm_not_required(dev)) 801 return; 802 sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); 803 dev_pm_qos_constraints_destroy(dev); 804 rpm_sysfs_remove(dev); 805 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 806 sysfs_remove_group(&dev->kobj, &pm_attr_group); 807 } 808