// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
				  int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
				   int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
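
/*
 * A minimal, hypothetical provider sketch (not part of this file): a domain
 * that must be powered on/off from atomic context sets GENPD_FLAG_IRQ_SAFE
 * before pm_genpd_init(), which makes genpd_lock_init() further down in this
 * file pick genpd_spin_ops rather than genpd_mtx_ops:
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name	= "my_pd",
 *		.flags	= GENPD_FLAG_IRQ_SAFE,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 */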

static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
						const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
			      genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

struct device *dev_to_genpd_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	if (IS_ERR(genpd))
		return ERR_CAST(genpd);

	return &genpd->dev;
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	if (!genpd_debugfs_dir)
		return;

	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is ON, the domain has just come out of the OFF
	 * state, so the elapsed time was spent idle and is accounted as idle
	 * time. Otherwise it was spent ON, so update the on-time instead.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking, as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (the subdomain's performance
	 * state requirement to the parent domain) is different from
	 * link->child->performance_state (the current performance state
	 * requirement of the devices/sub-domains of the subdomain) and so can
	 * have a different value.
	 *
	 * Note that we also take votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth);

static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state;

	genpd_lock_nested(parent, depth + 1);

	parent_state = link->prev_performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
		pr_err("%s: Failed to roll back to %d performance state\n",
		       parent->name, parent_state);
	}

	genpd_unlock(parent);
}

static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
				   struct gpd_link *link,
				   unsigned int state, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state, ret;

	/* Find parent's performance state */
	ret = genpd_xlate_performance_state(genpd, parent, state);
	if (unlikely(ret < 0))
		return ret;

	parent_state = ret;

	genpd_lock_nested(parent, depth + 1);

	link->prev_performance_state = link->performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
	if (ret)
		link->performance_state = link->prev_performance_state;

	genpd_unlock(parent);

	return ret;
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct gpd_link *link = NULL;
	int ret;

	if (state == genpd->performance_state)
		return 0;

	/* When scaling up, propagate to parents first in normal order */
	if (state > genpd->performance_state) {
		list_for_each_entry(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_up;
		}
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret) {
			if (link)
				goto rollback_parents_up;
			return ret;
		}
	}

	/* When scaling down, propagate to parents last in reverse order */
	if (state < genpd->performance_state) {
		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_down;
		}
	}

	genpd->performance_state = state;
	return 0;

rollback_parents_up:
	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
rollback_parents_down:
	list_for_each_entry_continue(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

static int genpd_dev_pm_set_performance_state(struct device *dev,
					      unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	int ret = 0;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when
 *	the device doesn't have any performance state constraints left (and so
 *	the device would no longer participate in determining the target
 *	performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
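
/*
 * Illustrative sketch (the consumer driver and the state value are
 * hypothetical): a driver attached to a genpd with performance states can
 * vote for a state around a high-load phase and drop the vote afterwards:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 2);
 *	if (ret)
 *		return ret;
 *	... do work that requires performance state 2 ...
 *	dev_pm_genpd_set_performance_state(dev, 0);
 *
 * A state of 0 removes the device's constraint; the domain's resulting state
 * is then re-aggregated from the remaining device and subdomain votes in
 * _genpd_reeval_performance_state().
 */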

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform the PM framework of the next wakeup. It's assumed
 * that the users guarantee that the genpd wouldn't be detached while this
 * routine is getting called. Additionally, it's also assumed that @dev isn't
 * runtime suspended (RPM_SUSPENDED).
 *
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so a stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);

/**
 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 * @dev: A device that is attached to the genpd.
 *
 * This routine should typically be called for a device at the point when a
 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 *
 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 * valid value has been set.
 */
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return KTIME_MAX;

	if (genpd->gd)
		return genpd->gd->next_hrtimer;

	return KTIME_MAX;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);

/*
 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
 *
 * @dev: A device that is attached to the genpd.
 *
 * Allows a consumer of the genpd to notify the provider that the next power
 * off should be synchronous.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 */
void dev_pm_genpd_synced_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	genpd_lock(genpd);
	genpd->synced_poweroff = true;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);

/**
 * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
 *
 * @dev: Device for which the HW-mode should be changed.
 * @enable: Value to set or unset the HW-mode.
 *
 * Some PM domains can rely on HW signals to control the power for a device. To
 * allow a consumer driver to switch the behaviour for its device at runtime,
 * which may be beneficial from a latency or energy point of view, this function
 * may be called.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (!genpd->set_hwmode_dev)
		return -EOPNOTSUPP;

	genpd_lock(genpd);

	if (dev_gpd_data(dev)->hw_mode == enable)
		goto out;

	ret = genpd->set_hwmode_dev(genpd, dev, enable);
	if (!ret)
		dev_gpd_data(dev)->hw_mode = enable;

out:
	genpd_unlock(genpd);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
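
/*
 * Illustrative sketch (the calling driver is hypothetical): a consumer whose
 * provider implements ->set_hwmode_dev() can hand power control to the HW
 * while idle and take it back under SW control before register accesses:
 *
 *	ret = dev_pm_genpd_set_hwmode(dev, true);	[HW controls the power]
 *	...
 *	ret = dev_pm_genpd_set_hwmode(dev, false);	[back under SW control]
 *
 * Since providers without ->set_hwmode_dev() return -EOPNOTSUPP, callers
 * should treat the HW mode as an optional optimization.
 */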

/**
 * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
 *
 * @dev: Device for which the current HW-mode setting should be fetched.
 *
 * This helper function allows consumer drivers to fetch the current HW mode
 * setting of the device.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: The HW mode setting of the device, from the SW-cached hw_mode.
 */
bool dev_pm_genpd_get_hwmode(struct device *dev)
{
	return dev_gpd_data(dev)->hw_mode;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	genpd->synced_poweroff = false;
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}
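
/*
 * Illustrative provider sketch (the register and its semantics are
 * hypothetical): the ->power_on()/->power_off() callbacks invoked from
 * _genpd_power_on()/_genpd_power_off() above typically toggle a power
 * controller register:
 *
 *	static int my_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		writel(1, my_pd_ctrl_reg);
 *		return 0;
 *	}
 *
 *	static int my_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		writel(0, my_pd_ctrl_reg);
 *		return 0;
 *	}
 *
 * A non-zero return value from ->power_off() is treated as "busy" and keeps
 * the domain powered on.
 */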

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
	    genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that there's no need for
	 * additional locking, as powering on a child requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return -EBUSY;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
		    irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

err:
	list_for_each_entry_continue_reverse(link,
					     &genpd->child_links,
					     child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for purposes other than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_unlock(genpd);

	return 0;
}
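
/*
 * Illustrative sketch (the driver code is hypothetical): genpd_runtime_suspend()
 * is not called directly by drivers; it runs when runtime PM decides to suspend
 * a device in the domain, e.g. as a consequence of:
 *
 *	pm_runtime_enable(dev);
 *	pm_runtime_get_sync(dev);	[device, and thus its domain, powered on]
 *	... device I/O ...
 *	pm_runtime_put_sync(dev);	[may end up in genpd_runtime_suspend()
 *					 and power off the whole domain, if this
 *					 was the last active device in it]
 */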

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non-IRQ-safe domain that holds an IRQ-safe
	 * device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	pr_info("genpd: Disabling unused power domains\n");
	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall_sync(genpd_power_off_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false)) {
		genpd->states[genpd->state_idx].rejected++;
		return;
	}

	genpd->states[genpd->state_idx].usage++;

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	genpd_lock(genpd);
	genpd->prepared_count++;
	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O pm domain.
 * @dev: Device to suspend.
 * @suspend_noirq: Generic suspend_noirq callback.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev,
				int (*suspend_noirq)(struct device *dev),
				int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_suspend_noirq,
				    pm_generic_resume_noirq);
}

/**
 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_finish_resume(struct device *dev,
			       int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return resume_noirq(dev);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_resume_noirq);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_freeze_noirq,
				    pm_generic_thaw_noirq);
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_poweroff_noirq,
				    pm_generic_restore_noirq);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_restore_noirq);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
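
/*
 * Illustrative sketch (the syscore ops and my_dev are hypothetical): a driver
 * whose device must be suspended in the syscore phase, where the regular PM
 * callbacks are no longer invoked, can call this helper and its counterpart
 * below directly:
 *
 *	static int my_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(my_dev);
 *		return 0;
 *	}
 *
 *	static void my_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(my_dev);
 *	}
 */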

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be resumed
 * during the syscore resume phase. It may also be called during suspend-to-idle
 * to resume a corresponding CPU device that is attached to a genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							    bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	gpd_data->hw_mode = genpd->get_hwmode_dev ? genpd->get_hwmode_dev(genpd, dev) : false;

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	if (!genpd || !dev)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	if (genpd->gd)
		genpd->gd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}
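
/*
 * Illustrative sketch (the domain and device are hypothetical): platform code
 * that doesn't use a firmware-described provider can attach and detach devices
 * by hand:
 *
 *	ret = pm_genpd_add_device(&my_pd, dev);
 *	...
 *	ret = pm_genpd_remove_device(dev);
 *
 * Removal fails with -EAGAIN while a system-wide transition is in progress
 * (genpd->prepared_count > 0), so callers may need to retry.
 */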

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
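
/*
 * Illustrative sketch (the callback and notifier block are hypothetical): a
 * consumer can react to domain power transitions, e.g. to save and restore
 * context around a power cut:
 *
 *	static int my_pd_notify(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			[save context]
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			[restore context]
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	my_nb.notifier_call = my_pd_notify;
 *	ret = dev_pm_genpd_add_notifier(dev, &my_nb);
 */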

/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
		     genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Leader PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
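
/*
 * Illustrative sketch (the domains are hypothetical): building a two-level
 * hierarchy in which child_pd can only be on while parent_pd is on:
 *
 *	ret = pm_genpd_init(&parent_pd, NULL, true);
 *	ret = pm_genpd_init(&child_pd, NULL, true);
 *	ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
 *
 * Powering on child_pd then propagates upwards via genpd_power_on(), while
 * parent_pd's sd_count keeps it from being powered off as long as child_pd
 * is on.
 */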
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
		if (link->child != subdomain)
			continue;

		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free_states = genpd_free_default_power_state;

	return 0;
}

static int genpd_alloc_data(struct generic_pm_domain *genpd)
{
	struct genpd_governor_data *gd = NULL;
	int ret;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	if (genpd->gov) {
		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
		if (!gd) {
			ret = -ENOMEM;
			goto free;
		}

		gd->max_off_time_ns = -1;
		gd->max_off_time_changed = true;
		gd->next_wakeup = KTIME_MAX;
		gd->next_hrtimer = KTIME_MAX;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			goto free;
	}

	genpd->gd = gd;
	return 0;

free:
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	kfree(gd);
	return ret;
}

static void genpd_free_data(struct generic_pm_domain *genpd)
{
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);
	kfree(genpd->gd);
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd_is_irq_safe(genpd)) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Whether the domain is initially powered off.
 *
 * Returns 0 on successful initialization, else a negative error code.
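 *
 * A minimal provider-side sketch, for illustration only (the domain name and
 * the power callbacks are made up):
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	err = pm_genpd_init(&foo_pd, NULL, true);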
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->parent_links);
	INIT_LIST_HEAD(&genpd->child_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
	genpd->device_count = 0;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get_mono_fast_ns();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;
	genpd->domain.start = genpd_dev_pm_start;
	genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* The always-on governor works better with the corresponding flag. */
	if (gov == &pm_domain_always_on_gov)
		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
	    !genpd_status_on(genpd)) {
		pr_err("always-on PM domain %s is not on\n", genpd->name);
		return -EINVAL;
	}

	/* Multiple states but no governor doesn't make sense. */
	if (!gov && genpd->state_count > 1)
		pr_warn("%s: no governor for states\n", genpd->name);

	ret = genpd_alloc_data(genpd);
	if (ret)
		return ret;

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
	genpd_debug_add(genpd);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);

static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	genpd_debug_remove(genpd);
	cancel_work_sync(&genpd->power_off_work);
	genpd_free_data(genpd);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 * - Removes the PM domain as a subdomain to any parent domains,
 *   if it was added.
 * - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed if the associated provider has
 * been removed, it is not a parent to any other PM domain, and it has
 * no devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *	into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of the xlate function
 * needs to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					const struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					const struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;
	fwnode_dev_initialized(&np->fwnode, true);

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	bool ret = false;
	const struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd == genpd) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret;

	if (!np || !genpd)
		return -EINVAL;

	if (!genpd_present(genpd))
		return -EINVAL;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret)
			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(IS_ERR(genpd->opp_table));
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		return ret;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err_probe(&genpd->dev, ret,
					      "Failed to add OPP table for index %d\n", i);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
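			 * The reference taken here is dropped again in
			 * of_genpd_del_provider() or on an error below.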
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
			WARN_ON(IS_ERR(genpd->opp_table));
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, clear 'has_provider' so that
			 * the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			fwnode_dev_initialized(&cp->node->fwnode, false);
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses the xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					const struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our list */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for PM domain look-up
 * @dev: Device to be added.
 *
 * Looks up an I/O PM domain based upon the phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
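 *
 * A minimal sketch of the look-up, assuming @np and @dev were obtained
 * elsewhere:
 *
 *	struct of_phandle_args pd_args;
 *
 *	err = of_parse_phandle_with_args(np, "power-domains",
 *					 "#power-domain-cells", 0, &pd_args);
 *	if (!err)
 *		err = of_genpd_add_device(&pd_args, dev);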
 */
int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	if (!dev)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, dev);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and subdomain based upon the phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
			   const struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret == -ENOENT ? -EPROBE_DEFER : ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and subdomain based upon the phandle args
 * provided and removes the subdomain from the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
			      const struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = pm_genpd_remove_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to device node associated with provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the device node @np that is passed. The PM domain will
 * only be removed if the provider associated with the domain has been
 * removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
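 *
 * A minimal teardown sketch, assuming the provider allocated its domains
 * dynamically:
 *
 *	of_genpd_del_provider(np);
 *	while (!IS_ERR(gpd = of_genpd_remove_last(np)))
 *		kfree(gpd);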
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);

static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}

static const struct bus_type genpd_bus_type = {
	.name		= "genpd",
};

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If one is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	/* Drop the default performance state */
	if (dev_gpd_data(dev)->default_pstate) {
		dev_pm_genpd_set_performance_state(dev, 0);
		dev_gpd_data(dev)->default_pstate = 0;
	}

	/* Retry with exponentially increasing delays, capped by GENPD_RETRY_MAX_MS. */
	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd.
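	 * Such devices are the virtual ones created by
	 * genpd_dev_pm_attach_by_id().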
	 */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int pstate;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	/* Set the default performance state */
	pstate = of_get_required_opp_performance_state(dev->of_node, index);
	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
		ret = pstate;
		goto err;
	} else if (pstate > 0) {
		ret = dev_pm_genpd_set_performance_state(dev, pstate);
		if (ret)
			goto err;
		dev_gpd_data(dev)->default_pstate = pstate;
	}

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret) {
		/* Drop the default performance state */
		if (dev_gpd_data(dev)->default_pstate) {
			dev_pm_genpd_set_performance_state(dev, 0);
			dev_gpd_data(dev)->default_pstate = 0;
		}

		genpd_remove_device(pd, dev);
		return -EPROBE_DEFER;
	}

	return 1;

err:
	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
		pd->name, ret);
	genpd_remove_device(pd, dev);
	return ret;
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to the retrieved pm_domain ops.
 *
 * Returns 1 when a PM domain is successfully attached, 0 when the device
 * doesn't need a PM domain or when multiple power-domains exist for it,
 * else a negative error code. Note that if a power-domain exists for the
 * device, but it cannot be found or turned on, then -EPROBE_DEFER is
 * returned to ensure that the device is not probed and to re-try again
 * later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
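	 * Consumers that need more than one domain are expected to use
	 * genpd_dev_pm_attach_by_id() or genpd_dev_pm_attach_by_name()
	 * instead.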
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);

/**
 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier at the provided @index.
 * If such is found, creates a virtual device and attaches it to the retrieved
 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
 *
 * Returns the created virtual device if the PM domain is successfully
 * attached, NULL when the device doesn't need a PM domain, else an ERR_PTR()
 * in case of failures. If a power-domain exists for the device, but cannot
 * be found or turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure
 * that the device is not probed and to re-try again later.
 */
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index)
{
	struct device *virt_dev;
	int num_domains;
	int ret;

	if (!dev->of_node)
		return NULL;

	/* Verify that the index is within a valid range. */
	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
						 "#power-domain-cells");
	if (index >= num_domains)
		return NULL;

	/* Allocate and register device on the genpd bus. */
	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
	if (!virt_dev)
		return ERR_PTR(-ENOMEM);

	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
	virt_dev->bus = &genpd_bus_type;
	virt_dev->release = genpd_release_dev;
	virt_dev->of_node = of_node_get(dev->of_node);

	ret = device_register(virt_dev);
	if (ret) {
		put_device(virt_dev);
		return ERR_PTR(ret);
	}

	/* Try to attach the device to the PM domain at the specified index. */
	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
	if (ret < 1) {
		device_unregister(virt_dev);
		return ret ? ERR_PTR(ret) : NULL;
	}

	pm_runtime_enable(virt_dev);
	genpd_queue_power_off_work(dev_to_genpd(virt_dev));

	return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);

/**
 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
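 *
 * A minimal consumer-side sketch ("perf" is an illustrative domain name):
 *
 *	pd_dev = genpd_dev_pm_attach_by_name(dev, "perf");
 *	if (IS_ERR(pd_dev))
 *		return PTR_ERR(pd_dev);
 *	if (!pd_dev)
 *		dev_dbg(dev, "no perf power domain\n");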
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
	int index;

	if (!dev->of_node)
		return NULL;

	index = of_property_match_string(dev->of_node, "power-domain-names",
					 name);
	if (index < 0)
		return NULL;

	return genpd_dev_pm_attach_by_id(dev, index);
}

static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
			     struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
				   &entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
				   &exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000LL * residency;

	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}

static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret == -ENOENT ? 0 : ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;

		if (!of_device_is_available(np))
			continue;

		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function, and it is the responsibility of the caller to
 * free it after use. Returns 0 if zero or more compatible domain idle states
 * are found; in case of an error, a negative error code is returned.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);

static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_printf(s, "%-25s  ", p);
}

static void mode_status_str(struct seq_file *s, struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);

	seq_printf(s, "%20s", gpd_data->hw_mode ? "HW" : "SW");
}

static void perf_status_str(struct seq_file *s, struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	seq_put_decimal_ull(s, "", gpd_data->performance_state);
}

static int genpd_summary_one(struct seq_file *s,
			     struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (list_is_first(&link->parent_node, &genpd->parent_links))
			seq_printf(s, "\n%48s", " ");
		seq_printf(s, "%s", link->child->name);
		if (!list_is_last(&link->parent_node, &genpd->parent_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		perf_status_str(s, pm_data->dev);
		mode_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          children        performance\n");
	seq_puts(s, "    /device                                             runtime status                  managed by\n");
	seq_puts(s, "------------------------------------------------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;

	if (genpd->status == GENPD_STATE_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			   genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->parent_links, parent_node)
		seq_printf(s, "%s\n", link->child->name);

	genpd_unlock(genpd);
	return ret;
}

static int idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, delta, idle_time = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");

	for (i = 0; i < genpd->state_count; i++) {
		idle_time += genpd->states[i].idle_time;

		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
			now = ktime_get_mono_fast_ns();
			if (now > genpd->accounting_time) {
				delta = now - genpd->accounting_time;
				idle_time += delta;
			}
		}

		do_div(idle_time, NSEC_PER_MSEC);
		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
			   genpd->states[i].usage, genpd->states[i].rejected);
	}

	genpd_unlock(genpd);
	return ret;
}

static int active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, on_time, delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GENPD_STATE_ON) {
		now = ktime_get_mono_fast_ns();
		if (now > genpd->accounting_time)
			delta = now - genpd->accounting_time;
	}

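	/* Add the not-yet-accounted time of the current ON phase, if any. */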
	on_time = genpd->on_time + delta;
	do_div(on_time, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", on_time);

	genpd_unlock(genpd);
	return ret;
}

static int total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, delta, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {
		total += genpd->states[i].idle_time;

		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
			now = ktime_get_mono_fast_ns();
			if (now > genpd->accounting_time) {
				delta = now - genpd->accounting_time;
				total += delta;
			}
		}
	}

	do_div(total, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", total);

	genpd_unlock(genpd);
	return ret;
}


static int devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

static int perf_state_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;

	if (genpd_lock_interruptible(genpd))
		return -ERESTARTSYS;

	seq_printf(s, "%u\n", genpd->performance_state);

	genpd_unlock(genpd);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);

static void genpd_debug_add(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);

	debugfs_create_file("current_state", 0444,
			    d, genpd, &status_fops);
	debugfs_create_file("sub_domains", 0444,
			    d, genpd, &sub_domains_fops);
	debugfs_create_file("idle_states", 0444,
			    d, genpd, &idle_states_fops);
	debugfs_create_file("active_time", 0444,
			    d, genpd, &active_time_fops);
	debugfs_create_file("total_idle_time", 0444,
			    d, genpd, &total_idle_time_fops);
	debugfs_create_file("devices", 0444,
			    d, genpd, &devices_fops);
	if (genpd->set_performance_state)
		debugfs_create_file("perf_state", 0444,
				    d, genpd, &perf_state_fops);
}

static int __init genpd_debug_init(void)
{
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
			    NULL, &summary_fops);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_debug_add(genpd);

	return 0;
}
late_initcall(genpd_debug_init);

static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */