// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

/* Provides a unique ID for each genpd device */
static DEFINE_IDA(genpd_ida);

/* The bus for genpd_providers. */
static const struct bus_type genpd_provider_bus_type = {
	.name = "genpd_provider",
};

/* The parent for genpd_provider devices. */
static struct device genpd_provider_bus = {
	.init_name = "genpd_provider",
};

#define GENPD_RETRY_MAX_MS	250	/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
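/*
 * Illustrative sketch (not part of the original file): what the macro above
 * expands to for the genpd_stop_dev() helper further down, i.e.
 *
 *	ret = GENPD_DEV_CALLBACK(genpd, int, stop, dev);
 *
 * becomes, roughly:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *	ret = __ret;
 */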
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
				  int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
				   int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
	genpd->raw_lock_flags = flags;
}

static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
				       int depth)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
	genpd->raw_lock_flags = flags;
}

static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
	genpd->raw_lock_flags = flags;
	return 0;
}

static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->raw_slock)
{
	raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
}

static const struct genpd_lock_ops genpd_raw_spin_ops = {
	.lock = genpd_lock_raw_spin,
	.lock_nested = genpd_lock_nested_raw_spin,
	.lock_interruptible = genpd_lock_interruptible_raw_spin,
	.unlock = genpd_unlock_raw_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)
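/*
 * Illustrative sketch (not part of the original file): the macros above make
 * callers independent of the lock flavour a genpd was initialized with. The
 * initialization code (elsewhere in this file, not shown here) is assumed to
 * pick genpd_spin_ops or genpd_raw_spin_ops for IRQ-safe domains and
 * genpd_mtx_ops otherwise, so the same pattern works either way:
 *
 *	genpd_lock(genpd);
 *	if (genpd_status_on(genpd))
 *		do_something(genpd);	// do_something() is hypothetical
 *	genpd_unlock(genpd);
 */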
#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
#define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)

static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
			      dev_name(&genpd->dev));

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

struct device *dev_to_genpd_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	if (IS_ERR(genpd))
		return ERR_CAST(genpd);

	return &genpd->dev;
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	if (!genpd_debugfs_dir)
		return;

	debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is active, it means we have just come out of the
	 * off state, so update the idle time, and vice versa.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
static void genpd_reflect_residency(struct generic_pm_domain *genpd)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct genpd_power_state *state, *next_state;
	unsigned int state_idx;
	s64 sleep_ns, target_ns;

	if (!gd || !gd->reflect_residency)
		return;

	sleep_ns = ktime_to_ns(ktime_sub(ktime_get(), gd->last_enter));
	state_idx = genpd->state_idx;
	state = &genpd->states[state_idx];
	target_ns = state->power_off_latency_ns + state->residency_ns;

	if (sleep_ns < target_ns) {
		state->above++;
	} else if (state_idx < (genpd->state_count - 1)) {
		next_state = &genpd->states[state_idx + 1];
		target_ns = next_state->power_off_latency_ns +
			    next_state->residency_ns;

		if (sleep_ns >= target_ns)
			state->below++;
	}

	gd->reflect_residency = false;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
static inline void genpd_reflect_residency(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}
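/*
 * Worked example (illustrative numbers only): assume a genpd whose current
 * performance_state is 100 and a device lowers its vote from 100 to 50. The
 * new request is below the current aggregate, so the walk above is taken. If
 * another device in dev_list still votes 80 and a subdomain's link votes 60,
 * the re-evaluated state becomes max(50, 80, 60) = 80.
 */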
static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth);

static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state;

	genpd_lock_nested(parent, depth + 1);

	parent_state = link->prev_performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
		pr_err("%s: Failed to roll back to %d performance state\n",
		       parent->name, parent_state);
	}

	genpd_unlock(parent);
}

static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
				   struct gpd_link *link,
				   unsigned int state, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state, ret;

	/* Find parent's performance state */
	ret = genpd_xlate_performance_state(genpd, parent, state);
	if (unlikely(ret < 0))
		return ret;

	parent_state = ret;

	genpd_lock_nested(parent, depth + 1);

	link->prev_performance_state = link->performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
	if (ret)
		link->performance_state = link->prev_performance_state;

	genpd_unlock(parent);

	return ret;
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct gpd_link *link = NULL;
	int ret;

	if (state == genpd->performance_state)
		return 0;

	/* When scaling up, propagate to parents first in normal order */
	if (state > genpd->performance_state) {
		list_for_each_entry(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_up;
		}
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret) {
			if (link)
				goto rollback_parents_up;
			return ret;
		}
	}

	/* When scaling down, propagate to parents last in reverse order */
	if (state < genpd->performance_state) {
		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_down;
		}
	}

	genpd->performance_state = state;
	return 0;

rollback_parents_up:
	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
rollback_parents_down:
	list_for_each_entry_continue(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
}
static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

static int genpd_dev_pm_set_performance_state(struct device *dev,
					      unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	int ret = 0;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when
 *	   the device doesn't have any performance-state constraints left (and
 *	   so the device no longer participates in determining the target
 *	   performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
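/*
 * Illustrative consumer sketch (hypothetical driver code, not part of this
 * file): a driver attached to a genpd with performance states could vote for
 * a state while active and drop the vote when going idle:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 2);
 *	if (ret)
 *		return ret;
 *	...
 *	dev_pm_genpd_set_performance_state(dev, 0);
 *
 * In practice drivers often express this indirectly, via required-opps in
 * the OPP table, rather than calling this helper directly.
 */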
/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. It's assumed that the users
 * guarantee that the genpd wouldn't be detached while this routine is getting
 * called. Additionally, it's also assumed that @dev isn't runtime suspended
 * (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so a stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);

/**
 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 * @dev: A device that is attached to the genpd.
 *
 * This routine should typically be called for a device, at the point of when a
 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 *
 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 * valid value has been set.
 */
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return KTIME_MAX;

	if (genpd->gd)
		return genpd->gd->next_hrtimer;

	return KTIME_MAX;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);

/*
 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
 *
 * @dev: A device that is attached to the genpd.
 *
 * Allows a consumer of the genpd to notify the provider that the next power
 * off should be synchronous.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 */
void dev_pm_genpd_synced_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	genpd_lock(genpd);
	genpd->synced_poweroff = true;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);

/**
 * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
 *
 * @dev: Device for which the HW-mode should be changed.
 * @enable: Value to set or unset the HW-mode.
 *
 * Some PM domains can rely on HW signals to control the power for a device. To
 * allow a consumer driver to switch the behaviour for its device at runtime,
 * which may be beneficial from a latency or energy point of view, this function
 * may be called.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (!genpd->set_hwmode_dev)
		return -EOPNOTSUPP;

	genpd_lock(genpd);

	if (dev_gpd_data(dev)->hw_mode == enable)
		goto out;

	ret = genpd->set_hwmode_dev(genpd, dev, enable);
	if (!ret)
		dev_gpd_data(dev)->hw_mode = enable;

out:
	genpd_unlock(genpd);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
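/*
 * Illustrative sketch (hypothetical consumer code): a driver could hand power
 * control over to HW signalling while it can tolerate the extra latency, and
 * switch back to SW control around latency-critical sections:
 *
 *	ret = dev_pm_genpd_set_hwmode(dev, true);
 *	if (ret && ret != -EOPNOTSUPP)
 *		return ret;
 *	...
 *	dev_pm_genpd_set_hwmode(dev, false);
 */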
/**
 * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
 *
 * @dev: Device for which the current HW-mode setting should be fetched.
 *
 * This helper function allows consumer drivers to fetch the current HW mode
 * setting of the device.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns the HW mode setting of device from SW cached hw_mode.
 */
bool dev_pm_genpd_get_hwmode(struct device *dev)
{
	return dev_gpd_data(dev)->hw_mode;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);

/**
 * dev_pm_genpd_rpm_always_on() - Control if the PM domain can be powered off.
 *
 * @dev: Device for which the PM domain may need to stay on.
 * @on: Value to set or unset for the condition.
 *
 * For some usecases a consumer driver requires its device to remain powered on
 * from the PM domain perspective during runtime. This function allows the
 * behaviour to be dynamically controlled for a device attached to a genpd.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	genpd_lock(genpd);
	dev_gpd_data(dev)->rpm_always_on = on;
	genpd_unlock(genpd);

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);

/**
 * pm_genpd_inc_rejected() - Adjust the rejected/usage counts for an idle-state.
 *
 * @genpd: The PM domain the idle-state belongs to.
 * @state_idx: The index of the idle-state that failed.
 *
 * In some special cases the ->power_off() callback is asynchronously powering
 * off the PM domain, so it may return zero to indicate success even though the
 * actual power-off could fail. To account for this correctly in the
 * rejected/usage counts for the idle-state statistics, users can call this
 * function to adjust the values.
 *
 * It is assumed that the users guarantee that the genpd doesn't get removed
 * while this routine is getting called.
 */
void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
			   unsigned int state_idx)
{
	genpd_lock(genpd);
	genpd->states[state_idx].rejected++;
	genpd->states[state_idx].usage--;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(pm_genpd_inc_rejected);
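/*
 * Illustrative sketch (hypothetical provider code): a genpd provider whose
 * ->power_off() completes asynchronously may learn only later that the
 * power-off actually failed. It could then correct the statistics that were
 * optimistically updated when ->power_off() returned zero:
 *
 *	static void foo_async_power_off_done(struct foo_pd *pd, int err)
 *	{
 *		if (err)
 *			pm_genpd_inc_rejected(&pd->genpd, pd->failed_state_idx);
 *	}
 *
 * foo_async_power_off_done(), struct foo_pd and failed_state_idx are made up
 * for this example.
 */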
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 dev_name(&genpd->dev), "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	genpd->synced_poweroff = false;
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 dev_name(&genpd->dev), "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}
/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static void genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			    unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * The domain is already in the "power off" state.
	 * System suspend is in progress.
	 * The domain is configured as always on.
	 * The domain has a subdomain being powered on.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0 ||
	    genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that, there's no need for
	 * additional locking, as powering on a child, requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;

		if (child->state_idx < child->state_count - 1)
			return;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
		    irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;

		/* The device may need its PM domain to stay powered on. */
		if (to_gpd_data(pdd)->rpm_always_on)
			return;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return;

	if (_genpd_power_off(genpd, true)) {
		genpd->states[genpd->state_idx].rejected++;
		return;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}
}
/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/* Reflect over the entered idle-states residency for debugfs. */
	genpd_reflect_residency(genpd);

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

err:
	list_for_each_entry_continue_reverse(link,
					     &genpd->child_links,
					     child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}
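/*
 * Illustrative sketch (hypothetical driver code): __genpd_runtime_suspend()
 * above and its resume counterpart below prefer a subsystem-level callback
 * (device type, then class, then bus) and fall back to the driver's own
 * dev_pm_ops only when no subsystem provides one. A driver typically just
 * declares its callbacks and lets this walk route to them:
 *
 *	static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
 *					 foo_runtime_resume, NULL);
 *
 * foo_runtime_suspend() and foo_runtime_resume() are made up for this example.
 */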
/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_unlock(genpd);

	return 0;
}
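/*
 * Illustrative sketch (hypothetical consumer code): the governor's
 * ->suspend_ok() hook used above weighs the device's PM QoS resume-latency
 * constraint against the measured suspend/resume latencies. Assuming a
 * governor is in use, a consumer that cannot tolerate a slow resume can keep
 * the domain powered by adding a constraint (value in microseconds):
 *
 *	struct dev_pm_qos_request req;
 *
 *	ret = dev_pm_qos_add_request(dev, &req, DEV_PM_QOS_RESUME_LATENCY, 100);
 */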
/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	pr_info("genpd: Disabling unused power domains\n");
	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall_sync(genpd_power_off_unused);
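/*
 * Usage note: booting with the parameter below on the kernel command line
 * sets pd_ignore_unused and keeps otherwise-unused PM domains powered on,
 * which can be handy when debugging a suspected power-sequencing problem:
 *
 *	pd_ignore_unused
 */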
#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;

		if (child->state_idx < child->state_count - 1)
			return;
	}

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false)) {
		genpd->states[genpd->state_idx].rejected++;
		return;
	}

	genpd->states[genpd->state_idx].usage++;
	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}
/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	genpd_lock(genpd);
	genpd->prepared_count++;
	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @suspend_noirq: Generic suspend_noirq callback.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev,
				int (*suspend_noirq)(struct device *dev),
				int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_suspend_noirq,
				    pm_generic_resume_noirq);
}
/**
 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_finish_resume(struct device *dev,
			       int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
		return resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return resume_noirq(dev);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_resume_noirq);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_freeze_noirq,
				    pm_generic_thaw_noirq);
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_poweroff_noirq,
				    pm_generic_restore_noirq);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_restore_noirq);
}
/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be
 * resumed during the syscore resume phase. It may also be called during
 * suspend-to-idle to resume a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
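/*
 * Illustrative sketch (hypothetical code): a syscore-stage user, e.g. a
 * timekeeping or interrupt-controller device that must be handled after the
 * regular "noirq" phases, could bracket its syscore callbacks with the two
 * helpers above:
 *
 *	static int foo_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(foo_dev);
 *		return 0;
 *	}
 *
 *	static void foo_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(foo_dev);
 *	}
 *
 * foo_dev and the foo_* callbacks are made up for this example.
 */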
#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							   bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	dev_pm_opp_clear_config(gpd_data->opp_token);
	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	gpd_data->hw_mode = genpd->get_hwmode_dev ?
			genpd->get_hwmode_dev(genpd, dev) : false;

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
	dev_pm_domain_set(dev, &genpd->domain);
out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	if (!genpd || !dev)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
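/*
 * Illustrative sketch (hypothetical provider/board code): once a genpd has
 * been initialized, devices can be attached to it explicitly:
 *
 *	ret = pm_genpd_add_device(&my_genpd, &pdev->dev);
 *	if (ret)
 *		return ret;
 *
 * my_genpd is made up for this example. DT-based platforms normally don't
 * call this directly; the device is attached through the OF genpd provider
 * path instead (e.g. genpd_dev_pm_attach()).
 */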
1991  */
1992 int pm_genpd_remove_device(struct device *dev)
1993 {
1994 	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1995 
1996 	if (!genpd)
1997 		return -EINVAL;
1998 
1999 	return genpd_remove_device(genpd, dev);
2000 }
2001 EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
2002 
2003 /**
2004  * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
2005  *
2006  * @dev: Device that should be associated with the notifier
2007  * @nb: The notifier block to register
2008  *
2009  * Users may call this function to add a genpd power on/off notifier for an
2010  * attached @dev. Only one notifier per device is allowed. Notifications are
2011  * sent when the genpd is powering the PM domain on or off.
2012  *
2013  * It is assumed that the user guarantees that the genpd is not detached
2014  * while this routine is running.
2015  *
2016  * Returns 0 on success and negative error values on failures.
2017  */
2018 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
2019 {
2020 	struct generic_pm_domain *genpd;
2021 	struct generic_pm_domain_data *gpd_data;
2022 	int ret;
2023 
2024 	genpd = dev_to_genpd_safe(dev);
2025 	if (!genpd)
2026 		return -ENODEV;
2027 
2028 	if (WARN_ON(!dev->power.subsys_data ||
2029 		     !dev->power.subsys_data->domain_data))
2030 		return -EINVAL;
2031 
2032 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2033 	if (gpd_data->power_nb)
2034 		return -EEXIST;
2035 
2036 	genpd_lock(genpd);
2037 	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
2038 	genpd_unlock(genpd);
2039 
2040 	if (ret) {
2041 		dev_warn(dev, "failed to add notifier for PM domain %s\n",
2042 			 dev_name(&genpd->dev));
2043 		return ret;
2044 	}
2045 
2046 	gpd_data->power_nb = nb;
2047 	return 0;
2048 }
2049 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
2050 
2051 /**
2052  * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
2053  *
2054  * @dev: Device that is associated with the notifier
2055  *
2056  * Users may call this function to remove a genpd power on/off notifier for an
2057  * attached @dev.
2058  *
2059  * It is assumed that the user guarantees that the genpd is not detached
2060  * while this routine is running.
2061  *
2062  * Returns 0 on success and negative error values on failures.
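 *
 * Example (an illustrative sketch, not from a real driver; the "foo" names
 * are hypothetical), pairing dev_pm_genpd_add_notifier() with this routine:
 *
 *	static int foo_genpd_cb(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		// action is GENPD_NOTIFY_PRE_OFF, GENPD_NOTIFY_OFF,
 *		// GENPD_NOTIFY_PRE_ON or GENPD_NOTIFY_ON.
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_genpd_cb,
 *	};
 *
 *	// At probe time, with the device already attached to its genpd:
 *	ret = dev_pm_genpd_add_notifier(dev, &foo_nb);
 *	// ... and on the teardown path:
 *	ret = dev_pm_genpd_remove_notifier(dev);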
2063  */
2064 int dev_pm_genpd_remove_notifier(struct device *dev)
2065 {
2066 	struct generic_pm_domain *genpd;
2067 	struct generic_pm_domain_data *gpd_data;
2068 	int ret;
2069 
2070 	genpd = dev_to_genpd_safe(dev);
2071 	if (!genpd)
2072 		return -ENODEV;
2073 
2074 	if (WARN_ON(!dev->power.subsys_data ||
2075 		     !dev->power.subsys_data->domain_data))
2076 		return -EINVAL;
2077 
2078 	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
2079 	if (!gpd_data->power_nb)
2080 		return -ENODEV;
2081 
2082 	genpd_lock(genpd);
2083 	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
2084 					    gpd_data->power_nb);
2085 	genpd_unlock(genpd);
2086 
2087 	if (ret) {
2088 		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
2089 			 dev_name(&genpd->dev));
2090 		return ret;
2091 	}
2092 
2093 	gpd_data->power_nb = NULL;
2094 	return 0;
2095 }
2096 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
2097 
2098 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
2099 			       struct generic_pm_domain *subdomain)
2100 {
2101 	struct gpd_link *link, *itr;
2102 	int ret = 0;
2103 
2104 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
2105 	    || genpd == subdomain)
2106 		return -EINVAL;
2107 
2108 	/*
2109 	 * If the subdomain can be powered on/off in an IRQ safe
2110 	 * context, ensure that the parent can also be
2111 	 * powered on/off in that context.
2112 	 */
2113 	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
2114 		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
2115 		     dev_name(&genpd->dev), subdomain->name);
2116 		return -EINVAL;
2117 	}
2118 
2119 	link = kzalloc(sizeof(*link), GFP_KERNEL);
2120 	if (!link)
2121 		return -ENOMEM;
2122 
2123 	genpd_lock(subdomain);
2124 	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
2125 
2126 	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
2127 		ret = -EINVAL;
2128 		goto out;
2129 	}
2130 
2131 	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
2132 		if (itr->child == subdomain && itr->parent == genpd) {
2133 			ret = -EINVAL;
2134 			goto out;
2135 		}
2136 	}
2137 
2138 	link->parent = genpd;
2139 	list_add_tail(&link->parent_node, &genpd->parent_links);
2140 	link->child = subdomain;
2141 	list_add_tail(&link->child_node, &subdomain->child_links);
2142 	if (genpd_status_on(subdomain))
2143 		genpd_sd_counter_inc(genpd);
2144 
2145 out:
2146 	genpd_unlock(genpd);
2147 	genpd_unlock(subdomain);
2148 	if (ret)
2149 		kfree(link);
2150 	return ret;
2151 }
2152 
2153 /**
2154  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2155  * @genpd: Leader PM domain to add the subdomain to.
2156  * @subdomain: Subdomain to be added.
2157  */
2158 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
2159 			   struct generic_pm_domain *subdomain)
2160 {
2161 	int ret;
2162 
2163 	mutex_lock(&gpd_list_lock);
2164 	ret = genpd_add_subdomain(genpd, subdomain);
2165 	mutex_unlock(&gpd_list_lock);
2166 
2167 	return ret;
2168 }
2169 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
2170 
2171 /**
2172  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2173  * @genpd: Leader PM domain to remove the subdomain from.
2174  * @subdomain: Subdomain to be removed.
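 *
 * Example (an illustrative sketch; "foo_parent_pd" and "foo_child_pd" are
 * hypothetical, statically initialized domains):
 *
 *	ret = pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
 *	...
 *	// Later, once the subdomain itself has no subdomains and no devices:
 *	ret = pm_genpd_remove_subdomain(&foo_parent_pd, &foo_child_pd);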
2175 */ 2176 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 2177 struct generic_pm_domain *subdomain) 2178 { 2179 struct gpd_link *l, *link; 2180 int ret = -EINVAL; 2181 2182 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 2183 return -EINVAL; 2184 2185 genpd_lock(subdomain); 2186 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); 2187 2188 if (!list_empty(&subdomain->parent_links) || subdomain->device_count) { 2189 pr_warn("%s: unable to remove subdomain %s\n", 2190 dev_name(&genpd->dev), subdomain->name); 2191 ret = -EBUSY; 2192 goto out; 2193 } 2194 2195 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { 2196 if (link->child != subdomain) 2197 continue; 2198 2199 list_del(&link->parent_node); 2200 list_del(&link->child_node); 2201 kfree(link); 2202 if (genpd_status_on(subdomain)) 2203 genpd_sd_counter_dec(genpd); 2204 2205 ret = 0; 2206 break; 2207 } 2208 2209 out: 2210 genpd_unlock(genpd); 2211 genpd_unlock(subdomain); 2212 2213 return ret; 2214 } 2215 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); 2216 2217 static void genpd_free_default_power_state(struct genpd_power_state *states, 2218 unsigned int state_count) 2219 { 2220 kfree(states); 2221 } 2222 2223 static int genpd_set_default_power_state(struct generic_pm_domain *genpd) 2224 { 2225 struct genpd_power_state *state; 2226 2227 state = kzalloc(sizeof(*state), GFP_KERNEL); 2228 if (!state) 2229 return -ENOMEM; 2230 2231 genpd->states = state; 2232 genpd->state_count = 1; 2233 genpd->free_states = genpd_free_default_power_state; 2234 2235 return 0; 2236 } 2237 2238 static void genpd_provider_release(struct device *dev) 2239 { 2240 /* nothing to be done here */ 2241 } 2242 2243 static int genpd_alloc_data(struct generic_pm_domain *genpd) 2244 { 2245 struct genpd_governor_data *gd = NULL; 2246 int ret; 2247 2248 if (genpd_is_cpu_domain(genpd) && 2249 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) 2250 return -ENOMEM; 2251 2252 if (genpd->gov) { 2253 gd = kzalloc(sizeof(*gd), GFP_KERNEL); 2254 if (!gd) { 2255 ret = -ENOMEM; 2256 goto free; 2257 } 2258 2259 gd->max_off_time_ns = -1; 2260 gd->max_off_time_changed = true; 2261 gd->next_wakeup = KTIME_MAX; 2262 gd->next_hrtimer = KTIME_MAX; 2263 } 2264 2265 /* Use only one "off" state if there were no states declared */ 2266 if (genpd->state_count == 0) { 2267 ret = genpd_set_default_power_state(genpd); 2268 if (ret) 2269 goto free; 2270 } 2271 2272 genpd->gd = gd; 2273 device_initialize(&genpd->dev); 2274 genpd->dev.release = genpd_provider_release; 2275 genpd->dev.bus = &genpd_provider_bus_type; 2276 genpd->dev.parent = &genpd_provider_bus; 2277 2278 if (!genpd_is_dev_name_fw(genpd)) { 2279 dev_set_name(&genpd->dev, "%s", genpd->name); 2280 } else { 2281 ret = ida_alloc(&genpd_ida, GFP_KERNEL); 2282 if (ret < 0) 2283 goto put; 2284 2285 genpd->device_id = ret; 2286 dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id); 2287 } 2288 2289 return 0; 2290 put: 2291 put_device(&genpd->dev); 2292 if (genpd->free_states == genpd_free_default_power_state) { 2293 kfree(genpd->states); 2294 genpd->states = NULL; 2295 } 2296 free: 2297 if (genpd_is_cpu_domain(genpd)) 2298 free_cpumask_var(genpd->cpus); 2299 kfree(gd); 2300 return ret; 2301 } 2302 2303 static void genpd_free_data(struct generic_pm_domain *genpd) 2304 { 2305 put_device(&genpd->dev); 2306 if (genpd->device_id != -ENXIO) 2307 ida_free(&genpd_ida, genpd->device_id); 2308 if (genpd_is_cpu_domain(genpd)) 2309 free_cpumask_var(genpd->cpus); 2310 if (genpd->free_states) 2311 
genpd->free_states(genpd->states, genpd->state_count); 2312 kfree(genpd->gd); 2313 } 2314 2315 static void genpd_lock_init(struct generic_pm_domain *genpd) 2316 { 2317 if (genpd_is_cpu_domain(genpd)) { 2318 raw_spin_lock_init(&genpd->raw_slock); 2319 genpd->lock_ops = &genpd_raw_spin_ops; 2320 } else if (genpd_is_irq_safe(genpd)) { 2321 spin_lock_init(&genpd->slock); 2322 genpd->lock_ops = &genpd_spin_ops; 2323 } else { 2324 mutex_init(&genpd->mlock); 2325 genpd->lock_ops = &genpd_mtx_ops; 2326 } 2327 } 2328 2329 /** 2330 * pm_genpd_init - Initialize a generic I/O PM domain object. 2331 * @genpd: PM domain object to initialize. 2332 * @gov: PM domain governor to associate with the domain (may be NULL). 2333 * @is_off: Initial value of the domain's power_is_off field. 2334 * 2335 * Returns 0 on successful initialization, else a negative error code. 2336 */ 2337 int pm_genpd_init(struct generic_pm_domain *genpd, 2338 struct dev_power_governor *gov, bool is_off) 2339 { 2340 int ret; 2341 2342 if (IS_ERR_OR_NULL(genpd)) 2343 return -EINVAL; 2344 2345 INIT_LIST_HEAD(&genpd->parent_links); 2346 INIT_LIST_HEAD(&genpd->child_links); 2347 INIT_LIST_HEAD(&genpd->dev_list); 2348 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers); 2349 genpd_lock_init(genpd); 2350 genpd->gov = gov; 2351 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); 2352 atomic_set(&genpd->sd_count, 0); 2353 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON; 2354 genpd->device_count = 0; 2355 genpd->provider = NULL; 2356 genpd->device_id = -ENXIO; 2357 genpd->has_provider = false; 2358 genpd->opp_table = NULL; 2359 genpd->accounting_time = ktime_get_mono_fast_ns(); 2360 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; 2361 genpd->domain.ops.runtime_resume = genpd_runtime_resume; 2362 genpd->domain.ops.prepare = genpd_prepare; 2363 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq; 2364 genpd->domain.ops.resume_noirq = genpd_resume_noirq; 2365 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq; 2366 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq; 2367 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq; 2368 genpd->domain.ops.restore_noirq = genpd_restore_noirq; 2369 genpd->domain.ops.complete = genpd_complete; 2370 genpd->domain.start = genpd_dev_pm_start; 2371 genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state; 2372 2373 if (genpd->flags & GENPD_FLAG_PM_CLK) { 2374 genpd->dev_ops.stop = pm_clk_suspend; 2375 genpd->dev_ops.start = pm_clk_resume; 2376 } 2377 2378 /* The always-on governor works better with the corresponding flag. */ 2379 if (gov == &pm_domain_always_on_gov) 2380 genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON; 2381 2382 /* Always-on domains must be powered on at initialization. */ 2383 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) && 2384 !genpd_status_on(genpd)) { 2385 pr_err("always-on PM domain %s is not on\n", genpd->name); 2386 return -EINVAL; 2387 } 2388 2389 /* Multiple states but no governor doesn't make sense. 
*/
2390 	if (!gov && genpd->state_count > 1)
2391 		pr_warn("%s: no governor for states\n", genpd->name);
2392 
2393 	ret = genpd_alloc_data(genpd);
2394 	if (ret)
2395 		return ret;
2396 
2397 	mutex_lock(&gpd_list_lock);
2398 	list_add(&genpd->gpd_list_node, &gpd_list);
2399 	mutex_unlock(&gpd_list_lock);
2400 	genpd_debug_add(genpd);
2401 
2402 	return 0;
2403 }
2404 EXPORT_SYMBOL_GPL(pm_genpd_init);
2405 
2406 static int genpd_remove(struct generic_pm_domain *genpd)
2407 {
2408 	struct gpd_link *l, *link;
2409 
2410 	if (IS_ERR_OR_NULL(genpd))
2411 		return -EINVAL;
2412 
2413 	genpd_lock(genpd);
2414 
2415 	if (genpd->has_provider) {
2416 		genpd_unlock(genpd);
2417 		pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev));
2418 		return -EBUSY;
2419 	}
2420 
2421 	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2422 		genpd_unlock(genpd);
2423 		pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev));
2424 		return -EBUSY;
2425 	}
2426 
2427 	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2428 		list_del(&link->parent_node);
2429 		list_del(&link->child_node);
2430 		kfree(link);
2431 	}
2432 
2433 	list_del(&genpd->gpd_list_node);
2434 	genpd_unlock(genpd);
2435 	genpd_debug_remove(genpd);
2436 	cancel_work_sync(&genpd->power_off_work);
2437 	genpd_free_data(genpd);
2438 
2439 	pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev));
2440 
2441 	return 0;
2442 }
2443 
2444 /**
2445  * pm_genpd_remove - Remove a generic I/O PM domain
2446  * @genpd: Pointer to PM domain that is to be removed.
2447  *
2448  * To remove the PM domain, this function:
2449  * - Removes the PM domain as a subdomain to any parent domains,
2450  *   if it was added.
2451  * - Removes the PM domain from the list of registered PM domains.
2452  *
2453  * The PM domain will only be removed if the associated provider has
2454  * been removed, it is not a parent to any other PM domain and it has
2455  * no devices associated with it.
2456  */
2457 int pm_genpd_remove(struct generic_pm_domain *genpd)
2458 {
2459 	int ret;
2460 
2461 	mutex_lock(&gpd_list_lock);
2462 	ret = genpd_remove(genpd);
2463 	mutex_unlock(&gpd_list_lock);
2464 
2465 	return ret;
2466 }
2467 EXPORT_SYMBOL_GPL(pm_genpd_remove);
2468 
2469 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2470 
2471 /*
2472  * Device Tree based PM domain providers.
2473  *
2474  * The code below implements generic device tree based PM domain providers that
2475  * bind device tree nodes with generic PM domains registered in the system.
2476  *
2477  * Any driver that registers generic PM domains and needs to support binding of
2478  * devices to these domains is supposed to register a PM domain provider, which
2479  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2480  *
2481  * Two simple mapping functions have been provided for convenience:
2482  * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2483  * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2484  *   index.
2485  */
2486 
2487 /**
2488  * struct of_genpd_provider - PM domain provider registration structure
2489  * @link: Entry in global list of PM domain providers
2490  * @node: Pointer to device tree node of PM domain provider
2491  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2492  *	   into a PM domain.
2493  * @data: context pointer to be passed into @xlate callback
2494  */
2495 struct of_genpd_provider {
2496 	struct list_head link;
2497 	struct device_node *node;
2498 	genpd_xlate_t xlate;
2499 	void *data;
2500 };
2501 
2502 /* List of registered PM domain providers. */
2503 static LIST_HEAD(of_genpd_providers);
2504 /* Mutex to protect the list above. */
2505 static DEFINE_MUTEX(of_genpd_mutex);
2506 /* Used to prevent registering devices before the bus. */
2507 static bool genpd_bus_registered;
2508 
2509 /**
2510  * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2511  * @genpdspec: OF phandle args to map into a PM domain
2512  * @data: xlate function private data - pointer to struct generic_pm_domain
2513  *
2514  * This is a generic xlate function that can be used to model PM domains that
2515  * have their own device tree nodes. The private data of the xlate function
2516  * needs to be a valid pointer to struct generic_pm_domain.
2517  */
2518 static struct generic_pm_domain *genpd_xlate_simple(
2519 					const struct of_phandle_args *genpdspec,
2520 					void *data)
2521 {
2522 	return data;
2523 }
2524 
2525 /**
2526  * genpd_xlate_onecell() - Xlate function using a single index.
2527  * @genpdspec: OF phandle args to map into a PM domain
2528  * @data: xlate function private data - pointer to struct genpd_onecell_data
2529  *
2530  * This is a generic xlate function that can be used to model simple PM domain
2531  * controllers that have one device tree node and provide multiple PM domains.
2532  * A single cell is used as an index into an array of PM domains specified in
2533  * the genpd_onecell_data struct when registering the provider.
2534  */
2535 static struct generic_pm_domain *genpd_xlate_onecell(
2536 					const struct of_phandle_args *genpdspec,
2537 					void *data)
2538 {
2539 	struct genpd_onecell_data *genpd_data = data;
2540 	unsigned int idx = genpdspec->args[0];
2541 
2542 	if (genpdspec->args_count != 1)
2543 		return ERR_PTR(-EINVAL);
2544 
2545 	if (idx >= genpd_data->num_domains) {
2546 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2547 		return ERR_PTR(-EINVAL);
2548 	}
2549 
2550 	if (!genpd_data->domains[idx])
2551 		return ERR_PTR(-ENOENT);
2552 
2553 	return genpd_data->domains[idx];
2554 }
2555 
2556 /**
2557  * genpd_add_provider() - Register a PM domain provider for a node
2558  * @np: Device node pointer associated with the PM domain provider.
2559  * @xlate: Callback for decoding PM domain from phandle arguments.
2560  * @data: Context pointer for @xlate callback.
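 *
 * Example (an illustrative DT sketch; node names and the "foo" compatible
 * are hypothetical). The provider node matched by @np declares
 * #power-domain-cells, and @xlate decodes the cells that follow the
 * phandle in a consumer's specifier:
 *
 *	power: power-controller@12340000 {
 *		compatible = "foo,power-controller";
 *		#power-domain-cells = <1>;
 *	};
 *
 *	consumer@12350000 {
 *		power-domains = <&power 0>;
 *	};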
2561 */ 2562 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, 2563 void *data) 2564 { 2565 struct of_genpd_provider *cp; 2566 2567 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 2568 if (!cp) 2569 return -ENOMEM; 2570 2571 cp->node = of_node_get(np); 2572 cp->data = data; 2573 cp->xlate = xlate; 2574 fwnode_dev_initialized(of_fwnode_handle(np), true); 2575 2576 mutex_lock(&of_genpd_mutex); 2577 list_add(&cp->link, &of_genpd_providers); 2578 mutex_unlock(&of_genpd_mutex); 2579 pr_debug("Added domain provider from %pOF\n", np); 2580 2581 return 0; 2582 } 2583 2584 static bool genpd_present(const struct generic_pm_domain *genpd) 2585 { 2586 bool ret = false; 2587 const struct generic_pm_domain *gpd; 2588 2589 mutex_lock(&gpd_list_lock); 2590 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2591 if (gpd == genpd) { 2592 ret = true; 2593 break; 2594 } 2595 } 2596 mutex_unlock(&gpd_list_lock); 2597 2598 return ret; 2599 } 2600 2601 /** 2602 * of_genpd_add_provider_simple() - Register a simple PM domain provider 2603 * @np: Device node pointer associated with the PM domain provider. 2604 * @genpd: Pointer to PM domain associated with the PM domain provider. 2605 */ 2606 int of_genpd_add_provider_simple(struct device_node *np, 2607 struct generic_pm_domain *genpd) 2608 { 2609 int ret; 2610 2611 if (!np || !genpd) 2612 return -EINVAL; 2613 2614 if (!genpd_bus_registered) 2615 return -ENODEV; 2616 2617 if (!genpd_present(genpd)) 2618 return -EINVAL; 2619 2620 genpd->dev.of_node = np; 2621 2622 ret = device_add(&genpd->dev); 2623 if (ret) 2624 return ret; 2625 2626 /* Parse genpd OPP table */ 2627 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) { 2628 ret = dev_pm_opp_of_add_table(&genpd->dev); 2629 if (ret) { 2630 dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n"); 2631 goto err_del; 2632 } 2633 2634 /* 2635 * Save table for faster processing while setting performance 2636 * state. 2637 */ 2638 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2639 WARN_ON(IS_ERR(genpd->opp_table)); 2640 } 2641 2642 ret = genpd_add_provider(np, genpd_xlate_simple, genpd); 2643 if (ret) 2644 goto err_opp; 2645 2646 genpd->provider = &np->fwnode; 2647 genpd->has_provider = true; 2648 2649 return 0; 2650 2651 err_opp: 2652 if (genpd->opp_table) { 2653 dev_pm_opp_put_opp_table(genpd->opp_table); 2654 dev_pm_opp_of_remove_table(&genpd->dev); 2655 } 2656 err_del: 2657 device_del(&genpd->dev); 2658 return ret; 2659 } 2660 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple); 2661 2662 /** 2663 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider 2664 * @np: Device node pointer associated with the PM domain provider. 2665 * @data: Pointer to the data associated with the PM domain provider. 
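 *
 * Example (an illustrative sketch; the "foo" names are hypothetical). Each
 * initialized domain sits at the index matching its specifier cell:
 *
 *	static struct generic_pm_domain *foo_domains[FOO_NR_DOMAINS];
 *
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	// After pm_genpd_init() of each domain in foo_domains[]:
 *	ret = of_genpd_add_provider_onecell(np, &foo_pd_data);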
2666 */ 2667 int of_genpd_add_provider_onecell(struct device_node *np, 2668 struct genpd_onecell_data *data) 2669 { 2670 struct generic_pm_domain *genpd; 2671 unsigned int i; 2672 int ret = -EINVAL; 2673 2674 if (!np || !data) 2675 return -EINVAL; 2676 2677 if (!genpd_bus_registered) 2678 return -ENODEV; 2679 2680 if (!data->xlate) 2681 data->xlate = genpd_xlate_onecell; 2682 2683 for (i = 0; i < data->num_domains; i++) { 2684 genpd = data->domains[i]; 2685 2686 if (!genpd) 2687 continue; 2688 if (!genpd_present(genpd)) 2689 goto error; 2690 2691 genpd->dev.of_node = np; 2692 2693 ret = device_add(&genpd->dev); 2694 if (ret) 2695 goto error; 2696 2697 /* Parse genpd OPP table */ 2698 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) { 2699 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); 2700 if (ret) { 2701 dev_err_probe(&genpd->dev, ret, 2702 "Failed to add OPP table for index %d\n", i); 2703 device_del(&genpd->dev); 2704 goto error; 2705 } 2706 2707 /* 2708 * Save table for faster processing while setting 2709 * performance state. 2710 */ 2711 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2712 WARN_ON(IS_ERR(genpd->opp_table)); 2713 } 2714 2715 genpd->provider = &np->fwnode; 2716 genpd->has_provider = true; 2717 } 2718 2719 ret = genpd_add_provider(np, data->xlate, data); 2720 if (ret < 0) 2721 goto error; 2722 2723 return 0; 2724 2725 error: 2726 while (i--) { 2727 genpd = data->domains[i]; 2728 2729 if (!genpd) 2730 continue; 2731 2732 genpd->provider = NULL; 2733 genpd->has_provider = false; 2734 2735 if (genpd->opp_table) { 2736 dev_pm_opp_put_opp_table(genpd->opp_table); 2737 dev_pm_opp_of_remove_table(&genpd->dev); 2738 } 2739 2740 device_del(&genpd->dev); 2741 } 2742 2743 return ret; 2744 } 2745 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell); 2746 2747 /** 2748 * of_genpd_del_provider() - Remove a previously registered PM domain provider 2749 * @np: Device node pointer associated with the PM domain provider 2750 */ 2751 void of_genpd_del_provider(struct device_node *np) 2752 { 2753 struct of_genpd_provider *cp, *tmp; 2754 struct generic_pm_domain *gpd; 2755 2756 mutex_lock(&gpd_list_lock); 2757 mutex_lock(&of_genpd_mutex); 2758 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) { 2759 if (cp->node == np) { 2760 /* 2761 * For each PM domain associated with the 2762 * provider, set the 'has_provider' to false 2763 * so that the PM domain can be safely removed. 2764 */ 2765 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2766 if (gpd->provider == of_fwnode_handle(np)) { 2767 gpd->has_provider = false; 2768 2769 if (gpd->opp_table) { 2770 dev_pm_opp_put_opp_table(gpd->opp_table); 2771 dev_pm_opp_of_remove_table(&gpd->dev); 2772 } 2773 2774 device_del(&gpd->dev); 2775 } 2776 } 2777 2778 fwnode_dev_initialized(of_fwnode_handle(cp->node), false); 2779 list_del(&cp->link); 2780 of_node_put(cp->node); 2781 kfree(cp); 2782 break; 2783 } 2784 } 2785 mutex_unlock(&of_genpd_mutex); 2786 mutex_unlock(&gpd_list_lock); 2787 } 2788 EXPORT_SYMBOL_GPL(of_genpd_del_provider); 2789 2790 /** 2791 * genpd_get_from_provider() - Look-up PM domain 2792 * @genpdspec: OF phandle args to use for look-up 2793 * 2794 * Looks for a PM domain provider under the node specified by @genpdspec and if 2795 * found, uses xlate function of the provider to map phandle args to a PM 2796 * domain. 2797 * 2798 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() 2799 * on failure. 
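 *
 * Worked example (illustrative): with a onecell provider like the sketch
 * above, a consumer using "power-domains = <&power 2>;" yields a @genpdspec
 * where genpdspec->np is the provider node and genpdspec->args[0] == 2,
 * which genpd_xlate_onecell() maps to data->domains[2].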
2800 */ 2801 static struct generic_pm_domain *genpd_get_from_provider( 2802 const struct of_phandle_args *genpdspec) 2803 { 2804 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); 2805 struct of_genpd_provider *provider; 2806 2807 if (!genpdspec) 2808 return ERR_PTR(-EINVAL); 2809 2810 mutex_lock(&of_genpd_mutex); 2811 2812 /* Check if we have such a provider in our array */ 2813 list_for_each_entry(provider, &of_genpd_providers, link) { 2814 if (provider->node == genpdspec->np) 2815 genpd = provider->xlate(genpdspec, provider->data); 2816 if (!IS_ERR(genpd)) 2817 break; 2818 } 2819 2820 mutex_unlock(&of_genpd_mutex); 2821 2822 return genpd; 2823 } 2824 2825 /** 2826 * of_genpd_add_device() - Add a device to an I/O PM domain 2827 * @genpdspec: OF phandle args to use for look-up PM domain 2828 * @dev: Device to be added. 2829 * 2830 * Looks-up an I/O PM domain based upon phandle args provided and adds 2831 * the device to the PM domain. Returns a negative error code on failure. 2832 */ 2833 int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev) 2834 { 2835 struct generic_pm_domain *genpd; 2836 int ret; 2837 2838 if (!dev) 2839 return -EINVAL; 2840 2841 mutex_lock(&gpd_list_lock); 2842 2843 genpd = genpd_get_from_provider(genpdspec); 2844 if (IS_ERR(genpd)) { 2845 ret = PTR_ERR(genpd); 2846 goto out; 2847 } 2848 2849 ret = genpd_add_device(genpd, dev, dev); 2850 2851 out: 2852 mutex_unlock(&gpd_list_lock); 2853 2854 return ret; 2855 } 2856 EXPORT_SYMBOL_GPL(of_genpd_add_device); 2857 2858 /** 2859 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 2860 * @parent_spec: OF phandle args to use for parent PM domain look-up 2861 * @subdomain_spec: OF phandle args to use for subdomain look-up 2862 * 2863 * Looks-up a parent PM domain and subdomain based upon phandle args 2864 * provided and adds the subdomain to the parent PM domain. Returns a 2865 * negative error code on failure. 2866 */ 2867 int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec, 2868 const struct of_phandle_args *subdomain_spec) 2869 { 2870 struct generic_pm_domain *parent, *subdomain; 2871 int ret; 2872 2873 mutex_lock(&gpd_list_lock); 2874 2875 parent = genpd_get_from_provider(parent_spec); 2876 if (IS_ERR(parent)) { 2877 ret = PTR_ERR(parent); 2878 goto out; 2879 } 2880 2881 subdomain = genpd_get_from_provider(subdomain_spec); 2882 if (IS_ERR(subdomain)) { 2883 ret = PTR_ERR(subdomain); 2884 goto out; 2885 } 2886 2887 ret = genpd_add_subdomain(parent, subdomain); 2888 2889 out: 2890 mutex_unlock(&gpd_list_lock); 2891 2892 return ret == -ENOENT ? -EPROBE_DEFER : ret; 2893 } 2894 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain); 2895 2896 /** 2897 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 2898 * @parent_spec: OF phandle args to use for parent PM domain look-up 2899 * @subdomain_spec: OF phandle args to use for subdomain look-up 2900 * 2901 * Looks-up a parent PM domain and subdomain based upon phandle args 2902 * provided and removes the subdomain from the parent PM domain. Returns a 2903 * negative error code on failure. 
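 *
 * Example (an illustrative sketch): tearing down a link created earlier by
 * of_genpd_add_subdomain(), using the same two specifiers:
 *
 *	ret = of_genpd_add_subdomain(&parent_spec, &subdomain_spec);
 *	...
 *	ret = of_genpd_remove_subdomain(&parent_spec, &subdomain_spec);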
2904  */
2905 int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
2906 			      const struct of_phandle_args *subdomain_spec)
2907 {
2908 	struct generic_pm_domain *parent, *subdomain;
2909 	int ret;
2910 
2911 	mutex_lock(&gpd_list_lock);
2912 
2913 	parent = genpd_get_from_provider(parent_spec);
2914 	if (IS_ERR(parent)) {
2915 		ret = PTR_ERR(parent);
2916 		goto out;
2917 	}
2918 
2919 	subdomain = genpd_get_from_provider(subdomain_spec);
2920 	if (IS_ERR(subdomain)) {
2921 		ret = PTR_ERR(subdomain);
2922 		goto out;
2923 	}
2924 
2925 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2926 
2927 out:
2928 	mutex_unlock(&gpd_list_lock);
2929 
2930 	return ret;
2931 }
2932 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2933 
2934 /**
2935  * of_genpd_remove_last - Remove the last PM domain registered for a provider
2936  * @np: Pointer to device node associated with provider
2937  *
2938  * Find the last PM domain that was added by a particular provider and
2939  * remove this PM domain from the list of PM domains. The provider is
2940  * identified by the 'provider' device structure that is passed. The PM
2941  * domain will only be removed if the provider associated with the domain
2942  * has been removed.
2943  *
2944  * Returns a valid pointer to struct generic_pm_domain on success or
2945  * ERR_PTR() on failure.
2946  */
2947 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2948 {
2949 	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2950 	int ret;
2951 
2952 	if (IS_ERR_OR_NULL(np))
2953 		return ERR_PTR(-EINVAL);
2954 
2955 	mutex_lock(&gpd_list_lock);
2956 	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2957 		if (gpd->provider == of_fwnode_handle(np)) {
2958 			ret = genpd_remove(gpd);
2959 			genpd = ret ? ERR_PTR(ret) : gpd;
2960 			break;
2961 		}
2962 	}
2963 	mutex_unlock(&gpd_list_lock);
2964 
2965 	return genpd;
2966 }
2967 EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2968 
2969 static void genpd_release_dev(struct device *dev)
2970 {
2971 	of_node_put(dev->of_node);
2972 	kfree(dev);
2973 }
2974 
2975 static const struct bus_type genpd_bus_type = {
2976 	.name		= "genpd",
2977 };
2978 
2979 /**
2980  * genpd_dev_pm_detach - Detach a device from its PM domain.
2981  * @dev: Device to detach.
2982  * @power_off: Currently not used
2983  *
2984  * Try to locate a corresponding generic PM domain, which the device was
2985  * attached to previously. If such is found, the device is detached from it.
2986  */
2987 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2988 {
2989 	struct generic_pm_domain *pd;
2990 	unsigned int i;
2991 	int ret = 0;
2992 
2993 	pd = dev_to_genpd(dev);
2994 	if (IS_ERR(pd))
2995 		return;
2996 
2997 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2998 
2999 	/* Drop the default performance state */
3000 	if (dev_gpd_data(dev)->default_pstate) {
3001 		dev_pm_genpd_set_performance_state(dev, 0);
3002 		dev_gpd_data(dev)->default_pstate = 0;
3003 	}
3004 
3005 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
3006 		ret = genpd_remove_device(pd, dev);
3007 		if (ret != -EAGAIN)
3008 			break;
3009 
3010 		mdelay(i);
3011 		cond_resched();
3012 	}
3013 
3014 	if (ret < 0) {
3015 		dev_err(dev, "failed to remove from PM domain %s: %d\n",
3016 			pd->name, ret);
3017 		return;
3018 	}
3019 
3020 	/* Check if PM domain can be powered off after removing this device. */
3021 	genpd_queue_power_off_work(pd);
3022 
3023 	/* Unregister the device if it was created by genpd.
*/ 3024 if (dev->bus == &genpd_bus_type) 3025 device_unregister(dev); 3026 } 3027 3028 static void genpd_dev_pm_sync(struct device *dev) 3029 { 3030 struct generic_pm_domain *pd; 3031 3032 pd = dev_to_genpd(dev); 3033 if (IS_ERR(pd)) 3034 return; 3035 3036 genpd_queue_power_off_work(pd); 3037 } 3038 3039 static int genpd_set_required_opp_dev(struct device *dev, 3040 struct device *base_dev) 3041 { 3042 struct dev_pm_opp_config config = { 3043 .required_dev = dev, 3044 }; 3045 int ret; 3046 3047 /* Limit support to non-providers for now. */ 3048 if (of_property_present(base_dev->of_node, "#power-domain-cells")) 3049 return 0; 3050 3051 if (!dev_pm_opp_of_has_required_opp(base_dev)) 3052 return 0; 3053 3054 ret = dev_pm_opp_set_config(base_dev, &config); 3055 if (ret < 0) 3056 return ret; 3057 3058 dev_gpd_data(dev)->opp_token = ret; 3059 return 0; 3060 } 3061 3062 static int genpd_set_required_opp(struct device *dev, unsigned int index) 3063 { 3064 int ret, pstate; 3065 3066 /* Set the default performance state */ 3067 pstate = of_get_required_opp_performance_state(dev->of_node, index); 3068 if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) { 3069 ret = pstate; 3070 goto err; 3071 } else if (pstate > 0) { 3072 ret = dev_pm_genpd_set_performance_state(dev, pstate); 3073 if (ret) 3074 goto err; 3075 dev_gpd_data(dev)->default_pstate = pstate; 3076 } 3077 3078 return 0; 3079 err: 3080 dev_err(dev, "failed to set required performance state for power-domain %s: %d\n", 3081 dev_to_genpd(dev)->name, ret); 3082 return ret; 3083 } 3084 3085 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev, 3086 unsigned int index, unsigned int num_domains, 3087 bool power_on) 3088 { 3089 struct of_phandle_args pd_args; 3090 struct generic_pm_domain *pd; 3091 int ret; 3092 3093 ret = of_parse_phandle_with_args(dev->of_node, "power-domains", 3094 "#power-domain-cells", index, &pd_args); 3095 if (ret < 0) 3096 return ret; 3097 3098 mutex_lock(&gpd_list_lock); 3099 pd = genpd_get_from_provider(&pd_args); 3100 of_node_put(pd_args.np); 3101 if (IS_ERR(pd)) { 3102 mutex_unlock(&gpd_list_lock); 3103 dev_dbg(dev, "%s() failed to find PM domain: %ld\n", 3104 __func__, PTR_ERR(pd)); 3105 return driver_deferred_probe_check_state(base_dev); 3106 } 3107 3108 dev_dbg(dev, "adding to PM domain %s\n", pd->name); 3109 3110 ret = genpd_add_device(pd, dev, base_dev); 3111 mutex_unlock(&gpd_list_lock); 3112 3113 if (ret < 0) 3114 return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name); 3115 3116 dev->pm_domain->detach = genpd_dev_pm_detach; 3117 dev->pm_domain->sync = genpd_dev_pm_sync; 3118 3119 /* 3120 * For a single PM domain the index of the required OPP must be zero, so 3121 * let's try to assign a required dev in that case. In the multiple PM 3122 * domains case, we need platform code to specify the index. 
*/
3124 	if (num_domains == 1) {
3125 		ret = genpd_set_required_opp_dev(dev, base_dev);
3126 		if (ret)
3127 			goto err;
3128 	}
3129 
3130 	ret = genpd_set_required_opp(dev, index);
3131 	if (ret)
3132 		goto err;
3133 
3134 	if (power_on) {
3135 		genpd_lock(pd);
3136 		ret = genpd_power_on(pd, 0);
3137 		genpd_unlock(pd);
3138 	}
3139 
3140 	if (ret) {
3141 		/* Drop the default performance state */
3142 		if (dev_gpd_data(dev)->default_pstate) {
3143 			dev_pm_genpd_set_performance_state(dev, 0);
3144 			dev_gpd_data(dev)->default_pstate = 0;
3145 		}
3146 
3147 		genpd_remove_device(pd, dev);
3148 		return -EPROBE_DEFER;
3149 	}
3150 
3151 	return 1;
3152 
3153 err:
3154 	genpd_remove_device(pd, dev);
3155 	return ret;
3156 }
3157 
3158 /**
3159  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
3160  * @dev: Device to attach.
3161  *
3162  * Parse device's OF node to find a PM domain specifier. If such is found,
3163  * attaches the device to retrieved pm_domain ops.
3164  *
3165  * Returns 1 on a successfully attached PM domain, 0 when the device doesn't
3166  * need a PM domain or when multiple power-domains exist for it, else a
3167  * negative error code. Note that if a power-domain exists for the device,
3168  * but it cannot be found or turned on, then -EPROBE_DEFER is returned to
3169  * ensure that the device is not probed and to re-try again later.
3170  */
3171 int genpd_dev_pm_attach(struct device *dev)
3172 {
3173 	if (!dev->of_node)
3174 		return 0;
3175 
3176 	/*
3177 	 * Devices with multiple PM domains must be attached separately, as we
3178 	 * can only attach one PM domain per device.
3179 	 */
3180 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
3181 				       "#power-domain-cells") != 1)
3182 		return 0;
3183 
3184 	return __genpd_dev_pm_attach(dev, dev, 0, 1, true);
3185 }
3186 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
3187 
3188 /**
3189  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
3190  * @dev: The device used to lookup the PM domain.
3191  * @index: The index of the PM domain.
3192  *
3193  * Parse device's OF node to find a PM domain specifier at the provided @index.
3194  * If such is found, creates a virtual device and attaches it to the retrieved
3195  * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
3196  * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
3197  *
3198  * Returns the created virtual device if a PM domain is attached successfully,
3199  * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
3200  * failures. If a power-domain exists for the device, but cannot be found or
3201  * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
3202  * is not probed and to re-try again later.
3203  */
3204 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
3205 					 unsigned int index)
3206 {
3207 	struct device *virt_dev;
3208 	int num_domains;
3209 	int ret;
3210 
3211 	if (!dev->of_node)
3212 		return NULL;
3213 
3214 	/* Verify that the index is within a valid range. */
3215 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
3216 						 "#power-domain-cells");
3217 	if (num_domains < 0 || index >= num_domains)
3218 		return NULL;
3219 
3220 	if (!genpd_bus_registered)
3221 		return ERR_PTR(-ENODEV);
3222 
3223 	/* Allocate and register device on the genpd bus.
*/ 3224 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); 3225 if (!virt_dev) 3226 return ERR_PTR(-ENOMEM); 3227 3228 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); 3229 virt_dev->bus = &genpd_bus_type; 3230 virt_dev->release = genpd_release_dev; 3231 virt_dev->of_node = of_node_get(dev->of_node); 3232 3233 ret = device_register(virt_dev); 3234 if (ret) { 3235 put_device(virt_dev); 3236 return ERR_PTR(ret); 3237 } 3238 3239 /* Try to attach the device to the PM domain at the specified index. */ 3240 ret = __genpd_dev_pm_attach(virt_dev, dev, index, num_domains, false); 3241 if (ret < 1) { 3242 device_unregister(virt_dev); 3243 return ret ? ERR_PTR(ret) : NULL; 3244 } 3245 3246 pm_runtime_enable(virt_dev); 3247 genpd_queue_power_off_work(dev_to_genpd(virt_dev)); 3248 3249 return virt_dev; 3250 } 3251 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); 3252 3253 /** 3254 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains. 3255 * @dev: The device used to lookup the PM domain. 3256 * @name: The name of the PM domain. 3257 * 3258 * Parse device's OF node to find a PM domain specifier using the 3259 * power-domain-names DT property. For further description see 3260 * genpd_dev_pm_attach_by_id(). 3261 */ 3262 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) 3263 { 3264 int index; 3265 3266 if (!dev->of_node) 3267 return NULL; 3268 3269 index = of_property_match_string(dev->of_node, "power-domain-names", 3270 name); 3271 if (index < 0) 3272 return NULL; 3273 3274 return genpd_dev_pm_attach_by_id(dev, index); 3275 } 3276 3277 static const struct of_device_id idle_state_match[] = { 3278 { .compatible = "domain-idle-state", }, 3279 { } 3280 }; 3281 3282 static int genpd_parse_state(struct genpd_power_state *genpd_state, 3283 struct device_node *state_node) 3284 { 3285 int err; 3286 u32 residency; 3287 u32 entry_latency, exit_latency; 3288 3289 err = of_property_read_u32(state_node, "entry-latency-us", 3290 &entry_latency); 3291 if (err) { 3292 pr_debug(" * %pOF missing entry-latency-us property\n", 3293 state_node); 3294 return -EINVAL; 3295 } 3296 3297 err = of_property_read_u32(state_node, "exit-latency-us", 3298 &exit_latency); 3299 if (err) { 3300 pr_debug(" * %pOF missing exit-latency-us property\n", 3301 state_node); 3302 return -EINVAL; 3303 } 3304 3305 err = of_property_read_u32(state_node, "min-residency-us", &residency); 3306 if (!err) 3307 genpd_state->residency_ns = 1000LL * residency; 3308 3309 of_property_read_string(state_node, "idle-state-name", &genpd_state->name); 3310 3311 genpd_state->power_on_latency_ns = 1000LL * exit_latency; 3312 genpd_state->power_off_latency_ns = 1000LL * entry_latency; 3313 genpd_state->fwnode = of_fwnode_handle(state_node); 3314 3315 return 0; 3316 } 3317 3318 static int genpd_iterate_idle_states(struct device_node *dn, 3319 struct genpd_power_state *states) 3320 { 3321 int ret; 3322 struct of_phandle_iterator it; 3323 struct device_node *np; 3324 int i = 0; 3325 3326 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL); 3327 if (ret <= 0) 3328 return ret == -ENOENT ? 
0 : ret;
3329 
3330 	/* Loop over the phandles until all the requested entries are found */
3331 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
3332 		np = it.node;
3333 		if (!of_match_node(idle_state_match, np))
3334 			continue;
3335 
3336 		if (!of_device_is_available(np))
3337 			continue;
3338 
3339 		if (states) {
3340 			ret = genpd_parse_state(&states[i], np);
3341 			if (ret) {
3342 				pr_err("Parsing idle state node %pOF failed with err %d\n",
3343 				       np, ret);
3344 				of_node_put(np);
3345 				return ret;
3346 			}
3347 		}
3348 		i++;
3349 	}
3350 
3351 	return i;
3352 }
3353 
3354 /**
3355  * of_genpd_parse_idle_states: Return array of idle states for the genpd.
3356  *
3357  * @dn: The genpd device node
3358  * @states: The pointer to which the state array will be saved.
3359  * @n: The count of elements in the array returned from this function.
3360  *
3361  * Returns the device states parsed from the OF node. The memory for the states
3362  * is allocated by this function and it is the responsibility of the caller to
3363  * free the memory after use. Returns 0 when zero or more compatible domain
3364  * idle states are found, and a negative error code in case of errors.
3365  */
3366 int of_genpd_parse_idle_states(struct device_node *dn,
3367 			       struct genpd_power_state **states, int *n)
3368 {
3369 	struct genpd_power_state *st;
3370 	int ret;
3371 
3372 	ret = genpd_iterate_idle_states(dn, NULL);
3373 	if (ret < 0)
3374 		return ret;
3375 
3376 	if (!ret) {
3377 		*states = NULL;
3378 		*n = 0;
3379 		return 0;
3380 	}
3381 
3382 	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
3383 	if (!st)
3384 		return -ENOMEM;
3385 
3386 	ret = genpd_iterate_idle_states(dn, st);
3387 	if (ret <= 0) {
3388 		kfree(st);
3389 		return ret < 0 ? ret : -EINVAL;
3390 	}
3391 
3392 	*states = st;
3393 	*n = ret;
3394 
3395 	return 0;
3396 }
3397 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
3398 
3399 /**
3400  * of_genpd_sync_state() - A common sync_state function for genpd providers
3401  * @np: The device node the genpd provider is associated with.
3402  *
3403  * The @np that corresponds to a genpd provider may provide one or multiple
3404  * genpds. This function makes use of @np to find the genpds that belong to
3405  * the provider. For each genpd found, a power-off is attempted.
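 *
 * Example (an illustrative sketch; the "foo" names are hypothetical): a genpd
 * provider driver can simply forward its ->sync_state() callback here, so
 * that unused domains are only powered off once all consumers have probed:
 *
 *	static void foo_pd_sync_state(struct device *dev)
 *	{
 *		of_genpd_sync_state(dev->of_node);
 *	}
 *
 *	static struct platform_driver foo_pd_driver = {
 *		.driver = {
 *			.name = "foo-pd",
 *			.sync_state = foo_pd_sync_state,
 *		},
 *	};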
3406 */ 3407 void of_genpd_sync_state(struct device_node *np) 3408 { 3409 struct generic_pm_domain *genpd; 3410 3411 if (!np) 3412 return; 3413 3414 mutex_lock(&gpd_list_lock); 3415 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 3416 if (genpd->provider == of_fwnode_handle(np)) { 3417 genpd_lock(genpd); 3418 genpd_power_off(genpd, false, 0); 3419 genpd_unlock(genpd); 3420 } 3421 } 3422 mutex_unlock(&gpd_list_lock); 3423 } 3424 EXPORT_SYMBOL_GPL(of_genpd_sync_state); 3425 3426 static int genpd_provider_probe(struct device *dev) 3427 { 3428 return 0; 3429 } 3430 3431 static void genpd_provider_sync_state(struct device *dev) 3432 { 3433 } 3434 3435 static struct device_driver genpd_provider_drv = { 3436 .name = "genpd_provider", 3437 .bus = &genpd_provider_bus_type, 3438 .probe = genpd_provider_probe, 3439 .sync_state = genpd_provider_sync_state, 3440 .suppress_bind_attrs = true, 3441 }; 3442 3443 static int __init genpd_bus_init(void) 3444 { 3445 int ret; 3446 3447 ret = device_register(&genpd_provider_bus); 3448 if (ret) { 3449 put_device(&genpd_provider_bus); 3450 return ret; 3451 } 3452 3453 ret = bus_register(&genpd_provider_bus_type); 3454 if (ret) 3455 goto err_dev; 3456 3457 ret = bus_register(&genpd_bus_type); 3458 if (ret) 3459 goto err_prov_bus; 3460 3461 ret = driver_register(&genpd_provider_drv); 3462 if (ret) 3463 goto err_bus; 3464 3465 genpd_bus_registered = true; 3466 return 0; 3467 3468 err_bus: 3469 bus_unregister(&genpd_bus_type); 3470 err_prov_bus: 3471 bus_unregister(&genpd_provider_bus_type); 3472 err_dev: 3473 device_unregister(&genpd_provider_bus); 3474 return ret; 3475 } 3476 core_initcall(genpd_bus_init); 3477 3478 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ 3479 3480 3481 /*** debugfs support ***/ 3482 3483 #ifdef CONFIG_DEBUG_FS 3484 /* 3485 * TODO: This function is a slightly modified version of rtpm_status_show 3486 * from sysfs.c, so generalize it. 3487 */ 3488 static void rtpm_status_str(struct seq_file *s, struct device *dev) 3489 { 3490 static const char * const status_lookup[] = { 3491 [RPM_ACTIVE] = "active", 3492 [RPM_RESUMING] = "resuming", 3493 [RPM_SUSPENDED] = "suspended", 3494 [RPM_SUSPENDING] = "suspending" 3495 }; 3496 const char *p = ""; 3497 3498 if (dev->power.runtime_error) 3499 p = "error"; 3500 else if (dev->power.disable_depth) 3501 p = "unsupported"; 3502 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) 3503 p = status_lookup[dev->power.runtime_status]; 3504 else 3505 WARN_ON(1); 3506 3507 seq_printf(s, "%-26s ", p); 3508 } 3509 3510 static void perf_status_str(struct seq_file *s, struct device *dev) 3511 { 3512 struct generic_pm_domain_data *gpd_data; 3513 3514 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 3515 3516 seq_printf(s, "%-10u ", gpd_data->performance_state); 3517 } 3518 3519 static void mode_status_str(struct seq_file *s, struct device *dev) 3520 { 3521 struct generic_pm_domain_data *gpd_data; 3522 3523 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 3524 3525 seq_printf(s, "%2s", gpd_data->hw_mode ? 
"HW" : "SW"); 3526 } 3527 3528 static int genpd_summary_one(struct seq_file *s, 3529 struct generic_pm_domain *genpd) 3530 { 3531 static const char * const status_lookup[] = { 3532 [GENPD_STATE_ON] = "on", 3533 [GENPD_STATE_OFF] = "off" 3534 }; 3535 struct pm_domain_data *pm_data; 3536 struct gpd_link *link; 3537 char state[16]; 3538 int ret; 3539 3540 ret = genpd_lock_interruptible(genpd); 3541 if (ret) 3542 return -ERESTARTSYS; 3543 3544 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) 3545 goto exit; 3546 if (!genpd_status_on(genpd)) 3547 snprintf(state, sizeof(state), "%s-%u", 3548 status_lookup[genpd->status], genpd->state_idx); 3549 else 3550 snprintf(state, sizeof(state), "%s", 3551 status_lookup[genpd->status]); 3552 seq_printf(s, "%-30s %-30s %u", dev_name(&genpd->dev), state, genpd->performance_state); 3553 3554 /* 3555 * Modifications on the list require holding locks on both 3556 * parent and child, so we are safe. 3557 * Also the device name is immutable. 3558 */ 3559 list_for_each_entry(link, &genpd->parent_links, parent_node) { 3560 if (list_is_first(&link->parent_node, &genpd->parent_links)) 3561 seq_printf(s, "\n%48s", " "); 3562 seq_printf(s, "%s", link->child->name); 3563 if (!list_is_last(&link->parent_node, &genpd->parent_links)) 3564 seq_puts(s, ", "); 3565 } 3566 3567 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 3568 seq_printf(s, "\n %-30s ", dev_name(pm_data->dev)); 3569 rtpm_status_str(s, pm_data->dev); 3570 perf_status_str(s, pm_data->dev); 3571 mode_status_str(s, pm_data->dev); 3572 } 3573 3574 seq_puts(s, "\n"); 3575 exit: 3576 genpd_unlock(genpd); 3577 3578 return 0; 3579 } 3580 3581 static int summary_show(struct seq_file *s, void *data) 3582 { 3583 struct generic_pm_domain *genpd; 3584 int ret = 0; 3585 3586 seq_puts(s, "domain status children performance\n"); 3587 seq_puts(s, " /device runtime status managed by\n"); 3588 seq_puts(s, "------------------------------------------------------------------------------\n"); 3589 3590 ret = mutex_lock_interruptible(&gpd_list_lock); 3591 if (ret) 3592 return -ERESTARTSYS; 3593 3594 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 3595 ret = genpd_summary_one(s, genpd); 3596 if (ret) 3597 break; 3598 } 3599 mutex_unlock(&gpd_list_lock); 3600 3601 return ret; 3602 } 3603 3604 static int status_show(struct seq_file *s, void *data) 3605 { 3606 static const char * const status_lookup[] = { 3607 [GENPD_STATE_ON] = "on", 3608 [GENPD_STATE_OFF] = "off" 3609 }; 3610 3611 struct generic_pm_domain *genpd = s->private; 3612 int ret = 0; 3613 3614 ret = genpd_lock_interruptible(genpd); 3615 if (ret) 3616 return -ERESTARTSYS; 3617 3618 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) 3619 goto exit; 3620 3621 if (genpd->status == GENPD_STATE_OFF) 3622 seq_printf(s, "%s-%u\n", status_lookup[genpd->status], 3623 genpd->state_idx); 3624 else 3625 seq_printf(s, "%s\n", status_lookup[genpd->status]); 3626 exit: 3627 genpd_unlock(genpd); 3628 return ret; 3629 } 3630 3631 static int sub_domains_show(struct seq_file *s, void *data) 3632 { 3633 struct generic_pm_domain *genpd = s->private; 3634 struct gpd_link *link; 3635 int ret = 0; 3636 3637 ret = genpd_lock_interruptible(genpd); 3638 if (ret) 3639 return -ERESTARTSYS; 3640 3641 list_for_each_entry(link, &genpd->parent_links, parent_node) 3642 seq_printf(s, "%s\n", link->child->name); 3643 3644 genpd_unlock(genpd); 3645 return ret; 3646 } 3647 3648 static int idle_states_show(struct seq_file *s, void *data) 3649 { 3650 struct generic_pm_domain 
*genpd = s->private;
3651 	u64 now, delta;
3652 	unsigned int i;
3653 	int ret = 0;
3654 
3655 	ret = genpd_lock_interruptible(genpd);
3656 	if (ret)
3657 		return -ERESTARTSYS;
3658 
3659 	seq_puts(s, "State          Time Spent(ms) Usage      Rejected   Above      Below\n");
3660 
3661 	for (i = 0; i < genpd->state_count; i++) {
3662 		struct genpd_power_state *state = &genpd->states[i];
3663 		char state_name[15];
3664 
3665 		u64 idle_time = state->idle_time;
3666 
3667 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3668 			now = ktime_get_mono_fast_ns();
3669 			if (now > genpd->accounting_time) {
3670 				delta = now - genpd->accounting_time;
3671 				idle_time += delta;
3672 			}
3673 		}
3674 
3675 		if (!state->name)
3676 			snprintf(state_name, ARRAY_SIZE(state_name), "S%-13d", i);
3677 
3678 		do_div(idle_time, NSEC_PER_MSEC);
3679 		seq_printf(s, "%-14s %-14llu %-10llu %-10llu %-10llu %llu\n",
3680 			   state->name ?: state_name, idle_time,
3681 			   state->usage, state->rejected, state->above,
3682 			   state->below);
3683 	}
3684 
3685 	genpd_unlock(genpd);
3686 	return ret;
3687 }
3688 
3689 static int active_time_show(struct seq_file *s, void *data)
3690 {
3691 	struct generic_pm_domain *genpd = s->private;
3692 	u64 now, on_time, delta = 0;
3693 	int ret = 0;
3694 
3695 	ret = genpd_lock_interruptible(genpd);
3696 	if (ret)
3697 		return -ERESTARTSYS;
3698 
3699 	if (genpd->status == GENPD_STATE_ON) {
3700 		now = ktime_get_mono_fast_ns();
3701 		if (now > genpd->accounting_time)
3702 			delta = now - genpd->accounting_time;
3703 	}
3704 
3705 	on_time = genpd->on_time + delta;
3706 	do_div(on_time, NSEC_PER_MSEC);
3707 	seq_printf(s, "%llu ms\n", on_time);
3708 
3709 	genpd_unlock(genpd);
3710 	return ret;
3711 }
3712 
3713 static int total_idle_time_show(struct seq_file *s, void *data)
3714 {
3715 	struct generic_pm_domain *genpd = s->private;
3716 	u64 now, delta, total = 0;
3717 	unsigned int i;
3718 	int ret = 0;
3719 
3720 	ret = genpd_lock_interruptible(genpd);
3721 	if (ret)
3722 		return -ERESTARTSYS;
3723 
3724 	for (i = 0; i < genpd->state_count; i++) {
3725 		total += genpd->states[i].idle_time;
3726 
3727 		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3728 			now = ktime_get_mono_fast_ns();
3729 			if (now > genpd->accounting_time) {
3730 				delta = now - genpd->accounting_time;
3731 				total += delta;
3732 			}
3733 		}
3734 	}
3735 
3736 	do_div(total, NSEC_PER_MSEC);
3737 	seq_printf(s, "%llu ms\n", total);
3738 
3739 	genpd_unlock(genpd);
3740 	return ret;
3741 }
3742 
3743 
3744 static int devices_show(struct seq_file *s, void *data)
3745 {
3746 	struct generic_pm_domain *genpd = s->private;
3747 	struct pm_domain_data *pm_data;
3748 	int ret = 0;
3749 
3750 	ret = genpd_lock_interruptible(genpd);
3751 	if (ret)
3752 		return -ERESTARTSYS;
3753 
3754 	list_for_each_entry(pm_data, &genpd->dev_list, list_node)
3755 		seq_printf(s, "%s\n", dev_name(pm_data->dev));
3756 
3757 	genpd_unlock(genpd);
3758 	return ret;
3759 }
3760 
3761 static int perf_state_show(struct seq_file *s, void *data)
3762 {
3763 	struct generic_pm_domain *genpd = s->private;
3764 
3765 	if (genpd_lock_interruptible(genpd))
3766 		return -ERESTARTSYS;
3767 
3768 	seq_printf(s, "%u\n", genpd->performance_state);
3769 
3770 	genpd_unlock(genpd);
3771 	return 0;
3772 }
3773 
3774 DEFINE_SHOW_ATTRIBUTE(summary);
3775 DEFINE_SHOW_ATTRIBUTE(status);
3776 DEFINE_SHOW_ATTRIBUTE(sub_domains);
3777 DEFINE_SHOW_ATTRIBUTE(idle_states);
3778 DEFINE_SHOW_ATTRIBUTE(active_time);
3779 DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3780 DEFINE_SHOW_ATTRIBUTE(devices);
3781 DEFINE_SHOW_ATTRIBUTE(perf_state);
3782 
3783 static void
genpd_debug_add(struct generic_pm_domain *genpd) 3784 { 3785 struct dentry *d; 3786 3787 if (!genpd_debugfs_dir) 3788 return; 3789 3790 d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir); 3791 3792 debugfs_create_file("current_state", 0444, 3793 d, genpd, &status_fops); 3794 debugfs_create_file("sub_domains", 0444, 3795 d, genpd, &sub_domains_fops); 3796 debugfs_create_file("idle_states", 0444, 3797 d, genpd, &idle_states_fops); 3798 debugfs_create_file("active_time", 0444, 3799 d, genpd, &active_time_fops); 3800 debugfs_create_file("total_idle_time", 0444, 3801 d, genpd, &total_idle_time_fops); 3802 debugfs_create_file("devices", 0444, 3803 d, genpd, &devices_fops); 3804 if (genpd->set_performance_state) 3805 debugfs_create_file("perf_state", 0444, 3806 d, genpd, &perf_state_fops); 3807 } 3808 3809 static int __init genpd_debug_init(void) 3810 { 3811 struct generic_pm_domain *genpd; 3812 3813 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); 3814 3815 debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, 3816 NULL, &summary_fops); 3817 3818 list_for_each_entry(genpd, &gpd_list, gpd_list_node) 3819 genpd_debug_add(genpd); 3820 3821 return 0; 3822 } 3823 late_initcall(genpd_debug_init); 3824 3825 static void __exit genpd_debug_exit(void) 3826 { 3827 debugfs_remove_recursive(genpd_debugfs_dir); 3828 } 3829 __exitcall(genpd_debug_exit); 3830 #endif /* CONFIG_DEBUG_FS */ 3831