// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#define GENPD_RETRY_MAX_MS	250	/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
				  int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
				   int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
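
/*
 * Illustrative sketch (hypothetical provider code, not part of this file):
 * setting GENPD_FLAG_IRQ_SAFE before pm_genpd_init() makes genpd_lock_init()
 * below select the spinlock-based genpd_spin_ops instead of the mutex-based
 * ops, so the domain can be powered on/off from atomic context. The "my_pd"
 * domain and its callbacks are made-up names:
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.flags = GENPD_FLAG_IRQ_SAFE,
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 */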

static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
			      genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	if (!genpd_debugfs_dir)
		return;

	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is active, it means we are just
	 * out of off and so update the idle time and vice
	 * versa.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		/* Find parent's performance state */
		ret = genpd_xlate_performance_state(genpd, parent, state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
							       parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret)
			goto err;
	}

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
							       parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

static int genpd_dev_pm_set_performance_state(struct device *dev,
					      unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	int ret = 0;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when the
 *	   device doesn't have any performance state constraints left (and so
 *	   the device would no longer participate in determining the target
 *	   performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
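
/*
 * Illustrative sketch (hypothetical consumer code): a driver attached to a
 * genpd could vote for a performance state while active and drop the vote
 * when done; the state value 2 is made up:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 2);
 *	if (ret)
 *		return ret;
 *	...
 *	dev_pm_genpd_set_performance_state(dev, 0);
 */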

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. It's assumed that the users
 * guarantee that the genpd wouldn't be detached while this routine is getting
 * called. Additionally, it's also assumed that @dev isn't runtime suspended
 * (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
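
/*
 * Illustrative sketch (hypothetical consumer code): a driver expecting its
 * next interrupt in roughly 10 ms could hint that to the governor; the
 * figure is made up:
 *
 *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 10));
 */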
533 * 534 * It is assumed that the users guarantee that the genpd wouldn't be detached 535 * while this routine is getting called. 536 */ 537 void dev_pm_genpd_synced_poweroff(struct device *dev) 538 { 539 struct generic_pm_domain *genpd; 540 541 genpd = dev_to_genpd_safe(dev); 542 if (!genpd) 543 return; 544 545 genpd_lock(genpd); 546 genpd->synced_poweroff = true; 547 genpd_unlock(genpd); 548 } 549 EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff); 550 551 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) 552 { 553 unsigned int state_idx = genpd->state_idx; 554 ktime_t time_start; 555 s64 elapsed_ns; 556 int ret; 557 558 /* Notify consumers that we are about to power on. */ 559 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, 560 GENPD_NOTIFY_PRE_ON, 561 GENPD_NOTIFY_OFF, NULL); 562 ret = notifier_to_errno(ret); 563 if (ret) 564 return ret; 565 566 if (!genpd->power_on) 567 goto out; 568 569 timed = timed && genpd->gd && !genpd->states[state_idx].fwnode; 570 if (!timed) { 571 ret = genpd->power_on(genpd); 572 if (ret) 573 goto err; 574 575 goto out; 576 } 577 578 time_start = ktime_get(); 579 ret = genpd->power_on(genpd); 580 if (ret) 581 goto err; 582 583 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 584 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns) 585 goto out; 586 587 genpd->states[state_idx].power_on_latency_ns = elapsed_ns; 588 genpd->gd->max_off_time_changed = true; 589 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", 590 genpd->name, "on", elapsed_ns); 591 592 out: 593 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); 594 genpd->synced_poweroff = false; 595 return 0; 596 err: 597 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, 598 NULL); 599 return ret; 600 } 601 602 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed) 603 { 604 unsigned int state_idx = genpd->state_idx; 605 ktime_t time_start; 606 s64 elapsed_ns; 607 int ret; 608 609 /* Notify consumers that we are about to power off. */ 610 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, 611 GENPD_NOTIFY_PRE_OFF, 612 GENPD_NOTIFY_ON, NULL); 613 ret = notifier_to_errno(ret); 614 if (ret) 615 return ret; 616 617 if (!genpd->power_off) 618 goto out; 619 620 timed = timed && genpd->gd && !genpd->states[state_idx].fwnode; 621 if (!timed) { 622 ret = genpd->power_off(genpd); 623 if (ret) 624 goto busy; 625 626 goto out; 627 } 628 629 time_start = ktime_get(); 630 ret = genpd->power_off(genpd); 631 if (ret) 632 goto busy; 633 634 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 635 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns) 636 goto out; 637 638 genpd->states[state_idx].power_off_latency_ns = elapsed_ns; 639 genpd->gd->max_off_time_changed = true; 640 pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", 641 genpd->name, "off", elapsed_ns); 642 643 out: 644 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, 645 NULL); 646 return 0; 647 busy: 648 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); 649 return ret; 650 } 651 652 /** 653 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off(). 654 * @genpd: PM domain to power off. 655 * 656 * Queue up the execution of genpd_power_off() unless it's already been done 657 * before. 
658 */ 659 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) 660 { 661 queue_work(pm_wq, &genpd->power_off_work); 662 } 663 664 /** 665 * genpd_power_off - Remove power from a given PM domain. 666 * @genpd: PM domain to power down. 667 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the 668 * RPM status of the releated device is in an intermediate state, not yet turned 669 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not 670 * be RPM_SUSPENDED, while it tries to power off the PM domain. 671 * @depth: nesting count for lockdep. 672 * 673 * If all of the @genpd's devices have been suspended and all of its subdomains 674 * have been powered down, remove power from @genpd. 675 */ 676 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, 677 unsigned int depth) 678 { 679 struct pm_domain_data *pdd; 680 struct gpd_link *link; 681 unsigned int not_suspended = 0; 682 int ret; 683 684 /* 685 * Do not try to power off the domain in the following situations: 686 * (1) The domain is already in the "power off" state. 687 * (2) System suspend is in progress. 688 */ 689 if (!genpd_status_on(genpd) || genpd->prepared_count > 0) 690 return 0; 691 692 /* 693 * Abort power off for the PM domain in the following situations: 694 * (1) The domain is configured as always on. 695 * (2) When the domain has a subdomain being powered on. 696 */ 697 if (genpd_is_always_on(genpd) || 698 genpd_is_rpm_always_on(genpd) || 699 atomic_read(&genpd->sd_count) > 0) 700 return -EBUSY; 701 702 /* 703 * The children must be in their deepest (powered-off) states to allow 704 * the parent to be powered off. Note that, there's no need for 705 * additional locking, as powering on a child, requires the parent's 706 * lock to be acquired first. 707 */ 708 list_for_each_entry(link, &genpd->parent_links, parent_node) { 709 struct generic_pm_domain *child = link->child; 710 if (child->state_idx < child->state_count - 1) 711 return -EBUSY; 712 } 713 714 list_for_each_entry(pdd, &genpd->dev_list, list_node) { 715 /* 716 * Do not allow PM domain to be powered off, when an IRQ safe 717 * device is part of a non-IRQ safe domain. 718 */ 719 if (!pm_runtime_suspended(pdd->dev) || 720 irq_safe_dev_in_sleep_domain(pdd->dev, genpd)) 721 not_suspended++; 722 } 723 724 if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on)) 725 return -EBUSY; 726 727 if (genpd->gov && genpd->gov->power_down_ok) { 728 if (!genpd->gov->power_down_ok(&genpd->domain)) 729 return -EAGAIN; 730 } 731 732 /* Default to shallowest state. */ 733 if (!genpd->gov) 734 genpd->state_idx = 0; 735 736 /* Don't power off, if a child domain is waiting to power on. */ 737 if (atomic_read(&genpd->sd_count) > 0) 738 return -EBUSY; 739 740 ret = _genpd_power_off(genpd, true); 741 if (ret) { 742 genpd->states[genpd->state_idx].rejected++; 743 return ret; 744 } 745 746 genpd->status = GENPD_STATE_OFF; 747 genpd_update_accounting(genpd); 748 genpd->states[genpd->state_idx].usage++; 749 750 list_for_each_entry(link, &genpd->child_links, child_node) { 751 genpd_sd_counter_dec(link->parent); 752 genpd_lock_nested(link->parent, depth + 1); 753 genpd_power_off(link->parent, false, depth + 1); 754 genpd_unlock(link->parent); 755 } 756 757 return 0; 758 } 759 760 /** 761 * genpd_power_on - Restore power to a given PM domain and its parents. 762 * @genpd: PM domain to power up. 763 * @depth: nesting count for lockdep. 
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

err:
	list_for_each_entry_continue_reverse(link,
					     &genpd->child_links,
					     child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_unlock(genpd);

	return 0;
}
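
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * genpd_runtime_suspend() is never called directly; it runs when runtime PM
 * drops the last usage count reference for an attached device:
 *
 *	pm_runtime_enable(dev);
 *	ret = pm_runtime_resume_and_get(dev);	(may power on the domain)
 *	...
 *	pm_runtime_put_sync(dev);		(may power off the domain)
 */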

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 * I/O PM domain.
 * @dev: Device to suspend.
 * @suspend_noirq: Generic suspend_noirq callback.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev,
				int (*suspend_noirq)(struct device *dev),
				int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_suspend_noirq,
				    pm_generic_resume_noirq);
}

/**
 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_finish_resume(struct device *dev,
			       int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	/* Use the callback that was passed in, so thaw and restore work too. */
	return resume_noirq(dev);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_resume_noirq);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_freeze_noirq,
				    pm_generic_thaw_noirq);
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 * I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_poweroff_noirq,
				    pm_generic_restore_noirq);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_restore_noirq);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be resumed
 * during the syscore resume phase. It may also be called during suspend-to-idle
 * to resume a corresponding CPU device that is attached to a genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
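
/*
 * Illustrative sketch (hypothetical code): a syscore suspend path could wrap
 * these helpers for a device that must be handled with IRQs disabled;
 * "my_dev" is a made-up name:
 *
 *	static int my_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(my_dev);
 *		return 0;
 *	}
 *
 *	static void my_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(my_dev);
 *	}
 */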

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							   bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	if (!genpd || !dev)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
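
/*
 * Illustrative sketch (hypothetical provider code): after initializing a
 * domain, a platform could attach a device to it; "my_pd" and "pdev" are
 * made-up names and error handling is trimmed:
 *
 *	pm_genpd_init(&my_pd, NULL, false);
 *	ret = pm_genpd_add_device(&my_pd, &pdev->dev);
 */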

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	if (genpd->gd)
		genpd->gd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
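
/*
 * Illustrative sketch (hypothetical consumer code): reacting to the domain
 * being powered down; "my_nb" and my_save_context() are made-up names:
 *
 *	static int my_genpd_cb(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		if (action == GENPD_NOTIFY_PRE_OFF)
 *			my_save_context();
 *		return NOTIFY_OK;
 *	}
 *
 *	my_nb.notifier_call = my_genpd_cb;
 *	ret = dev_pm_genpd_add_notifier(dev, &my_nb);
 */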

/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
		     genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Leader PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
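
/*
 * Illustrative sketch (hypothetical provider code): linking two registered
 * domains into a parent/child hierarchy; the names are made up:
 *
 *	pm_genpd_init(&parent_pd, NULL, false);
 *	pm_genpd_init(&child_pd, NULL, false);
 *	ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
 */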
static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free_states = genpd_free_default_power_state;

	return 0;
}

static int genpd_alloc_data(struct generic_pm_domain *genpd)
{
	struct genpd_governor_data *gd = NULL;
	int ret;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	if (genpd->gov) {
		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
		if (!gd) {
			ret = -ENOMEM;
			goto free;
		}

		gd->max_off_time_ns = -1;
		gd->max_off_time_changed = true;
		gd->next_wakeup = KTIME_MAX;
		gd->next_hrtimer = KTIME_MAX;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			goto free;
	}

	genpd->gd = gd;
	return 0;

free:
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	kfree(gd);
	return ret;
}

static void genpd_free_data(struct generic_pm_domain *genpd)
{
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);
	kfree(genpd->gd);
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain, true if it is initially powered off.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->parent_links);
	INIT_LIST_HEAD(&genpd->child_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
	genpd->device_count = 0;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get_mono_fast_ns();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;
	genpd->domain.start = genpd_dev_pm_start;
	genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* The always-on governor works better with the corresponding flag. */
	if (gov == &pm_domain_always_on_gov)
		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
	    !genpd_status_on(genpd)) {
		pr_err("always-on PM domain %s is not on\n", genpd->name);
		return -EINVAL;
	}

	/* Multiple states but no governor doesn't make sense. */
	if (!gov && genpd->state_count > 1)
		pr_warn("%s: no governor for states\n", genpd->name);

	ret = genpd_alloc_data(genpd);
	if (ret)
		return ret;

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
	genpd_debug_add(genpd);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
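
/*
 * Illustrative sketch (hypothetical foo_* names): the minimal provider-side
 * setup is a statically defined domain with its power callbacks filled in,
 * registered in an initially-off state.
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		return foo_hw_power(pd, true);	// hypothetical helper
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		return foo_hw_power(pd, false);
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo-pd",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&foo_pd, NULL, true);
 */
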
static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	genpd_debug_remove(genpd);
	cancel_work_sync(&genpd->power_off_work);
	genpd_free_data(genpd);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed if the associated provider has
 * been removed, it is not a parent to any other PM domain, and it has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of the xlate function
 * must be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx;

	/* Validate the specifier before using its first cell as the index. */
	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	idx = genpdspec->args[0];
	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;
	fwnode_dev_initialized(&np->fwnode, true);

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	bool ret = false;
	const struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd == genpd) {
			ret = true;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret;

	if (!np || !genpd)
		return -EINVAL;

	if (!genpd_present(genpd))
		return -EINVAL;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret)
			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(IS_ERR(genpd->opp_table));
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		return ret;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
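
/*
 * Illustrative sketch (hypothetical foo_* names): a platform driver for a
 * single-domain provider typically initializes the genpd and then binds it
 * to its device tree node.
 *
 *	static int foo_pd_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = pm_genpd_init(&foo_pd, NULL, true);
 *		if (ret)
 *			return ret;
 *
 *		ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
 *		if (ret)
 *			pm_genpd_remove(&foo_pd);
 *
 *		return ret;
 *	}
 */
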
/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err_probe(&genpd->dev, ret,
					      "Failed to add OPP table for index %d\n", i);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
			WARN_ON(IS_ERR(genpd->opp_table));
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
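
/*
 * Illustrative sketch (hypothetical foo_* names): a provider exposing
 * several domains behind one node with #power-domain-cells = <1>. Leaving
 * ->xlate NULL selects genpd_xlate_onecell(), so the cell value indexes
 * the domains[] array directly.
 *
 *	static struct generic_pm_domain *foo_domains[FOO_NR_PDS];
 *
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_domains,
 *		.num_domains = FOO_NR_PDS,
 *	};
 *
 *	// after pm_genpd_init() has been called for each foo_domains[i]:
 *	ret = of_genpd_add_provider_onecell(np, &foo_pd_data);
 */
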
/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			fwnode_dev_initialized(&cp->node->fwnode, false);
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look up a PM domain
 * @genpdspec: OF phandle args to use for the look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses the xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for the PM domain look-up
 * @dev: Device to be added.
 *
 * Looks up an I/O PM domain based upon the phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	if (!dev)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, dev);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and subdomain based upon the phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret == -ENOENT ? -EPROBE_DEFER : ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and subdomain based upon the phandle args
 * provided and removes the subdomain from the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
			      struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = pm_genpd_remove_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
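
/*
 * Illustrative sketch: callers of the two helpers above hold DT phandle
 * specifiers rather than genpd pointers. The specs would normally be filled
 * by of_parse_phandle_with_args() on "power-domains" properties
 * (hypothetical node and index below).
 *
 *	struct of_phandle_args parent_spec, child_spec;
 *	int ret;
 *
 *	ret = of_parse_phandle_with_args(np, "power-domains",
 *					 "#power-domain-cells", 0,
 *					 &parent_spec);
 *	...
 *	ret = of_genpd_add_subdomain(&parent_spec, &child_spec);
 *	// -EPROBE_DEFER here means a provider is not registered yet
 */
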
/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to device node associated with provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the device node @np that is passed in. The PM domain will
 * only be removed if the provider associated with the domain has been
 * removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);
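
/*
 * Illustrative sketch (hypothetical foo_* names): a provider's remove path
 * usually drops the provider first and then unregisters its domains, last
 * one first.
 *
 *	static void foo_pd_remove(struct platform_device *pdev)
 *	{
 *		struct device_node *np = pdev->dev.of_node;
 *		struct generic_pm_domain *pd;
 *
 *		of_genpd_del_provider(np);
 *
 *		do {
 *			pd = of_genpd_remove_last(np);
 *		} while (!IS_ERR(pd));
 *	}
 */
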
static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}

static const struct bus_type genpd_bus_type = {
	.name = "genpd",
};

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	/* Drop the default performance state */
	if (dev_gpd_data(dev)->default_pstate) {
		dev_pm_genpd_set_performance_state(dev, 0);
		dev_gpd_data(dev)->default_pstate = 0;
	}

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int pstate;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					 "#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	/* Set the default performance state */
	pstate = of_get_required_opp_performance_state(dev->of_node, index);
	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
		ret = pstate;
		goto err;
	} else if (pstate > 0) {
		ret = dev_pm_genpd_set_performance_state(dev, pstate);
		if (ret)
			goto err;
		dev_gpd_data(dev)->default_pstate = pstate;
	}

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret) {
		/* Drop the default performance state */
		if (dev_gpd_data(dev)->default_pstate) {
			dev_pm_genpd_set_performance_state(dev, 0);
			dev_gpd_data(dev)->default_pstate = 0;
		}

		genpd_remove_device(pd, dev);
		return -EPROBE_DEFER;
	}

	return 1;

err:
	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
		pd->name, ret);
	genpd_remove_device(pd, dev);
	return ret;
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Returns 1 on successfully attached PM domain, 0 when the device doesn't
 * need a PM domain or when multiple power-domains exist for it, else a
 * negative error code. Note that if a power-domain exists for the device,
 * but it cannot be found or turned on, then -EPROBE_DEFER is returned to
 * ensure that the device is not probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
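
/*
 * Note that genpd_dev_pm_attach() is normally not called by drivers
 * directly. Bus code attaches a device to its (single) PM domain via
 * dev_pm_domain_attach() during probe, which ends up here for DT based
 * genpd providers. A simplified sketch of that probe-side call, assuming
 * the bool-taking signature of dev_pm_domain_attach():
 *
 *	ret = dev_pm_domain_attach(dev, true);
 *	if (ret)
 *		return ret;	// may be -EPROBE_DEFER
 */
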
2828 */ 2829 if (of_count_phandle_with_args(dev->of_node, "power-domains", 2830 "#power-domain-cells") != 1) 2831 return 0; 2832 2833 return __genpd_dev_pm_attach(dev, dev, 0, true); 2834 } 2835 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 2836 2837 /** 2838 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains. 2839 * @dev: The device used to lookup the PM domain. 2840 * @index: The index of the PM domain. 2841 * 2842 * Parse device's OF node to find a PM domain specifier at the provided @index. 2843 * If such is found, creates a virtual device and attaches it to the retrieved 2844 * pm_domain ops. To deal with detaching of the virtual device, the ->detach() 2845 * callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach(). 2846 * 2847 * Returns the created virtual device if successfully attached PM domain, NULL 2848 * when the device don't need a PM domain, else an ERR_PTR() in case of 2849 * failures. If a power-domain exists for the device, but cannot be found or 2850 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device 2851 * is not probed and to re-try again later. 2852 */ 2853 struct device *genpd_dev_pm_attach_by_id(struct device *dev, 2854 unsigned int index) 2855 { 2856 struct device *virt_dev; 2857 int num_domains; 2858 int ret; 2859 2860 if (!dev->of_node) 2861 return NULL; 2862 2863 /* Verify that the index is within a valid range. */ 2864 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", 2865 "#power-domain-cells"); 2866 if (index >= num_domains) 2867 return NULL; 2868 2869 /* Allocate and register device on the genpd bus. */ 2870 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); 2871 if (!virt_dev) 2872 return ERR_PTR(-ENOMEM); 2873 2874 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); 2875 virt_dev->bus = &genpd_bus_type; 2876 virt_dev->release = genpd_release_dev; 2877 virt_dev->of_node = of_node_get(dev->of_node); 2878 2879 ret = device_register(virt_dev); 2880 if (ret) { 2881 put_device(virt_dev); 2882 return ERR_PTR(ret); 2883 } 2884 2885 /* Try to attach the device to the PM domain at the specified index. */ 2886 ret = __genpd_dev_pm_attach(virt_dev, dev, index, false); 2887 if (ret < 1) { 2888 device_unregister(virt_dev); 2889 return ret ? ERR_PTR(ret) : NULL; 2890 } 2891 2892 pm_runtime_enable(virt_dev); 2893 genpd_queue_power_off_work(dev_to_genpd(virt_dev)); 2894 2895 return virt_dev; 2896 } 2897 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); 2898 2899 /** 2900 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains. 2901 * @dev: The device used to lookup the PM domain. 2902 * @name: The name of the PM domain. 2903 * 2904 * Parse device's OF node to find a PM domain specifier using the 2905 * power-domain-names DT property. For further description see 2906 * genpd_dev_pm_attach_by_id(). 
/**
 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
	int index;

	if (!dev->of_node)
		return NULL;

	index = of_property_match_string(dev->of_node, "power-domain-names",
					 name);
	if (index < 0)
		return NULL;

	return genpd_dev_pm_attach_by_id(dev, index);
}

static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
			     struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
				   &entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
				   &exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000LL * residency;

	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}

static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret == -ENOENT ? 0 : ret;

	/* Loop over the phandles until all the requested entries are found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;

		if (!of_device_is_available(np))
			continue;

		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function and it is the caller's responsibility to free
 * it after use. If zero or more compatible domain idle states are found, 0 is
 * returned; in case of errors, a negative error code is returned.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}
	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
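
/*
 * Illustrative sketch (hypothetical foo_* names): a provider can populate
 * its domain's idle states from DT before registering it. simple_qos_governor
 * is one of the governors declared in <linux/pm_domain.h>.
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	foo_pd.states = states;
 *	foo_pd.state_count = nr_states;
 *
 *	ret = pm_genpd_init(&foo_pd, &simple_qos_governor, true);
 */
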
static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/*** debugfs support ***/

#ifdef CONFIG_DEBUG_FS
/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_printf(s, "%-25s ", p);
}

static void perf_status_str(struct seq_file *s, struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	seq_put_decimal_ull(s, "", gpd_data->performance_state);
}

static int genpd_summary_one(struct seq_file *s,
			     struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (list_is_first(&link->parent_node, &genpd->parent_links))
			seq_printf(s, "\n%48s", " ");
		seq_printf(s, "%s", link->child->name);
		if (!list_is_last(&link->parent_node, &genpd->parent_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		perf_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          children                           performance\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;

	if (genpd->status == GENPD_STATE_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			   genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->parent_links, parent_node)
		seq_printf(s, "%s\n", link->child->name);

	genpd_unlock(genpd);
	return ret;
}

static int idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, delta, idle_time = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");

	for (i = 0; i < genpd->state_count; i++) {
		idle_time += genpd->states[i].idle_time;

		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
			now = ktime_get_mono_fast_ns();
			if (now > genpd->accounting_time) {
				delta = now - genpd->accounting_time;
				idle_time += delta;
			}
		}

		do_div(idle_time, NSEC_PER_MSEC);
		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
			   genpd->states[i].usage, genpd->states[i].rejected);
	}

	genpd_unlock(genpd);
	return ret;
}

static int active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, on_time, delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GENPD_STATE_ON) {
		now = ktime_get_mono_fast_ns();
		if (now > genpd->accounting_time)
			delta = now - genpd->accounting_time;
	}

	on_time = genpd->on_time + delta;
	do_div(on_time, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", on_time);

	genpd_unlock(genpd);
	return ret;
}

static int total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	u64 now, delta, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {
		total += genpd->states[i].idle_time;

		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
			now = ktime_get_mono_fast_ns();
			if (now > genpd->accounting_time) {
				delta = now - genpd->accounting_time;
				total += delta;
			}
		}
	}

	do_div(total, NSEC_PER_MSEC);
	seq_printf(s, "%llu ms\n", total);

	genpd_unlock(genpd);
	return ret;
}


static int devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

static int perf_state_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;

	if (genpd_lock_interruptible(genpd))
		return -ERESTARTSYS;

	seq_printf(s, "%u\n", genpd->performance_state);

	genpd_unlock(genpd);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);

static void genpd_debug_add(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);

	debugfs_create_file("current_state", 0444,
			    d, genpd, &status_fops);
	debugfs_create_file("sub_domains", 0444,
			    d, genpd, &sub_domains_fops);
	debugfs_create_file("idle_states", 0444,
			    d, genpd, &idle_states_fops);
	debugfs_create_file("active_time", 0444,
			    d, genpd, &active_time_fops);
	debugfs_create_file("total_idle_time", 0444,
			    d, genpd, &total_idle_time_fops);
	debugfs_create_file("devices", 0444,
			    d, genpd, &devices_fops);
	if (genpd->set_performance_state)
		debugfs_create_file("perf_state", 0444,
				    d, genpd, &perf_state_fops);
}

static int __init genpd_debug_init(void)
{
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
			    NULL, &summary_fops);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_debug_add(genpd);

	return 0;
}
late_initcall(genpd_debug_init);

static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */