// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

/* Provides a unique ID for each genpd device */
static DEFINE_IDA(genpd_ida);

/* The bus for genpd_providers. */
static const struct bus_type genpd_provider_bus_type = {
	.name = "genpd_provider",
};

/* The parent for genpd_provider devices. */
static struct device genpd_provider_bus = {
	.init_name = "genpd_provider",
};

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
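/*
 * Illustrative sketch (editor's note, not part of the driver): for the "stop"
 * device operation, GENPD_DEV_CALLBACK(genpd, int, stop, dev) expands to
 * roughly the following statement expression:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *	__ret;
 *
 * i.e. a missing per-device callback is treated as success (0).
 */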
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
				  int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
				   int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
	genpd->raw_lock_flags = flags;
}

static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
				       int depth)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
	genpd->raw_lock_flags = flags;
}

static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->raw_slock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
	genpd->raw_lock_flags = flags;
	return 0;
}

static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->raw_slock)
{
	raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
}

static const struct genpd_lock_ops genpd_raw_spin_ops = {
	.lock = genpd_lock_raw_spin,
	.lock_nested = genpd_lock_nested_raw_spin,
	.lock_interruptible = genpd_lock_interruptible_raw_spin,
	.unlock = genpd_unlock_raw_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)
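/*
 * Editor's note: which genpd_lock_ops table a domain uses is selected when the
 * domain is initialized (in code outside this excerpt). The mutex-based ops
 * are the default; domains marked GENPD_FLAG_IRQ_SAFE get the spinlock-based
 * ops so they can be powered on/off in atomic context, and CPU domains use the
 * raw-spinlock ops. This is a hedged summary of that selection, not a quote.
 */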
#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
#define genpd_is_dev_name_fw(genpd)	(genpd->flags & GENPD_FLAG_DEV_NAME_FW)
#define genpd_is_no_sync_state(genpd)	(genpd->flags & GENPD_FLAG_NO_SYNC_STATE)
#define genpd_is_no_stay_on(genpd)	(genpd->flags & GENPD_FLAG_NO_STAY_ON)

static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
			      dev_name(&genpd->dev));

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

struct device *dev_to_genpd_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	if (IS_ERR(genpd))
		return ERR_CAST(genpd);

	return &genpd->dev;
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	if (!genpd_debugfs_dir)
		return;

	debugfs_lookup_and_remove(dev_name(&genpd->dev), genpd_debugfs_dir);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is active, it means we have just come out of the
	 * off state, so update the idle time, and vice versa.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}

static void genpd_reflect_residency(struct generic_pm_domain *genpd)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct genpd_power_state *state, *next_state;
	unsigned int state_idx;
	s64 sleep_ns, target_ns;

	if (!gd || !gd->reflect_residency)
		return;

	sleep_ns = ktime_to_ns(ktime_sub(ktime_get(), gd->last_enter));
	state_idx = genpd->state_idx;
	state = &genpd->states[state_idx];
	target_ns = state->power_off_latency_ns + state->residency_ns;

	if (sleep_ns < target_ns) {
		state->above++;
	} else if (state_idx < (genpd->state_count - 1)) {
		next_state = &genpd->states[state_idx + 1];
		target_ns = next_state->power_off_latency_ns +
			    next_state->residency_ns;

		if (sleep_ns >= target_ns)
			state->below++;
	}

	gd->reflect_residency = false;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
static inline void genpd_reflect_residency(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}
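/*
 * Worked example (editor's note): suppose a domain currently aggregates to
 * performance state 3, two attached devices vote 1 and 2, one subdomain link
 * votes 4, and the new request being re-evaluated is 2. Neither early return
 * applies (2 != 3 and 2 < 3), the device walk keeps the state at 2, and the
 * subdomain walk raises it to 4, which becomes the domain-wide target.
 */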
static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth);

static void _genpd_rollback_parent_state(struct gpd_link *link, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state;

	genpd_lock_nested(parent, depth + 1);

	parent_state = link->prev_performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
		pr_err("%s: Failed to roll back to %d performance state\n",
		       parent->name, parent_state);
	}

	genpd_unlock(parent);
}

static int _genpd_set_parent_state(struct generic_pm_domain *genpd,
				   struct gpd_link *link,
				   unsigned int state, int depth)
{
	struct generic_pm_domain *parent = link->parent;
	int parent_state, ret;

	/* Find parent's performance state */
	ret = genpd_xlate_performance_state(genpd, parent, state);
	if (unlikely(ret < 0))
		return ret;

	parent_state = ret;

	genpd_lock_nested(parent, depth + 1);

	link->prev_performance_state = link->performance_state;
	link->performance_state = parent_state;

	parent_state = _genpd_reeval_performance_state(parent, parent_state);
	ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
	if (ret)
		link->performance_state = link->prev_performance_state;

	genpd_unlock(parent);

	return ret;
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct gpd_link *link = NULL;
	int ret;

	if (state == genpd->performance_state)
		return 0;

	/* When scaling up, propagate to parents first in normal order */
	if (state > genpd->performance_state) {
		list_for_each_entry(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_up;
		}
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret) {
			if (link)
				goto rollback_parents_up;
			return ret;
		}
	}

	/* When scaling down, propagate to parents last in reverse order */
	if (state < genpd->performance_state) {
		list_for_each_entry_reverse(link, &genpd->child_links, child_node) {
			ret = _genpd_set_parent_state(genpd, link, state, depth);
			if (ret)
				goto rollback_parents_down;
		}
	}

	genpd->performance_state = state;
	return 0;

rollback_parents_up:
	list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
rollback_parents_down:
	list_for_each_entry_continue(link, &genpd->child_links, child_node)
		_genpd_rollback_parent_state(link, depth);
	return ret;
}
static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

static int genpd_dev_pm_set_performance_state(struct device *dev,
					      unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	int ret = 0;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when
 *	the device doesn't have any performance state constraints left (and so
 *	the device would no longer participate in determining the target
 *	performance state of the genpd).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
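/*
 * Usage sketch (editor's note; the "foo" driver is hypothetical): a consumer
 * whose domain supports performance states (e.g. via an OPP/required-opps
 * mapping) could request a level for its attached domain like this:
 *
 *	static int foo_set_high_perf(struct device *dev)
 *	{
 *		// Vote for performance state 3; pass 0 to drop the vote.
 *		return dev_pm_genpd_set_performance_state(dev, 3);
 *	}
 *
 * The domain aggregates votes from all attached devices and subdomains, so the
 * effective state may end up higher than what this device requested.
 */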
/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. It's assumed that the users
 * guarantee that the genpd wouldn't be detached while this routine is getting
 * called. Additionally, it's also assumed that @dev isn't runtime suspended
 * (RPM_SUSPENDED).
 *
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so a stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);

/**
 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 * @dev: A device that is attached to the genpd.
 *
 * This routine should typically be called for a device, at the point of when a
 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 *
 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 * valid value has been set.
 */
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return KTIME_MAX;

	if (genpd->gd)
		return genpd->gd->next_hrtimer;

	return KTIME_MAX;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);

/*
 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
 *
 * @dev: A device that is attached to the genpd.
 *
 * Allows a consumer of the genpd to notify the provider that the next power off
 * should be synchronous.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 */
void dev_pm_genpd_synced_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	genpd_lock(genpd);
	genpd->synced_poweroff = true;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);

/**
 * dev_pm_genpd_set_hwmode() - Set the HW mode for the device and its PM domain.
 *
 * @dev: Device for which the HW-mode should be changed.
 * @enable: Value to set or unset the HW-mode.
 *
 * Some PM domains can rely on HW signals to control the power for a device. To
 * allow a consumer driver to switch the behaviour for its device at runtime,
 * which may be beneficial from a latency or energy point of view, this function
 * may be called.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (!genpd->set_hwmode_dev)
		return -EOPNOTSUPP;

	genpd_lock(genpd);

	if (dev_gpd_data(dev)->hw_mode == enable)
		goto out;

	ret = genpd->set_hwmode_dev(genpd, dev, enable);
	if (!ret)
		dev_gpd_data(dev)->hw_mode = enable;

out:
	genpd_unlock(genpd);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_hwmode);
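/*
 * Usage sketch (editor's note; the "foo" names are hypothetical): a consumer
 * that wants the hardware to manage the device's power autonomously during an
 * idle period might toggle the HW mode around its usecase:
 *
 *	static int foo_enter_autonomous(struct device *dev)
 *	{
 *		int ret = dev_pm_genpd_set_hwmode(dev, true);
 *
 *		if (ret == -EOPNOTSUPP)
 *			return 0;	// domain has no ->set_hwmode_dev()
 *		return ret;
 *	}
 *
 * The cached setting can later be read back with dev_pm_genpd_get_hwmode().
 */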
/**
 * dev_pm_genpd_get_hwmode() - Get the HW mode setting for the device.
 *
 * @dev: Device for which the current HW-mode setting should be fetched.
 *
 * This helper function allows consumer drivers to fetch the current HW mode
 * setting of its device.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns the HW mode setting of device from SW cached hw_mode.
 */
bool dev_pm_genpd_get_hwmode(struct device *dev)
{
	return dev_gpd_data(dev)->hw_mode;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);

/**
 * dev_pm_genpd_rpm_always_on() - Control if the PM domain can be powered off.
 *
 * @dev: Device for which the PM domain may need to stay on.
 * @on: Value to set or unset for the condition.
 *
 * For some usecases a consumer driver requires its device to remain powered on
 * from the PM domain perspective during runtime. This function allows the
 * behaviour to be dynamically controlled for a device attached to a genpd.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Return: Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	genpd_lock(genpd);
	dev_gpd_data(dev)->rpm_always_on = on;
	genpd_unlock(genpd);

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);

/**
 * dev_pm_genpd_is_on() - Get device's current power domain status
 *
 * @dev: Device to get the current power status
 *
 * This function checks whether the generic power domain associated with the
 * given device is on or not, by verifying whether genpd->status equals
 * GENPD_STATE_ON.
 *
 * Note: this function returns the power status of the genpd at the time of the
 * call. The power status may change afterwards due to activity from other
 * devices sharing the same genpd. Therefore, this information should not be
 * relied on for long-term decisions about the device power state.
 *
 * Return: 'true' if the device's power domain is on, 'false' otherwise.
 */
bool dev_pm_genpd_is_on(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool is_on;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return false;

	genpd_lock(genpd);
	is_on = genpd_status_on(genpd);
	genpd_unlock(genpd);

	return is_on;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_is_on);

/**
 * pm_genpd_inc_rejected() - Adjust the rejected/usage counts for an idle-state.
 *
 * @genpd: The PM domain the idle-state belongs to.
 * @state_idx: The index of the idle-state that failed.
 *
 * In some special cases the ->power_off() callback is asynchronously powering
 * off the PM domain, so it may return zero to indicate success even though the
 * actual power-off could fail. To account for this correctly in the
 * rejected/usage counts for the idle-state statistics, users can call this
 * function to adjust the values.
 *
 * It is assumed that the users guarantee that the genpd doesn't get removed
 * while this routine is getting called.
 */
void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
			   unsigned int state_idx)
{
	genpd_lock(genpd);
	genpd->states[state_idx].rejected++;
	genpd->states[state_idx].usage--;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(pm_genpd_inc_rejected);
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 dev_name(&genpd->dev), "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	genpd->synced_poweroff = false;
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 dev_name(&genpd->dev), "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}
/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 *	RPM status of the related device is in an intermediate state, not yet
 *	turned into RPM_SUSPENDED. This means genpd_power_off() must allow one
 *	device to not be RPM_SUSPENDED, while it tries to power off the PM
 *	domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static void genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			    unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * The domain is already in the "power off" state.
	 * System suspend is in progress.
	 * The domain is configured as always on.
	 * The domain was on at boot and still needs to stay on.
	 * The domain has a subdomain being powered on.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0 ||
	    genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd) ||
	    genpd->stay_on || atomic_read(&genpd->sd_count) > 0)
		return;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that, there's no need for
	 * additional locking, as powering on a child requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;

		if (child->state_idx < child->state_count - 1)
			return;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
		    irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;

		/* The device may need its PM domain to stay powered on. */
		if (to_gpd_data(pdd)->rpm_always_on)
			return;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return;

	if (_genpd_power_off(genpd, true)) {
		genpd->states[genpd->state_idx].rejected++;
		return;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}
}
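/*
 * Worked example (editor's note): when genpd_power_off() runs from
 * genpd_runtime_suspend() for the last active device, that device's RPM status
 * is still RPM_SUSPENDING, so the walk above counts not_suspended == 1. With
 * one_dev_on == true this is tolerated and the power-off proceeds; from any
 * other path (one_dev_on == false), a single not-yet-suspended device is
 * enough to keep the domain on.
 */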
/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/* Reflect over the entered idle-states residency for debugfs. */
	genpd_reflect_residency(genpd);

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

err:
	list_for_each_entry_continue_reverse(link,
					     &genpd->child_links,
					     child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}
/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_unlock(genpd);

	return 0;
}
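/*
 * Usage sketch (editor's note; the "foo" driver is hypothetical): consumers
 * never call genpd_runtime_suspend() directly. They go through the runtime PM
 * core, which invokes the genpd callbacks installed in genpd->domain.ops:
 *
 *	static int foo_do_transfer(struct device *dev)
 *	{
 *		int ret = pm_runtime_resume_and_get(dev);
 *
 *		if (ret)
 *			return ret;	// domain and device are now powered
 *		// ... perform I/O ...
 *		pm_runtime_put(dev);	// may power off the domain later
 *		return 0;
 *	}
 */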
/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	pr_info("genpd: Disabling unused power domains\n");
	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall_sync(genpd_power_off_unused);
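/*
 * Editor's note: the __setup() hook above implements the "pd_ignore_unused"
 * kernel command-line parameter. Booting with:
 *
 *	pd_ignore_unused
 *
 * on the command line keeps otherwise-unused power domains on after boot,
 * which can be useful when debugging a platform where powering off a domain
 * hangs the system.
 */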
#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count ||
	    atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;

		if (child->state_idx < child->state_count - 1)
			return;
	}

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false)) {
		genpd->states[genpd->state_idx].rejected++;
		return;
	}

	genpd->states[genpd->state_idx].usage++;
	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	genpd_lock(genpd);
	genpd->prepared_count++;
	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @suspend_noirq: Generic suspend_noirq callback.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev,
				int (*suspend_noirq)(struct device *dev),
				int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_suspend_noirq,
				    pm_generic_resume_noirq);
}

/**
 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 * @resume_noirq: Generic resume_noirq callback.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_finish_resume(struct device *dev,
			       int (*resume_noirq)(struct device *dev))
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
		return resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return resume_noirq(dev);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_resume_noirq);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_freeze_noirq,
				    pm_generic_thaw_noirq);
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev,
				    pm_generic_poweroff_noirq,
				    pm_generic_restore_noirq);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_resume(dev, pm_generic_restore_noirq);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be resumed
 * during the syscore resume phase. It may also be called during suspend-to-idle
 * to resume a corresponding CPU device that is attached to a genpd.
 */
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
							   bool has_governor)
{
	struct generic_pm_domain_data *gpd_data;
	struct gpd_timing_data *td;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	/* Allocate data used by a governor. */
	if (has_governor) {
		td = kzalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			ret = -ENOMEM;
			goto err_free;
		}

		td->constraint_changed = true;
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->next_wakeup = KTIME_MAX;
		gpd_data->td = td;
	}

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data)
		ret = -EINVAL;
	else
		dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	if (ret)
		goto err_free;

	return gpd_data;

err_free:
	kfree(gpd_data->td);
	kfree(gpd_data);
err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	dev_pm_opp_clear_config(gpd_data->opp_token);
	kfree(gpd_data->td);
	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct genpd_governor_data *gd = genpd->gd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	gpd_data = genpd_alloc_dev_data(dev, gd);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	gpd_data->hw_mode = genpd->get_hwmode_dev ?
				genpd->get_hwmode_dev(genpd, dev) : false;

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);

	genpd->device_count++;
	if (gd)
		gd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
	dev_pm_domain_set(dev, &genpd->domain);
out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	if (!genpd || !dev)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
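/*
 * Usage sketch (editor's note; "foo_pd" is a hypothetical provider domain): a
 * platform that registers its own generic_pm_domain can attach a device with:
 *
 *	ret = pm_genpd_add_device(&foo_pd, dev);
 *	if (ret)
 *		return ret;
 *	// ... later, on teardown:
 *	pm_genpd_remove_device(dev);
 *
 * Most drivers never do this by hand; on DT platforms the attach typically
 * happens through the power-domains binding when the device is probed.
 */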
genpd->attach_dev(genpd, dev) : 0; 1933 if (ret) 1934 goto out; 1935 1936 genpd_lock(genpd); 1937 1938 genpd_set_cpumask(genpd, gpd_data->cpu); 1939 1940 genpd->device_count++; 1941 if (gd) 1942 gd->max_off_time_changed = true; 1943 1944 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1945 1946 genpd_unlock(genpd); 1947 dev_pm_domain_set(dev, &genpd->domain); 1948 out: 1949 if (ret) 1950 genpd_free_dev_data(dev, gpd_data); 1951 else 1952 dev_pm_qos_add_notifier(dev, &gpd_data->nb, 1953 DEV_PM_QOS_RESUME_LATENCY); 1954 1955 return ret; 1956 } 1957 1958 /** 1959 * pm_genpd_add_device - Add a device to an I/O PM domain. 1960 * @genpd: PM domain to add the device to. 1961 * @dev: Device to be added. 1962 */ 1963 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) 1964 { 1965 int ret; 1966 1967 if (!genpd || !dev) 1968 return -EINVAL; 1969 1970 mutex_lock(&gpd_list_lock); 1971 ret = genpd_add_device(genpd, dev, dev); 1972 mutex_unlock(&gpd_list_lock); 1973 1974 return ret; 1975 } 1976 EXPORT_SYMBOL_GPL(pm_genpd_add_device); 1977 1978 static int genpd_remove_device(struct generic_pm_domain *genpd, 1979 struct device *dev) 1980 { 1981 struct generic_pm_domain_data *gpd_data; 1982 struct pm_domain_data *pdd; 1983 int ret = 0; 1984 1985 dev_dbg(dev, "%s()\n", __func__); 1986 1987 pdd = dev->power.subsys_data->domain_data; 1988 gpd_data = to_gpd_data(pdd); 1989 dev_pm_qos_remove_notifier(dev, &gpd_data->nb, 1990 DEV_PM_QOS_RESUME_LATENCY); 1991 1992 genpd_lock(genpd); 1993 1994 if (genpd->prepared_count > 0) { 1995 ret = -EAGAIN; 1996 goto out; 1997 } 1998 1999 genpd->device_count--; 2000 if (genpd->gd) 2001 genpd->gd->max_off_time_changed = true; 2002 2003 genpd_clear_cpumask(genpd, gpd_data->cpu); 2004 2005 list_del_init(&pdd->list_node); 2006 2007 genpd_unlock(genpd); 2008 2009 dev_pm_domain_set(dev, NULL); 2010 2011 if (genpd->detach_dev) 2012 genpd->detach_dev(genpd, dev); 2013 2014 genpd_free_dev_data(dev, gpd_data); 2015 2016 return 0; 2017 2018 out: 2019 genpd_unlock(genpd); 2020 dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY); 2021 2022 return ret; 2023 } 2024 2025 /** 2026 * pm_genpd_remove_device - Remove a device from an I/O PM domain. 2027 * @dev: Device to be removed. 2028 */ 2029 int pm_genpd_remove_device(struct device *dev) 2030 { 2031 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev); 2032 2033 if (!genpd) 2034 return -EINVAL; 2035 2036 return genpd_remove_device(genpd, dev); 2037 } 2038 EXPORT_SYMBOL_GPL(pm_genpd_remove_device); 2039 2040 /** 2041 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev 2042 * 2043 * @dev: Device that should be associated with the notifier 2044 * @nb: The notifier block to register 2045 * 2046 * Users may call this function to add a genpd power on/off notifier for an 2047 * attached @dev. Only one notifier per device is allowed. The notifier is 2048 * sent when the genpd is powering on/off the PM domain. 2049 * 2050 * It is assumed that the user guarantees that the genpd won't be detached 2051 * while this routine is being called. 2052 * 2053 * Returns 0 on success and negative error values on failures.
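 *
 * A hypothetical usage sketch (illustrative only; struct foo, foo_save_context()
 * and foo_restore_context() are made-up driver helpers, and foo->dev is assumed
 * to already be attached to its genpd)::
 *
 *	static int foo_genpd_notifier(struct notifier_block *nb,
 *				      unsigned long action, void *data)
 *	{
 *		struct foo *foo = container_of(nb, struct foo, pd_nb);
 *
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			foo_save_context(foo);	 // about to lose power
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			foo_restore_context(foo); // power has been restored
 *			break;
 *		default:
 *			break;
 *		}
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	foo->pd_nb.notifier_call = foo_genpd_notifier;
 *	ret = dev_pm_genpd_add_notifier(foo->dev, &foo->pd_nb);
 *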
2054 */ 2055 int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb) 2056 { 2057 struct generic_pm_domain *genpd; 2058 struct generic_pm_domain_data *gpd_data; 2059 int ret; 2060 2061 genpd = dev_to_genpd_safe(dev); 2062 if (!genpd) 2063 return -ENODEV; 2064 2065 if (WARN_ON(!dev->power.subsys_data || 2066 !dev->power.subsys_data->domain_data)) 2067 return -EINVAL; 2068 2069 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 2070 if (gpd_data->power_nb) 2071 return -EEXIST; 2072 2073 genpd_lock(genpd); 2074 ret = raw_notifier_chain_register(&genpd->power_notifiers, nb); 2075 genpd_unlock(genpd); 2076 2077 if (ret) { 2078 dev_warn(dev, "failed to add notifier for PM domain %s\n", 2079 dev_name(&genpd->dev)); 2080 return ret; 2081 } 2082 2083 gpd_data->power_nb = nb; 2084 return 0; 2085 } 2086 EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier); 2087 2088 /** 2089 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev 2090 * 2091 * @dev: Device that is associated with the notifier 2092 * 2093 * Users may call this function to remove a genpd power on/off notifier for an 2094 * attached @dev. 2095 * 2096 * It is assumed that the user guarantees that the genpd won't be detached 2097 * while this routine is being called. 2098 * 2099 * Returns 0 on success and negative error values on failures. 2100 */ 2101 int dev_pm_genpd_remove_notifier(struct device *dev) 2102 { 2103 struct generic_pm_domain *genpd; 2104 struct generic_pm_domain_data *gpd_data; 2105 int ret; 2106 2107 genpd = dev_to_genpd_safe(dev); 2108 if (!genpd) 2109 return -ENODEV; 2110 2111 if (WARN_ON(!dev->power.subsys_data || 2112 !dev->power.subsys_data->domain_data)) 2113 return -EINVAL; 2114 2115 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 2116 if (!gpd_data->power_nb) 2117 return -ENODEV; 2118 2119 genpd_lock(genpd); 2120 ret = raw_notifier_chain_unregister(&genpd->power_notifiers, 2121 gpd_data->power_nb); 2122 genpd_unlock(genpd); 2123 2124 if (ret) { 2125 dev_warn(dev, "failed to remove notifier for PM domain %s\n", 2126 dev_name(&genpd->dev)); 2127 return ret; 2128 } 2129 2130 gpd_data->power_nb = NULL; 2131 return 0; 2132 } 2133 EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier); 2134 2135 static int genpd_add_subdomain(struct generic_pm_domain *genpd, 2136 struct generic_pm_domain *subdomain) 2137 { 2138 struct gpd_link *link, *itr; 2139 int ret = 0; 2140 2141 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) 2142 || genpd == subdomain) 2143 return -EINVAL; 2144 2145 /* 2146 * If the subdomain can be powered on/off in an IRQ safe 2147 * context, ensure that the parent can also be 2148 * powered on/off in that context.
2149 */ 2150 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) { 2151 WARN(1, "Parent %s of subdomain %s must be IRQ safe\n", 2152 dev_name(&genpd->dev), subdomain->name); 2153 return -EINVAL; 2154 } 2155 2156 link = kzalloc(sizeof(*link), GFP_KERNEL); 2157 if (!link) 2158 return -ENOMEM; 2159 2160 genpd_lock(subdomain); 2161 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); 2162 2163 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) { 2164 ret = -EINVAL; 2165 goto out; 2166 } 2167 2168 list_for_each_entry(itr, &genpd->parent_links, parent_node) { 2169 if (itr->child == subdomain && itr->parent == genpd) { 2170 ret = -EINVAL; 2171 goto out; 2172 } 2173 } 2174 2175 link->parent = genpd; 2176 list_add_tail(&link->parent_node, &genpd->parent_links); 2177 link->child = subdomain; 2178 list_add_tail(&link->child_node, &subdomain->child_links); 2179 if (genpd_status_on(subdomain)) 2180 genpd_sd_counter_inc(genpd); 2181 2182 out: 2183 genpd_unlock(genpd); 2184 genpd_unlock(subdomain); 2185 if (ret) 2186 kfree(link); 2187 return ret; 2188 } 2189 2190 /** 2191 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 2192 * @genpd: Leader PM domain to add the subdomain to. 2193 * @subdomain: Subdomain to be added. 2194 */ 2195 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, 2196 struct generic_pm_domain *subdomain) 2197 { 2198 int ret; 2199 2200 mutex_lock(&gpd_list_lock); 2201 ret = genpd_add_subdomain(genpd, subdomain); 2202 mutex_unlock(&gpd_list_lock); 2203 2204 return ret; 2205 } 2206 EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); 2207 2208 /** 2209 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 2210 * @genpd: Leader PM domain to remove the subdomain from. 2211 * @subdomain: Subdomain to be removed. 
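 *
 * A hypothetical teardown sketch, assuming the hierarchy was built with
 * pm_genpd_init() and pm_genpd_add_subdomain(); the domain names are made
 * up and error handling is omitted::
 *
 *	pm_genpd_init(&soc_pd, NULL, false);
 *	pm_genpd_init(&gpu_pd, NULL, false);
 *	pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
 *	...
 *	pm_genpd_remove_subdomain(&soc_pd, &gpu_pd);
 *	pm_genpd_remove(&gpu_pd);
 *	pm_genpd_remove(&soc_pd);
 *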
2212 */ 2213 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 2214 struct generic_pm_domain *subdomain) 2215 { 2216 struct gpd_link *l, *link; 2217 int ret = -EINVAL; 2218 2219 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) 2220 return -EINVAL; 2221 2222 genpd_lock(subdomain); 2223 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); 2224 2225 if (!list_empty(&subdomain->parent_links) || subdomain->device_count) { 2226 pr_warn("%s: unable to remove subdomain %s\n", 2227 dev_name(&genpd->dev), subdomain->name); 2228 ret = -EBUSY; 2229 goto out; 2230 } 2231 2232 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { 2233 if (link->child != subdomain) 2234 continue; 2235 2236 list_del(&link->parent_node); 2237 list_del(&link->child_node); 2238 kfree(link); 2239 if (genpd_status_on(subdomain)) 2240 genpd_sd_counter_dec(genpd); 2241 2242 ret = 0; 2243 break; 2244 } 2245 2246 out: 2247 genpd_unlock(genpd); 2248 genpd_unlock(subdomain); 2249 2250 return ret; 2251 } 2252 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); 2253 2254 static void genpd_free_default_power_state(struct genpd_power_state *states, 2255 unsigned int state_count) 2256 { 2257 kfree(states); 2258 } 2259 2260 static int genpd_set_default_power_state(struct generic_pm_domain *genpd) 2261 { 2262 struct genpd_power_state *state; 2263 2264 state = kzalloc(sizeof(*state), GFP_KERNEL); 2265 if (!state) 2266 return -ENOMEM; 2267 2268 genpd->states = state; 2269 genpd->state_count = 1; 2270 genpd->free_states = genpd_free_default_power_state; 2271 2272 return 0; 2273 } 2274 2275 static void genpd_provider_release(struct device *dev) 2276 { 2277 /* nothing to be done here */ 2278 } 2279 2280 static int genpd_alloc_data(struct generic_pm_domain *genpd) 2281 { 2282 struct genpd_governor_data *gd = NULL; 2283 int ret; 2284 2285 if (genpd_is_cpu_domain(genpd) && 2286 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) 2287 return -ENOMEM; 2288 2289 if (genpd->gov) { 2290 gd = kzalloc(sizeof(*gd), GFP_KERNEL); 2291 if (!gd) { 2292 ret = -ENOMEM; 2293 goto free; 2294 } 2295 2296 gd->max_off_time_ns = -1; 2297 gd->max_off_time_changed = true; 2298 gd->next_wakeup = KTIME_MAX; 2299 gd->next_hrtimer = KTIME_MAX; 2300 } 2301 2302 /* Use only one "off" state if there were no states declared */ 2303 if (genpd->state_count == 0) { 2304 ret = genpd_set_default_power_state(genpd); 2305 if (ret) 2306 goto free; 2307 } 2308 2309 genpd->gd = gd; 2310 device_initialize(&genpd->dev); 2311 genpd->dev.release = genpd_provider_release; 2312 genpd->dev.bus = &genpd_provider_bus_type; 2313 genpd->dev.parent = &genpd_provider_bus; 2314 2315 if (!genpd_is_dev_name_fw(genpd)) { 2316 dev_set_name(&genpd->dev, "%s", genpd->name); 2317 } else { 2318 ret = ida_alloc(&genpd_ida, GFP_KERNEL); 2319 if (ret < 0) 2320 goto put; 2321 2322 genpd->device_id = ret; 2323 dev_set_name(&genpd->dev, "%s_%u", genpd->name, genpd->device_id); 2324 } 2325 2326 return 0; 2327 put: 2328 put_device(&genpd->dev); 2329 if (genpd->free_states == genpd_free_default_power_state) { 2330 kfree(genpd->states); 2331 genpd->states = NULL; 2332 } 2333 free: 2334 if (genpd_is_cpu_domain(genpd)) 2335 free_cpumask_var(genpd->cpus); 2336 kfree(gd); 2337 return ret; 2338 } 2339 2340 static void genpd_free_data(struct generic_pm_domain *genpd) 2341 { 2342 put_device(&genpd->dev); 2343 if (genpd->device_id != -ENXIO) 2344 ida_free(&genpd_ida, genpd->device_id); 2345 if (genpd_is_cpu_domain(genpd)) 2346 free_cpumask_var(genpd->cpus); 2347 if (genpd->free_states) 2348 
genpd->free_states(genpd->states, genpd->state_count); 2349 kfree(genpd->gd); 2350 } 2351 2352 static void genpd_lock_init(struct generic_pm_domain *genpd) 2353 { 2354 if (genpd_is_cpu_domain(genpd)) { 2355 raw_spin_lock_init(&genpd->raw_slock); 2356 genpd->lock_ops = &genpd_raw_spin_ops; 2357 } else if (genpd_is_irq_safe(genpd)) { 2358 spin_lock_init(&genpd->slock); 2359 genpd->lock_ops = &genpd_spin_ops; 2360 } else { 2361 mutex_init(&genpd->mlock); 2362 genpd->lock_ops = &genpd_mtx_ops; 2363 } 2364 } 2365 2366 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF 2367 static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off) 2368 { 2369 genpd->stay_on = !genpd_is_no_stay_on(genpd) && !is_off; 2370 } 2371 #else 2372 static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off) 2373 { 2374 genpd->stay_on = false; 2375 } 2376 #endif 2377 2378 /** 2379 * pm_genpd_init - Initialize a generic I/O PM domain object. 2380 * @genpd: PM domain object to initialize. 2381 * @gov: PM domain governor to associate with the domain (may be NULL). 2382 * @is_off: Initial value of the domain's power_is_off field. 2383 * 2384 * Returns 0 on successful initialization, else a negative error code. 2385 */ 2386 int pm_genpd_init(struct generic_pm_domain *genpd, 2387 struct dev_power_governor *gov, bool is_off) 2388 { 2389 int ret; 2390 2391 if (IS_ERR_OR_NULL(genpd)) 2392 return -EINVAL; 2393 2394 INIT_LIST_HEAD(&genpd->parent_links); 2395 INIT_LIST_HEAD(&genpd->child_links); 2396 INIT_LIST_HEAD(&genpd->dev_list); 2397 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers); 2398 genpd_lock_init(genpd); 2399 genpd->gov = gov; 2400 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); 2401 atomic_set(&genpd->sd_count, 0); 2402 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON; 2403 genpd_set_stay_on(genpd, is_off); 2404 genpd->sync_state = GENPD_SYNC_STATE_OFF; 2405 genpd->device_count = 0; 2406 genpd->provider = NULL; 2407 genpd->device_id = -ENXIO; 2408 genpd->has_provider = false; 2409 genpd->opp_table = NULL; 2410 genpd->accounting_time = ktime_get_mono_fast_ns(); 2411 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; 2412 genpd->domain.ops.runtime_resume = genpd_runtime_resume; 2413 genpd->domain.ops.prepare = genpd_prepare; 2414 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq; 2415 genpd->domain.ops.resume_noirq = genpd_resume_noirq; 2416 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq; 2417 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq; 2418 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq; 2419 genpd->domain.ops.restore_noirq = genpd_restore_noirq; 2420 genpd->domain.ops.complete = genpd_complete; 2421 genpd->domain.start = genpd_dev_pm_start; 2422 genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state; 2423 2424 if (genpd->flags & GENPD_FLAG_PM_CLK) { 2425 genpd->dev_ops.stop = pm_clk_suspend; 2426 genpd->dev_ops.start = pm_clk_resume; 2427 } 2428 2429 /* The always-on governor works better with the corresponding flag. */ 2430 if (gov == &pm_domain_always_on_gov) 2431 genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON; 2432 2433 /* Always-on domains must be powered on at initialization. */ 2434 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) && 2435 !genpd_status_on(genpd)) { 2436 pr_err("always-on PM domain %s is not on\n", genpd->name); 2437 return -EINVAL; 2438 } 2439 2440 /* Multiple states but no governor doesn't make sense. 
*/ 2441 if (!gov && genpd->state_count > 1) 2442 pr_warn("%s: no governor for states\n", genpd->name); 2443 2444 ret = genpd_alloc_data(genpd); 2445 if (ret) 2446 return ret; 2447 2448 mutex_lock(&gpd_list_lock); 2449 list_add(&genpd->gpd_list_node, &gpd_list); 2450 mutex_unlock(&gpd_list_lock); 2451 genpd_debug_add(genpd); 2452 2453 return 0; 2454 } 2455 EXPORT_SYMBOL_GPL(pm_genpd_init); 2456 2457 static int genpd_remove(struct generic_pm_domain *genpd) 2458 { 2459 struct gpd_link *l, *link; 2460 2461 if (IS_ERR_OR_NULL(genpd)) 2462 return -EINVAL; 2463 2464 genpd_lock(genpd); 2465 2466 if (genpd->has_provider) { 2467 genpd_unlock(genpd); 2468 pr_err("Provider present, unable to remove %s\n", dev_name(&genpd->dev)); 2469 return -EBUSY; 2470 } 2471 2472 if (!list_empty(&genpd->parent_links) || genpd->device_count) { 2473 genpd_unlock(genpd); 2474 pr_err("%s: unable to remove %s\n", __func__, dev_name(&genpd->dev)); 2475 return -EBUSY; 2476 } 2477 2478 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) { 2479 list_del(&link->parent_node); 2480 list_del(&link->child_node); 2481 kfree(link); 2482 } 2483 2484 list_del(&genpd->gpd_list_node); 2485 genpd_unlock(genpd); 2486 genpd_debug_remove(genpd); 2487 cancel_work_sync(&genpd->power_off_work); 2488 genpd_free_data(genpd); 2489 2490 pr_debug("%s: removed %s\n", __func__, dev_name(&genpd->dev)); 2491 2492 return 0; 2493 } 2494 2495 /** 2496 * pm_genpd_remove - Remove a generic I/O PM domain 2497 * @genpd: Pointer to PM domain that is to be removed. 2498 * 2499 * To remove the PM domain, this function: 2500 * - Removes the PM domain as a subdomain to any parent domains, 2501 * if it was added. 2502 * - Removes the PM domain from the list of registered PM domains. 2503 * 2504 * The PM domain will only be removed, if the associated provider has 2505 * been removed, it is not a parent to any other PM domain and has no 2506 * devices associated with it. 2507 */ 2508 int pm_genpd_remove(struct generic_pm_domain *genpd) 2509 { 2510 int ret; 2511 2512 mutex_lock(&gpd_list_lock); 2513 ret = genpd_remove(genpd); 2514 mutex_unlock(&gpd_list_lock); 2515 2516 return ret; 2517 } 2518 EXPORT_SYMBOL_GPL(pm_genpd_remove); 2519 2520 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF 2521 2522 /* 2523 * Device Tree based PM domain providers. 2524 * 2525 * The code below implements generic device tree based PM domain providers that 2526 * bind device tree nodes with generic PM domains registered in the system. 2527 * 2528 * Any driver that registers generic PM domains and needs to support binding of 2529 * devices to these domains is supposed to register a PM domain provider, which 2530 * maps a PM domain specifier retrieved from the device tree to a PM domain. 2531 * 2532 * Two simple mapping functions have been provided for convenience: 2533 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. 2534 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by 2535 * index. 2536 */ 2537 2538 /** 2539 * struct of_genpd_provider - PM domain provider registration structure 2540 * @link: Entry in global list of PM domain providers 2541 * @node: Pointer to device tree node of PM domain provider 2542 * @xlate: Provider-specific xlate callback mapping a set of specifier cells 2543 * into a PM domain. 
2544 * @data: context pointer to be passed into @xlate callback 2545 */ 2546 struct of_genpd_provider { 2547 struct list_head link; 2548 struct device_node *node; 2549 genpd_xlate_t xlate; 2550 void *data; 2551 }; 2552 2553 /* List of registered PM domain providers. */ 2554 static LIST_HEAD(of_genpd_providers); 2555 /* Mutex to protect the list above. */ 2556 static DEFINE_MUTEX(of_genpd_mutex); 2557 /* Used to prevent registering devices before the bus. */ 2558 static bool genpd_bus_registered; 2559 2560 /** 2561 * genpd_xlate_simple() - Xlate function for direct node-domain mapping 2562 * @genpdspec: OF phandle args to map into a PM domain 2563 * @data: xlate function private data - pointer to struct generic_pm_domain 2564 * 2565 * This is a generic xlate function that can be used to model PM domains that 2566 * have their own device tree nodes. The private data of xlate function needs 2567 * to be a valid pointer to struct generic_pm_domain. 2568 */ 2569 static struct generic_pm_domain *genpd_xlate_simple( 2570 const struct of_phandle_args *genpdspec, 2571 void *data) 2572 { 2573 return data; 2574 } 2575 2576 /** 2577 * genpd_xlate_onecell() - Xlate function using a single index. 2578 * @genpdspec: OF phandle args to map into a PM domain 2579 * @data: xlate function private data - pointer to struct genpd_onecell_data 2580 * 2581 * This is a generic xlate function that can be used to model simple PM domain 2582 * controllers that have one device tree node and provide multiple PM domains. 2583 * A single cell is used as an index into an array of PM domains specified in 2584 * the genpd_onecell_data struct when registering the provider. 2585 */ 2586 static struct generic_pm_domain *genpd_xlate_onecell( 2587 const struct of_phandle_args *genpdspec, 2588 void *data) 2589 { 2590 struct genpd_onecell_data *genpd_data = data; 2591 unsigned int idx = genpdspec->args[0]; 2592 2593 if (genpdspec->args_count != 1) 2594 return ERR_PTR(-EINVAL); 2595 2596 if (idx >= genpd_data->num_domains) { 2597 pr_err("%s: invalid domain index %u\n", __func__, idx); 2598 return ERR_PTR(-EINVAL); 2599 } 2600 2601 if (!genpd_data->domains[idx]) 2602 return ERR_PTR(-ENOENT); 2603 2604 return genpd_data->domains[idx]; 2605 } 2606 2607 /** 2608 * genpd_add_provider() - Register a PM domain provider for a node 2609 * @np: Device node pointer associated with the PM domain provider. 2610 * @xlate: Callback for decoding PM domain from phandle arguments. 2611 * @data: Context pointer for @xlate callback. 
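 *
 * Providers normally reach this through the public wrappers. A hypothetical
 * single-domain provider might register itself roughly as follows (names are
 * made up, error handling omitted)::
 *
 *	my_pd.name = "my-power-domain";
 *	my_pd.power_on = my_pd_power_on;
 *	my_pd.power_off = my_pd_power_off;
 *	pm_genpd_init(&my_pd, NULL, true);
 *	of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
 *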
2612 */ 2613 static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, 2614 void *data) 2615 { 2616 struct of_genpd_provider *cp; 2617 2618 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 2619 if (!cp) 2620 return -ENOMEM; 2621 2622 cp->node = of_node_get(np); 2623 cp->data = data; 2624 cp->xlate = xlate; 2625 fwnode_dev_initialized(of_fwnode_handle(np), true); 2626 2627 mutex_lock(&of_genpd_mutex); 2628 list_add(&cp->link, &of_genpd_providers); 2629 mutex_unlock(&of_genpd_mutex); 2630 pr_debug("Added domain provider from %pOF\n", np); 2631 2632 return 0; 2633 } 2634 2635 static bool genpd_present(const struct generic_pm_domain *genpd) 2636 { 2637 bool ret = false; 2638 const struct generic_pm_domain *gpd; 2639 2640 mutex_lock(&gpd_list_lock); 2641 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2642 if (gpd == genpd) { 2643 ret = true; 2644 break; 2645 } 2646 } 2647 mutex_unlock(&gpd_list_lock); 2648 2649 return ret; 2650 } 2651 2652 static void genpd_sync_state(struct device *dev) 2653 { 2654 return of_genpd_sync_state(dev->of_node); 2655 } 2656 2657 /** 2658 * of_genpd_add_provider_simple() - Register a simple PM domain provider 2659 * @np: Device node pointer associated with the PM domain provider. 2660 * @genpd: Pointer to PM domain associated with the PM domain provider. 2661 */ 2662 int of_genpd_add_provider_simple(struct device_node *np, 2663 struct generic_pm_domain *genpd) 2664 { 2665 struct fwnode_handle *fwnode; 2666 struct device *dev; 2667 int ret; 2668 2669 if (!np || !genpd) 2670 return -EINVAL; 2671 2672 if (!genpd_bus_registered) 2673 return -ENODEV; 2674 2675 if (!genpd_present(genpd)) 2676 return -EINVAL; 2677 2678 genpd->dev.of_node = np; 2679 2680 fwnode = of_fwnode_handle(np); 2681 dev = get_dev_from_fwnode(fwnode); 2682 if (!dev && !genpd_is_no_sync_state(genpd)) { 2683 genpd->sync_state = GENPD_SYNC_STATE_SIMPLE; 2684 device_set_node(&genpd->dev, fwnode); 2685 } else { 2686 dev_set_drv_sync_state(dev, genpd_sync_state); 2687 } 2688 2689 put_device(dev); 2690 2691 ret = device_add(&genpd->dev); 2692 if (ret) 2693 return ret; 2694 2695 /* Parse genpd OPP table */ 2696 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) { 2697 ret = dev_pm_opp_of_add_table(&genpd->dev); 2698 if (ret) { 2699 dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n"); 2700 goto err_del; 2701 } 2702 2703 /* 2704 * Save table for faster processing while setting performance 2705 * state. 2706 */ 2707 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2708 WARN_ON(IS_ERR(genpd->opp_table)); 2709 } 2710 2711 ret = genpd_add_provider(np, genpd_xlate_simple, genpd); 2712 if (ret) 2713 goto err_opp; 2714 2715 genpd->provider = fwnode; 2716 genpd->has_provider = true; 2717 2718 return 0; 2719 2720 err_opp: 2721 if (genpd->opp_table) { 2722 dev_pm_opp_put_opp_table(genpd->opp_table); 2723 dev_pm_opp_of_remove_table(&genpd->dev); 2724 } 2725 err_del: 2726 device_del(&genpd->dev); 2727 return ret; 2728 } 2729 EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple); 2730 2731 /** 2732 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider 2733 * @np: Device node pointer associated with the PM domain provider. 2734 * @data: Pointer to the data associated with the PM domain provider. 
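 *
 * A hypothetical multi-domain provider sketch (MY_NR_DOMAINS, my_pds[] and
 * the genpd callbacks are made up; error handling omitted). Each entry of
 * @data->domains normally points to a genpd initialized with pm_genpd_init(),
 * while NULL entries are skipped::
 *
 *	static struct generic_pm_domain *my_domains[MY_NR_DOMAINS];
 *	static struct genpd_onecell_data my_onecell_data = {
 *		.domains = my_domains,
 *		.num_domains = MY_NR_DOMAINS,
 *	};
 *
 *	for (i = 0; i < MY_NR_DOMAINS; i++) {
 *		my_domains[i] = &my_pds[i];
 *		pm_genpd_init(my_domains[i], NULL, true);
 *	}
 *	of_genpd_add_provider_onecell(pdev->dev.of_node, &my_onecell_data);
 *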
2735 */ 2736 int of_genpd_add_provider_onecell(struct device_node *np, 2737 struct genpd_onecell_data *data) 2738 { 2739 struct generic_pm_domain *genpd; 2740 struct fwnode_handle *fwnode; 2741 struct device *dev; 2742 unsigned int i; 2743 int ret = -EINVAL; 2744 bool sync_state = false; 2745 2746 if (!np || !data) 2747 return -EINVAL; 2748 2749 if (!genpd_bus_registered) 2750 return -ENODEV; 2751 2752 if (!data->xlate) 2753 data->xlate = genpd_xlate_onecell; 2754 2755 fwnode = of_fwnode_handle(np); 2756 dev = get_dev_from_fwnode(fwnode); 2757 if (!dev) 2758 sync_state = true; 2759 else 2760 dev_set_drv_sync_state(dev, genpd_sync_state); 2761 2762 put_device(dev); 2763 2764 for (i = 0; i < data->num_domains; i++) { 2765 genpd = data->domains[i]; 2766 2767 if (!genpd) 2768 continue; 2769 if (!genpd_present(genpd)) 2770 goto error; 2771 2772 genpd->dev.of_node = np; 2773 2774 if (sync_state && !genpd_is_no_sync_state(genpd)) { 2775 genpd->sync_state = GENPD_SYNC_STATE_ONECELL; 2776 device_set_node(&genpd->dev, fwnode); 2777 sync_state = false; 2778 } 2779 2780 ret = device_add(&genpd->dev); 2781 if (ret) 2782 goto error; 2783 2784 /* Parse genpd OPP table */ 2785 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) { 2786 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); 2787 if (ret) { 2788 dev_err_probe(&genpd->dev, ret, 2789 "Failed to add OPP table for index %d\n", i); 2790 device_del(&genpd->dev); 2791 goto error; 2792 } 2793 2794 /* 2795 * Save table for faster processing while setting 2796 * performance state. 2797 */ 2798 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); 2799 WARN_ON(IS_ERR(genpd->opp_table)); 2800 } 2801 2802 genpd->provider = fwnode; 2803 genpd->has_provider = true; 2804 } 2805 2806 ret = genpd_add_provider(np, data->xlate, data); 2807 if (ret < 0) 2808 goto error; 2809 2810 return 0; 2811 2812 error: 2813 while (i--) { 2814 genpd = data->domains[i]; 2815 2816 if (!genpd) 2817 continue; 2818 2819 genpd->provider = NULL; 2820 genpd->has_provider = false; 2821 2822 if (genpd->opp_table) { 2823 dev_pm_opp_put_opp_table(genpd->opp_table); 2824 dev_pm_opp_of_remove_table(&genpd->dev); 2825 } 2826 2827 device_del(&genpd->dev); 2828 } 2829 2830 return ret; 2831 } 2832 EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell); 2833 2834 /** 2835 * of_genpd_del_provider() - Remove a previously registered PM domain provider 2836 * @np: Device node pointer associated with the PM domain provider 2837 */ 2838 void of_genpd_del_provider(struct device_node *np) 2839 { 2840 struct of_genpd_provider *cp, *tmp; 2841 struct generic_pm_domain *gpd; 2842 2843 mutex_lock(&gpd_list_lock); 2844 mutex_lock(&of_genpd_mutex); 2845 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) { 2846 if (cp->node == np) { 2847 /* 2848 * For each PM domain associated with the 2849 * provider, set the 'has_provider' to false 2850 * so that the PM domain can be safely removed. 
2851 */ 2852 list_for_each_entry(gpd, &gpd_list, gpd_list_node) { 2853 if (gpd->provider == of_fwnode_handle(np)) { 2854 gpd->has_provider = false; 2855 2856 if (gpd->opp_table) { 2857 dev_pm_opp_put_opp_table(gpd->opp_table); 2858 dev_pm_opp_of_remove_table(&gpd->dev); 2859 } 2860 2861 device_del(&gpd->dev); 2862 } 2863 } 2864 2865 fwnode_dev_initialized(of_fwnode_handle(cp->node), false); 2866 list_del(&cp->link); 2867 of_node_put(cp->node); 2868 kfree(cp); 2869 break; 2870 } 2871 } 2872 mutex_unlock(&of_genpd_mutex); 2873 mutex_unlock(&gpd_list_lock); 2874 } 2875 EXPORT_SYMBOL_GPL(of_genpd_del_provider); 2876 2877 /** 2878 * genpd_get_from_provider() - Look-up PM domain 2879 * @genpdspec: OF phandle args to use for look-up 2880 * 2881 * Looks for a PM domain provider under the node specified by @genpdspec and if 2882 * found, uses xlate function of the provider to map phandle args to a PM 2883 * domain. 2884 * 2885 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() 2886 * on failure. 2887 */ 2888 static struct generic_pm_domain *genpd_get_from_provider( 2889 const struct of_phandle_args *genpdspec) 2890 { 2891 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); 2892 struct of_genpd_provider *provider; 2893 2894 if (!genpdspec) 2895 return ERR_PTR(-EINVAL); 2896 2897 mutex_lock(&of_genpd_mutex); 2898 2899 /* Check if we have such a provider in our array */ 2900 list_for_each_entry(provider, &of_genpd_providers, link) { 2901 if (provider->node == genpdspec->np) 2902 genpd = provider->xlate(genpdspec, provider->data); 2903 if (!IS_ERR(genpd)) 2904 break; 2905 } 2906 2907 mutex_unlock(&of_genpd_mutex); 2908 2909 return genpd; 2910 } 2911 2912 /** 2913 * of_genpd_add_device() - Add a device to an I/O PM domain 2914 * @genpdspec: OF phandle args to use for look-up PM domain 2915 * @dev: Device to be added. 2916 * 2917 * Looks-up an I/O PM domain based upon phandle args provided and adds 2918 * the device to the PM domain. Returns a negative error code on failure. 2919 */ 2920 int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev) 2921 { 2922 struct generic_pm_domain *genpd; 2923 int ret; 2924 2925 if (!dev) 2926 return -EINVAL; 2927 2928 mutex_lock(&gpd_list_lock); 2929 2930 genpd = genpd_get_from_provider(genpdspec); 2931 if (IS_ERR(genpd)) { 2932 ret = PTR_ERR(genpd); 2933 goto out; 2934 } 2935 2936 ret = genpd_add_device(genpd, dev, dev); 2937 2938 out: 2939 mutex_unlock(&gpd_list_lock); 2940 2941 return ret; 2942 } 2943 EXPORT_SYMBOL_GPL(of_genpd_add_device); 2944 2945 /** 2946 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 2947 * @parent_spec: OF phandle args to use for parent PM domain look-up 2948 * @subdomain_spec: OF phandle args to use for subdomain look-up 2949 * 2950 * Looks-up a parent PM domain and subdomain based upon phandle args 2951 * provided and adds the subdomain to the parent PM domain. Returns a 2952 * negative error code on failure. 
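 *
 * A hypothetical sketch, assuming np is the child provider's device node,
 * that it references its parent domain through a "power-domains" phandle and
 * that it itself uses #power-domain-cells = <0> (error handling omitted)::
 *
 *	struct of_phandle_args parent, child;
 *
 *	of_parse_phandle_with_args(np, "power-domains",
 *				   "#power-domain-cells", 0, &parent);
 *	child.np = np;
 *	child.args_count = 0;
 *	ret = of_genpd_add_subdomain(&parent, &child);
 *	of_node_put(parent.np);
 *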
2953 */ 2954 int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec, 2955 const struct of_phandle_args *subdomain_spec) 2956 { 2957 struct generic_pm_domain *parent, *subdomain; 2958 int ret; 2959 2960 mutex_lock(&gpd_list_lock); 2961 2962 parent = genpd_get_from_provider(parent_spec); 2963 if (IS_ERR(parent)) { 2964 ret = PTR_ERR(parent); 2965 goto out; 2966 } 2967 2968 subdomain = genpd_get_from_provider(subdomain_spec); 2969 if (IS_ERR(subdomain)) { 2970 ret = PTR_ERR(subdomain); 2971 goto out; 2972 } 2973 2974 ret = genpd_add_subdomain(parent, subdomain); 2975 2976 out: 2977 mutex_unlock(&gpd_list_lock); 2978 2979 return ret == -ENOENT ? -EPROBE_DEFER : ret; 2980 } 2981 EXPORT_SYMBOL_GPL(of_genpd_add_subdomain); 2982 2983 /** 2984 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. 2985 * @parent_spec: OF phandle args to use for parent PM domain look-up 2986 * @subdomain_spec: OF phandle args to use for subdomain look-up 2987 * 2988 * Looks-up a parent PM domain and subdomain based upon phandle args 2989 * provided and removes the subdomain from the parent PM domain. Returns a 2990 * negative error code on failure. 2991 */ 2992 int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec, 2993 const struct of_phandle_args *subdomain_spec) 2994 { 2995 struct generic_pm_domain *parent, *subdomain; 2996 int ret; 2997 2998 mutex_lock(&gpd_list_lock); 2999 3000 parent = genpd_get_from_provider(parent_spec); 3001 if (IS_ERR(parent)) { 3002 ret = PTR_ERR(parent); 3003 goto out; 3004 } 3005 3006 subdomain = genpd_get_from_provider(subdomain_spec); 3007 if (IS_ERR(subdomain)) { 3008 ret = PTR_ERR(subdomain); 3009 goto out; 3010 } 3011 3012 ret = pm_genpd_remove_subdomain(parent, subdomain); 3013 3014 out: 3015 mutex_unlock(&gpd_list_lock); 3016 3017 return ret; 3018 } 3019 EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain); 3020 3021 /** 3022 * of_genpd_remove_last - Remove the last PM domain registered for a provider 3023 * @np: Pointer to device node associated with provider 3024 * 3025 * Find the last PM domain that was added by a particular provider and 3026 * remove this PM domain from the list of PM domains. The provider is 3027 * identified by the 'provider' device structure that is passed. The PM 3028 * domain will only be removed, if the provider associated with domain 3029 * has been removed. 3030 * 3031 * Returns a valid pointer to struct generic_pm_domain on success or 3032 * ERR_PTR() on failure. 3033 */ 3034 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) 3035 { 3036 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT); 3037 int ret; 3038 3039 if (IS_ERR_OR_NULL(np)) 3040 return ERR_PTR(-EINVAL); 3041 3042 mutex_lock(&gpd_list_lock); 3043 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) { 3044 if (gpd->provider == of_fwnode_handle(np)) { 3045 ret = genpd_remove(gpd); 3046 genpd = ret ? ERR_PTR(ret) : gpd; 3047 break; 3048 } 3049 } 3050 mutex_unlock(&gpd_list_lock); 3051 3052 return genpd; 3053 } 3054 EXPORT_SYMBOL_GPL(of_genpd_remove_last); 3055 3056 static void genpd_release_dev(struct device *dev) 3057 { 3058 of_node_put(dev->of_node); 3059 kfree(dev); 3060 } 3061 3062 static const struct bus_type genpd_bus_type = { 3063 .name = "genpd", 3064 }; 3065 3066 /** 3067 * genpd_dev_pm_detach - Detach a device from its PM domain. 3068 * @dev: Device to detach. 
3069 * @power_off: Currently not used 3070 * 3071 * Try to locate a corresponding generic PM domain, which the device was 3072 * attached to previously. If such is found, the device is detached from it. 3073 */ 3074 static void genpd_dev_pm_detach(struct device *dev, bool power_off) 3075 { 3076 struct generic_pm_domain *pd; 3077 unsigned int i; 3078 int ret = 0; 3079 3080 pd = dev_to_genpd(dev); 3081 if (IS_ERR(pd)) 3082 return; 3083 3084 dev_dbg(dev, "removing from PM domain %s\n", pd->name); 3085 3086 /* Drop the default performance state */ 3087 if (dev_gpd_data(dev)->default_pstate) { 3088 dev_pm_genpd_set_performance_state(dev, 0); 3089 dev_gpd_data(dev)->default_pstate = 0; 3090 } 3091 3092 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { 3093 ret = genpd_remove_device(pd, dev); 3094 if (ret != -EAGAIN) 3095 break; 3096 3097 mdelay(i); 3098 cond_resched(); 3099 } 3100 3101 if (ret < 0) { 3102 dev_err(dev, "failed to remove from PM domain %s: %d", 3103 pd->name, ret); 3104 return; 3105 } 3106 3107 /* Check if PM domain can be powered off after removing this device. */ 3108 genpd_queue_power_off_work(pd); 3109 3110 /* Unregister the device if it was created by genpd. */ 3111 if (dev->bus == &genpd_bus_type) 3112 device_unregister(dev); 3113 } 3114 3115 static void genpd_dev_pm_sync(struct device *dev) 3116 { 3117 struct generic_pm_domain *pd; 3118 3119 pd = dev_to_genpd(dev); 3120 if (IS_ERR(pd)) 3121 return; 3122 3123 genpd_queue_power_off_work(pd); 3124 } 3125 3126 static int genpd_set_required_opp_dev(struct device *dev, 3127 struct device *base_dev) 3128 { 3129 struct dev_pm_opp_config config = { 3130 .required_dev = dev, 3131 }; 3132 int ret; 3133 3134 /* Limit support to non-providers for now. */ 3135 if (of_property_present(base_dev->of_node, "#power-domain-cells")) 3136 return 0; 3137 3138 if (!dev_pm_opp_of_has_required_opp(base_dev)) 3139 return 0; 3140 3141 ret = dev_pm_opp_set_config(base_dev, &config); 3142 if (ret < 0) 3143 return ret; 3144 3145 dev_gpd_data(dev)->opp_token = ret; 3146 return 0; 3147 } 3148 3149 static int genpd_set_required_opp(struct device *dev, unsigned int index) 3150 { 3151 int ret, pstate; 3152 3153 /* Set the default performance state */ 3154 pstate = of_get_required_opp_performance_state(dev->of_node, index); 3155 if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) { 3156 ret = pstate; 3157 goto err; 3158 } else if (pstate > 0) { 3159 ret = dev_pm_genpd_set_performance_state(dev, pstate); 3160 if (ret) 3161 goto err; 3162 dev_gpd_data(dev)->default_pstate = pstate; 3163 } 3164 3165 return 0; 3166 err: 3167 dev_err(dev, "failed to set required performance state for power-domain %s: %d\n", 3168 dev_to_genpd(dev)->name, ret); 3169 return ret; 3170 } 3171 3172 static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev, 3173 unsigned int index, unsigned int num_domains, 3174 bool power_on) 3175 { 3176 struct of_phandle_args pd_args; 3177 struct generic_pm_domain *pd; 3178 int ret; 3179 3180 ret = of_parse_phandle_with_args(dev->of_node, "power-domains", 3181 "#power-domain-cells", index, &pd_args); 3182 if (ret < 0) 3183 return ret; 3184 3185 mutex_lock(&gpd_list_lock); 3186 pd = genpd_get_from_provider(&pd_args); 3187 of_node_put(pd_args.np); 3188 if (IS_ERR(pd)) { 3189 mutex_unlock(&gpd_list_lock); 3190 dev_dbg(dev, "%s() failed to find PM domain: %ld\n", 3191 __func__, PTR_ERR(pd)); 3192 return driver_deferred_probe_check_state(base_dev); 3193 } 3194 3195 dev_dbg(dev, "adding to PM domain %s\n", pd->name); 3196 3197 ret = 
genpd_add_device(pd, dev, base_dev); 3198 mutex_unlock(&gpd_list_lock); 3199 3200 if (ret < 0) 3201 return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name); 3202 3203 dev->pm_domain->detach = genpd_dev_pm_detach; 3204 dev->pm_domain->sync = genpd_dev_pm_sync; 3205 3206 /* 3207 * For a single PM domain the index of the required OPP must be zero, so 3208 * let's try to assign a required dev in that case. In the multiple PM 3209 * domains case, we need platform code to specify the index. 3210 */ 3211 if (num_domains == 1) { 3212 ret = genpd_set_required_opp_dev(dev, base_dev); 3213 if (ret) 3214 goto err; 3215 } 3216 3217 ret = genpd_set_required_opp(dev, index); 3218 if (ret) 3219 goto err; 3220 3221 if (power_on) { 3222 genpd_lock(pd); 3223 ret = genpd_power_on(pd, 0); 3224 genpd_unlock(pd); 3225 } 3226 3227 if (ret) { 3228 /* Drop the default performance state */ 3229 if (dev_gpd_data(dev)->default_pstate) { 3230 dev_pm_genpd_set_performance_state(dev, 0); 3231 dev_gpd_data(dev)->default_pstate = 0; 3232 } 3233 3234 genpd_remove_device(pd, dev); 3235 return -EPROBE_DEFER; 3236 } 3237 3238 return 1; 3239 3240 err: 3241 genpd_remove_device(pd, dev); 3242 return ret; 3243 } 3244 3245 /** 3246 * genpd_dev_pm_attach - Attach a device to its PM domain using DT. 3247 * @dev: Device to attach. 3248 * 3249 * Parse device's OF node to find a PM domain specifier. If such is found, 3250 * attaches the device to the retrieved pm_domain ops. 3251 * 3252 * Returns 1 on a successfully attached PM domain, 0 when the device doesn't need a 3253 * PM domain or when multiple power-domains exist for it, else a negative error 3254 * code. Note that if a power-domain exists for the device, but it cannot be 3255 * found or turned on, then -EPROBE_DEFER is returned to ensure that the device is 3256 * not probed and is re-tried later. 3257 */ 3258 int genpd_dev_pm_attach(struct device *dev) 3259 { 3260 if (!dev->of_node) 3261 return 0; 3262 3263 /* 3264 * Devices with multiple PM domains must be attached separately, as we 3265 * can only attach one PM domain per device. 3266 */ 3267 if (of_count_phandle_with_args(dev->of_node, "power-domains", 3268 "#power-domain-cells") != 1) 3269 return 0; 3270 3271 return __genpd_dev_pm_attach(dev, dev, 0, 1, true); 3272 } 3273 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 3274 3275 /** 3276 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains. 3277 * @dev: The device used to lookup the PM domain. 3278 * @index: The index of the PM domain. 3279 * 3280 * Parse device's OF node to find a PM domain specifier at the provided @index. 3281 * If such is found, creates a virtual device and attaches it to the retrieved 3282 * pm_domain ops. To deal with detaching of the virtual device, the ->detach() 3283 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach(). 3284 * 3285 * Returns the created virtual device if the PM domain is successfully attached, NULL 3286 * when the device doesn't need a PM domain, else an ERR_PTR() in case of 3287 * failures. If a power-domain exists for the device, but cannot be found or 3288 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device 3289 * is not probed and is re-tried later. 3290 */ 3291 struct device *genpd_dev_pm_attach_by_id(struct device *dev, 3292 unsigned int index) 3293 { 3294 struct device *virt_dev; 3295 int num_domains; 3296 int ret; 3297 3298 if (!dev->of_node) 3299 return NULL; 3300 3301 /* Verify that the index is within a valid range.
*/ 3302 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", 3303 "#power-domain-cells"); 3304 if (num_domains < 0 || index >= num_domains) 3305 return NULL; 3306 3307 if (!genpd_bus_registered) 3308 return ERR_PTR(-ENODEV); 3309 3310 /* Allocate and register device on the genpd bus. */ 3311 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); 3312 if (!virt_dev) 3313 return ERR_PTR(-ENOMEM); 3314 3315 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); 3316 virt_dev->bus = &genpd_bus_type; 3317 virt_dev->release = genpd_release_dev; 3318 virt_dev->of_node = of_node_get(dev->of_node); 3319 3320 ret = device_register(virt_dev); 3321 if (ret) { 3322 put_device(virt_dev); 3323 return ERR_PTR(ret); 3324 } 3325 3326 /* Try to attach the device to the PM domain at the specified index. */ 3327 ret = __genpd_dev_pm_attach(virt_dev, dev, index, num_domains, false); 3328 if (ret < 1) { 3329 device_unregister(virt_dev); 3330 return ret ? ERR_PTR(ret) : NULL; 3331 } 3332 3333 pm_runtime_enable(virt_dev); 3334 genpd_queue_power_off_work(dev_to_genpd(virt_dev)); 3335 3336 return virt_dev; 3337 } 3338 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); 3339 3340 /** 3341 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains. 3342 * @dev: The device used to lookup the PM domain. 3343 * @name: The name of the PM domain. 3344 * 3345 * Parse device's OF node to find a PM domain specifier using the 3346 * power-domain-names DT property. For further description see 3347 * genpd_dev_pm_attach_by_id(). 3348 */ 3349 struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) 3350 { 3351 int index; 3352 3353 if (!dev->of_node) 3354 return NULL; 3355 3356 index = of_property_match_string(dev->of_node, "power-domain-names", 3357 name); 3358 if (index < 0) 3359 return NULL; 3360 3361 return genpd_dev_pm_attach_by_id(dev, index); 3362 } 3363 3364 static const struct of_device_id idle_state_match[] = { 3365 { .compatible = "domain-idle-state", }, 3366 { } 3367 }; 3368 3369 static int genpd_parse_state(struct genpd_power_state *genpd_state, 3370 struct device_node *state_node) 3371 { 3372 int err; 3373 u32 residency; 3374 u32 entry_latency, exit_latency; 3375 3376 err = of_property_read_u32(state_node, "entry-latency-us", 3377 &entry_latency); 3378 if (err) { 3379 pr_debug(" * %pOF missing entry-latency-us property\n", 3380 state_node); 3381 return -EINVAL; 3382 } 3383 3384 err = of_property_read_u32(state_node, "exit-latency-us", 3385 &exit_latency); 3386 if (err) { 3387 pr_debug(" * %pOF missing exit-latency-us property\n", 3388 state_node); 3389 return -EINVAL; 3390 } 3391 3392 err = of_property_read_u32(state_node, "min-residency-us", &residency); 3393 if (!err) 3394 genpd_state->residency_ns = 1000LL * residency; 3395 3396 of_property_read_string(state_node, "idle-state-name", &genpd_state->name); 3397 3398 genpd_state->power_on_latency_ns = 1000LL * exit_latency; 3399 genpd_state->power_off_latency_ns = 1000LL * entry_latency; 3400 genpd_state->fwnode = of_fwnode_handle(state_node); 3401 3402 return 0; 3403 } 3404 3405 static int genpd_iterate_idle_states(struct device_node *dn, 3406 struct genpd_power_state *states) 3407 { 3408 int ret; 3409 struct of_phandle_iterator it; 3410 struct device_node *np; 3411 int i = 0; 3412 3413 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL); 3414 if (ret <= 0) 3415 return ret == -ENOENT ? 
0 : ret; 3416 3417 /* Loop over the phandles until all the requested entries are found */ 3418 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) { 3419 np = it.node; 3420 if (!of_match_node(idle_state_match, np)) 3421 continue; 3422 3423 if (!of_device_is_available(np)) 3424 continue; 3425 3426 if (states) { 3427 ret = genpd_parse_state(&states[i], np); 3428 if (ret) { 3429 pr_err("Parsing idle state node %pOF failed with err %d\n", 3430 np, ret); 3431 of_node_put(np); 3432 return ret; 3433 } 3434 } 3435 i++; 3436 } 3437 3438 return i; 3439 } 3440 3441 /** 3442 * of_genpd_parse_idle_states: Return array of idle states for the genpd. 3443 * 3444 * @dn: The genpd device node 3445 * @states: The pointer to which the state array will be saved. 3446 * @n: The count of elements in the array returned from this function. 3447 * 3448 * Returns the domain idle states parsed from the OF node. The memory for the states 3449 * is allocated by this function and it is the responsibility of the caller to 3450 * free it after use. If any number of compatible domain idle states is found 3451 * (including zero), 0 is returned; in case of errors, a negative error code is returned. 3452 */ 3453 int of_genpd_parse_idle_states(struct device_node *dn, 3454 struct genpd_power_state **states, int *n) 3455 { 3456 struct genpd_power_state *st; 3457 int ret; 3458 3459 ret = genpd_iterate_idle_states(dn, NULL); 3460 if (ret < 0) 3461 return ret; 3462 3463 if (!ret) { 3464 *states = NULL; 3465 *n = 0; 3466 return 0; 3467 } 3468 3469 st = kcalloc(ret, sizeof(*st), GFP_KERNEL); 3470 if (!st) 3471 return -ENOMEM; 3472 3473 ret = genpd_iterate_idle_states(dn, st); 3474 if (ret <= 0) { 3475 kfree(st); 3476 return ret < 0 ? ret : -EINVAL; 3477 } 3478 3479 *states = st; 3480 *n = ret; 3481 3482 return 0; 3483 } 3484 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); 3485 3486 /** 3487 * of_genpd_sync_state() - A common sync_state function for genpd providers 3488 * @np: The device node the genpd provider is associated with. 3489 * 3490 * The @np that corresponds to a genpd provider may provide one or multiple 3491 * genpds. This function uses @np to find the genpds that belong to the 3492 * provider and tries to power off each of them.
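 *
 * A hypothetical provider driver can use this as its sync_state callback via
 * a small wrapper (driver and function names are made up)::
 *
 *	static void my_pd_sync_state(struct device *dev)
 *	{
 *		of_genpd_sync_state(dev->of_node);
 *	}
 *
 *	static struct platform_driver my_pd_driver = {
 *		.probe = my_pd_probe,
 *		.driver = {
 *			.name = "my-pd",
 *			.of_match_table = my_pd_of_match,
 *			.sync_state = my_pd_sync_state,
 *		},
 *	};
 *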
3493 */ 3494 void of_genpd_sync_state(struct device_node *np) 3495 { 3496 struct generic_pm_domain *genpd; 3497 3498 if (!np) 3499 return; 3500 3501 mutex_lock(&gpd_list_lock); 3502 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 3503 if (genpd->provider == of_fwnode_handle(np)) { 3504 genpd_lock(genpd); 3505 genpd->stay_on = false; 3506 genpd_power_off(genpd, false, 0); 3507 genpd_unlock(genpd); 3508 } 3509 } 3510 mutex_unlock(&gpd_list_lock); 3511 } 3512 EXPORT_SYMBOL_GPL(of_genpd_sync_state); 3513 3514 static int genpd_provider_probe(struct device *dev) 3515 { 3516 return 0; 3517 } 3518 3519 static void genpd_provider_sync_state(struct device *dev) 3520 { 3521 struct generic_pm_domain *genpd = container_of(dev, struct generic_pm_domain, dev); 3522 3523 switch (genpd->sync_state) { 3524 case GENPD_SYNC_STATE_OFF: 3525 break; 3526 3527 case GENPD_SYNC_STATE_ONECELL: 3528 of_genpd_sync_state(dev->of_node); 3529 break; 3530 3531 case GENPD_SYNC_STATE_SIMPLE: 3532 genpd_lock(genpd); 3533 genpd->stay_on = false; 3534 genpd_power_off(genpd, false, 0); 3535 genpd_unlock(genpd); 3536 break; 3537 3538 default: 3539 break; 3540 } 3541 } 3542 3543 static struct device_driver genpd_provider_drv = { 3544 .name = "genpd_provider", 3545 .bus = &genpd_provider_bus_type, 3546 .probe = genpd_provider_probe, 3547 .sync_state = genpd_provider_sync_state, 3548 .suppress_bind_attrs = true, 3549 }; 3550 3551 static int __init genpd_bus_init(void) 3552 { 3553 int ret; 3554 3555 ret = device_register(&genpd_provider_bus); 3556 if (ret) { 3557 put_device(&genpd_provider_bus); 3558 return ret; 3559 } 3560 3561 ret = bus_register(&genpd_provider_bus_type); 3562 if (ret) 3563 goto err_dev; 3564 3565 ret = bus_register(&genpd_bus_type); 3566 if (ret) 3567 goto err_prov_bus; 3568 3569 ret = driver_register(&genpd_provider_drv); 3570 if (ret) 3571 goto err_bus; 3572 3573 genpd_bus_registered = true; 3574 return 0; 3575 3576 err_bus: 3577 bus_unregister(&genpd_bus_type); 3578 err_prov_bus: 3579 bus_unregister(&genpd_provider_bus_type); 3580 err_dev: 3581 device_unregister(&genpd_provider_bus); 3582 return ret; 3583 } 3584 core_initcall(genpd_bus_init); 3585 3586 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ 3587 3588 3589 /*** debugfs support ***/ 3590 3591 #ifdef CONFIG_DEBUG_FS 3592 /* 3593 * TODO: This function is a slightly modified version of rtpm_status_show 3594 * from sysfs.c, so generalize it. 
3595 */ 3596 static void rtpm_status_str(struct seq_file *s, struct device *dev) 3597 { 3598 static const char * const status_lookup[] = { 3599 [RPM_ACTIVE] = "active", 3600 [RPM_RESUMING] = "resuming", 3601 [RPM_SUSPENDED] = "suspended", 3602 [RPM_SUSPENDING] = "suspending" 3603 }; 3604 const char *p = ""; 3605 3606 if (dev->power.runtime_error) 3607 p = "error"; 3608 else if (dev->power.disable_depth) 3609 p = "unsupported"; 3610 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) 3611 p = status_lookup[dev->power.runtime_status]; 3612 else 3613 WARN_ON(1); 3614 3615 seq_printf(s, "%-26s ", p); 3616 } 3617 3618 static void perf_status_str(struct seq_file *s, struct device *dev) 3619 { 3620 struct generic_pm_domain_data *gpd_data; 3621 3622 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 3623 3624 seq_printf(s, "%-10u ", gpd_data->performance_state); 3625 } 3626 3627 static void mode_status_str(struct seq_file *s, struct device *dev) 3628 { 3629 struct generic_pm_domain_data *gpd_data; 3630 3631 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); 3632 3633 seq_printf(s, "%2s", gpd_data->hw_mode ? "HW" : "SW"); 3634 } 3635 3636 static int genpd_summary_one(struct seq_file *s, 3637 struct generic_pm_domain *genpd) 3638 { 3639 static const char * const status_lookup[] = { 3640 [GENPD_STATE_ON] = "on", 3641 [GENPD_STATE_OFF] = "off" 3642 }; 3643 struct pm_domain_data *pm_data; 3644 struct gpd_link *link; 3645 char state[16]; 3646 int ret; 3647 3648 ret = genpd_lock_interruptible(genpd); 3649 if (ret) 3650 return -ERESTARTSYS; 3651 3652 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) 3653 goto exit; 3654 if (!genpd_status_on(genpd)) 3655 snprintf(state, sizeof(state), "%s-%u", 3656 status_lookup[genpd->status], genpd->state_idx); 3657 else 3658 snprintf(state, sizeof(state), "%s", 3659 status_lookup[genpd->status]); 3660 seq_printf(s, "%-30s %-30s %u", dev_name(&genpd->dev), state, genpd->performance_state); 3661 3662 /* 3663 * Modifications on the list require holding locks on both 3664 * parent and child, so we are safe. 3665 * Also the device name is immutable. 
3666 */ 3667 list_for_each_entry(link, &genpd->parent_links, parent_node) { 3668 if (list_is_first(&link->parent_node, &genpd->parent_links)) 3669 seq_printf(s, "\n%48s", " "); 3670 seq_printf(s, "%s", link->child->name); 3671 if (!list_is_last(&link->parent_node, &genpd->parent_links)) 3672 seq_puts(s, ", "); 3673 } 3674 3675 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { 3676 seq_printf(s, "\n %-30s ", dev_name(pm_data->dev)); 3677 rtpm_status_str(s, pm_data->dev); 3678 perf_status_str(s, pm_data->dev); 3679 mode_status_str(s, pm_data->dev); 3680 } 3681 3682 seq_puts(s, "\n"); 3683 exit: 3684 genpd_unlock(genpd); 3685 3686 return 0; 3687 } 3688 3689 static int summary_show(struct seq_file *s, void *data) 3690 { 3691 struct generic_pm_domain *genpd; 3692 int ret = 0; 3693 3694 seq_puts(s, "domain status children performance\n"); 3695 seq_puts(s, " /device runtime status managed by\n"); 3696 seq_puts(s, "------------------------------------------------------------------------------\n"); 3697 3698 ret = mutex_lock_interruptible(&gpd_list_lock); 3699 if (ret) 3700 return -ERESTARTSYS; 3701 3702 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { 3703 ret = genpd_summary_one(s, genpd); 3704 if (ret) 3705 break; 3706 } 3707 mutex_unlock(&gpd_list_lock); 3708 3709 return ret; 3710 } 3711 3712 static int status_show(struct seq_file *s, void *data) 3713 { 3714 static const char * const status_lookup[] = { 3715 [GENPD_STATE_ON] = "on", 3716 [GENPD_STATE_OFF] = "off" 3717 }; 3718 3719 struct generic_pm_domain *genpd = s->private; 3720 int ret = 0; 3721 3722 ret = genpd_lock_interruptible(genpd); 3723 if (ret) 3724 return -ERESTARTSYS; 3725 3726 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) 3727 goto exit; 3728 3729 if (genpd->status == GENPD_STATE_OFF) 3730 seq_printf(s, "%s-%u\n", status_lookup[genpd->status], 3731 genpd->state_idx); 3732 else 3733 seq_printf(s, "%s\n", status_lookup[genpd->status]); 3734 exit: 3735 genpd_unlock(genpd); 3736 return ret; 3737 } 3738 3739 static int sub_domains_show(struct seq_file *s, void *data) 3740 { 3741 struct generic_pm_domain *genpd = s->private; 3742 struct gpd_link *link; 3743 int ret = 0; 3744 3745 ret = genpd_lock_interruptible(genpd); 3746 if (ret) 3747 return -ERESTARTSYS; 3748 3749 list_for_each_entry(link, &genpd->parent_links, parent_node) 3750 seq_printf(s, "%s\n", link->child->name); 3751 3752 genpd_unlock(genpd); 3753 return ret; 3754 } 3755 3756 static int idle_states_show(struct seq_file *s, void *data) 3757 { 3758 struct generic_pm_domain *genpd = s->private; 3759 u64 now, delta, idle_time = 0; 3760 unsigned int i; 3761 int ret = 0; 3762 3763 ret = genpd_lock_interruptible(genpd); 3764 if (ret) 3765 return -ERESTARTSYS; 3766 3767 seq_puts(s, "State Time Spent(ms) Usage Rejected Above Below\n"); 3768 3769 for (i = 0; i < genpd->state_count; i++) { 3770 struct genpd_power_state *state = &genpd->states[i]; 3771 char state_name[15]; 3772 3773 idle_time += state->idle_time; 3774 3775 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) { 3776 now = ktime_get_mono_fast_ns(); 3777 if (now > genpd->accounting_time) { 3778 delta = now - genpd->accounting_time; 3779 idle_time += delta; 3780 } 3781 } 3782 3783 if (!state->name) 3784 snprintf(state_name, ARRAY_SIZE(state_name), "S%-13d", i); 3785 3786 do_div(idle_time, NSEC_PER_MSEC); 3787 seq_printf(s, "%-14s %-14llu %-10llu %-10llu %-10llu %llu\n", 3788 state->name ?: state_name, idle_time, 3789 state->usage, state->rejected, state->above, 3790 state->below); 
3791 } 3792 3793 genpd_unlock(genpd); 3794 return ret; 3795 } 3796 3797 static int active_time_show(struct seq_file *s, void *data) 3798 { 3799 struct generic_pm_domain *genpd = s->private; 3800 u64 now, on_time, delta = 0; 3801 int ret = 0; 3802 3803 ret = genpd_lock_interruptible(genpd); 3804 if (ret) 3805 return -ERESTARTSYS; 3806 3807 if (genpd->status == GENPD_STATE_ON) { 3808 now = ktime_get_mono_fast_ns(); 3809 if (now > genpd->accounting_time) 3810 delta = now - genpd->accounting_time; 3811 } 3812 3813 on_time = genpd->on_time + delta; 3814 do_div(on_time, NSEC_PER_MSEC); 3815 seq_printf(s, "%llu ms\n", on_time); 3816 3817 genpd_unlock(genpd); 3818 return ret; 3819 } 3820 3821 static int total_idle_time_show(struct seq_file *s, void *data) 3822 { 3823 struct generic_pm_domain *genpd = s->private; 3824 u64 now, delta, total = 0; 3825 unsigned int i; 3826 int ret = 0; 3827 3828 ret = genpd_lock_interruptible(genpd); 3829 if (ret) 3830 return -ERESTARTSYS; 3831 3832 for (i = 0; i < genpd->state_count; i++) { 3833 total += genpd->states[i].idle_time; 3834 3835 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) { 3836 now = ktime_get_mono_fast_ns(); 3837 if (now > genpd->accounting_time) { 3838 delta = now - genpd->accounting_time; 3839 total += delta; 3840 } 3841 } 3842 } 3843 3844 do_div(total, NSEC_PER_MSEC); 3845 seq_printf(s, "%llu ms\n", total); 3846 3847 genpd_unlock(genpd); 3848 return ret; 3849 } 3850 3851 3852 static int devices_show(struct seq_file *s, void *data) 3853 { 3854 struct generic_pm_domain *genpd = s->private; 3855 struct pm_domain_data *pm_data; 3856 int ret = 0; 3857 3858 ret = genpd_lock_interruptible(genpd); 3859 if (ret) 3860 return -ERESTARTSYS; 3861 3862 list_for_each_entry(pm_data, &genpd->dev_list, list_node) 3863 seq_printf(s, "%s\n", dev_name(pm_data->dev)); 3864 3865 genpd_unlock(genpd); 3866 return ret; 3867 } 3868 3869 static int perf_state_show(struct seq_file *s, void *data) 3870 { 3871 struct generic_pm_domain *genpd = s->private; 3872 3873 if (genpd_lock_interruptible(genpd)) 3874 return -ERESTARTSYS; 3875 3876 seq_printf(s, "%u\n", genpd->performance_state); 3877 3878 genpd_unlock(genpd); 3879 return 0; 3880 } 3881 3882 DEFINE_SHOW_ATTRIBUTE(summary); 3883 DEFINE_SHOW_ATTRIBUTE(status); 3884 DEFINE_SHOW_ATTRIBUTE(sub_domains); 3885 DEFINE_SHOW_ATTRIBUTE(idle_states); 3886 DEFINE_SHOW_ATTRIBUTE(active_time); 3887 DEFINE_SHOW_ATTRIBUTE(total_idle_time); 3888 DEFINE_SHOW_ATTRIBUTE(devices); 3889 DEFINE_SHOW_ATTRIBUTE(perf_state); 3890 3891 static void genpd_debug_add(struct generic_pm_domain *genpd) 3892 { 3893 struct dentry *d; 3894 3895 if (!genpd_debugfs_dir) 3896 return; 3897 3898 d = debugfs_create_dir(dev_name(&genpd->dev), genpd_debugfs_dir); 3899 3900 debugfs_create_file("current_state", 0444, 3901 d, genpd, &status_fops); 3902 debugfs_create_file("sub_domains", 0444, 3903 d, genpd, &sub_domains_fops); 3904 debugfs_create_file("idle_states", 0444, 3905 d, genpd, &idle_states_fops); 3906 debugfs_create_file("active_time", 0444, 3907 d, genpd, &active_time_fops); 3908 debugfs_create_file("total_idle_time", 0444, 3909 d, genpd, &total_idle_time_fops); 3910 debugfs_create_file("devices", 0444, 3911 d, genpd, &devices_fops); 3912 if (genpd->set_performance_state) 3913 debugfs_create_file("perf_state", 0444, 3914 d, genpd, &perf_state_fops); 3915 } 3916 3917 static int __init genpd_debug_init(void) 3918 { 3919 struct generic_pm_domain *genpd; 3920 3921 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); 3922 3923 
debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, 3924 NULL, &summary_fops); 3925 3926 list_for_each_entry(genpd, &gpd_list, gpd_list_node) 3927 genpd_debug_add(genpd); 3928 3929 return 0; 3930 } 3931 late_initcall(genpd_debug_init); 3932 3933 static void __exit genpd_debug_exit(void) 3934 { 3935 debugfs_remove_recursive(genpd_debugfs_dir); 3936 } 3937 __exitcall(genpd_debug_exit); 3938 #endif /* CONFIG_DEBUG_FS */ 3939