/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2010 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2023 Oxide Computer Company
 */

/*
 * sunpm.c builds sunpm.o	"power management framework"
 *	kernel-resident power management code.  Implements power management
 *	policy.
 *	Assumes: all backwards compat. device components wake up on, and
 *	the pm_info pointer in dev_info is initially NULL.
 *
 * PM - (device) Power Management
 *
 * Each device may have 0 or more components.  If a device has no components,
 * then it can't be power managed.  Each component has 2 or more
 * power states.
 *
 * "Backwards Compatible" (bc) devices:
 * There are two different types of devices from the point of view of this
 * code.  The original type, left over from the original PM implementation on
 * the voyager platform, are known in this code as "backwards compatible"
 * devices (PM_ISBC(dip) returns true).
 * They are recognized by the pm code by the lack of a pm-components property
 * and a call made by the driver to pm_create_components(9F).
 * For these devices, component 0 is special, and represents the power state
 * of the device.  If component 0 is to be set to power level 0 (off), then
 * the framework must first call into the driver's detach(9E) routine with
 * DDI_PM_SUSPEND, to get the driver to save the hardware state of the device.
 * After setting component 0 from 0 to a non-zero power level, a call must be
 * made into the driver's attach(9E) routine with DDI_PM_RESUME.
 *
 * Currently, the only way to get a bc device power managed is via a set of
 * ioctls (PM_DIRECT_PM, PM_SET_CURRENT_POWER) issued to /dev/pm.
 *
 * For non-bc devices, the driver describes the components by exporting a
 * pm-components(9P) property that tells how many components there are,
 * tells what each component's power state values are, and provides human
 * readable strings (currently unused) for each component name and power state.
 * Devices which export pm-components(9P) are automatically power managed
 * whenever autopm is enabled (via the PM_START_PM ioctl issued by pmconfig(8)
 * after parsing power.conf(5)).  The exception to this rule is that power
 * manageable CPU devices may be automatically managed independently of autopm
 * by enabling or disabling cpupm (via the PM_START_CPUPM and PM_STOP_CPUPM
 * ioctls).  If the CPU devices are not managed independently, then they
 * are managed by autopm.  In either case, for automatically power managed
 * devices, all components are considered independent of each other, and it is
 * up to the driver to decide when a transition requires saving or restoring
 * hardware state.
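 *
 * As an illustration only (hypothetical names, modeled on the example in
 * pm-components(9P)): such a driver might export from its attach(9E)
 * routine a property of the form
 *
 *	pm-components="NAME=spindle-motor", "0=off", "1=full-speed";
 *
 * describing a single component with two power levels, 0 and 1.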
 *
 * Each device component also has a threshold time associated with each power
 * transition (see power.conf(5)), and a busy/idle state maintained by the
 * driver calling pm_idle_component(9F) and pm_busy_component(9F).
 * Components are created idle.
 *
 * The PM framework provides several functions:
 * -implement PM policy as described in power.conf(5)
 *	Policy is set by pmconfig(8) issuing pm ioctls based on power.conf(5).
 *	Policies consist of:
 *	    -set threshold values (defaults if none provided by pmconfig)
 *	    -set dependencies among devices
 *	    -enable/disable autopm
 *	    -enable/disable cpupm
 *	    -turn down idle components based on thresholds (if autopm or
 *	     cpupm is enabled) (aka scanning)
 *	    -maintain power states based on dependencies among devices
 *	    -upon request, or when the frame buffer powers off, attempt to
 *	     turn off all components that are idle or become idle over the
 *	     next (10 sec) period in an attempt to get down to an EnergyStar
 *	     compliant state
 *	    -prevent powering off of a device which exported the
 *	     pm-no-involuntary-power-cycles property without active
 *	     involvement of the device's driver (so no removing power when
 *	     the device driver is not attached)
 * -provide a mechanism for a device driver to request that a device's
 *	component be brought back to the power level necessary for the use
 *	of the device
 * -allow a process to directly control the power levels of device components
 *	(via ioctls issued to /dev/pm--see usr/src/uts/common/io/pm.c)
 * -ensure that the console frame buffer is powered up before being referenced
 *	via prom_printf() or other prom calls that might generate console
 *	output
 * -maintain implicit dependencies (e.g. parent must be powered up if child is)
 * -provide "backwards compatible" behavior for devices without the
 *	pm-components property
 *
 * Scanning:
 * Whenever autopm or cpupm is enabled, the framework attempts to bring each
 * component of each managed device to its lowest power based on the threshold
 * of idleness associated with each transition and the busy/idle state of the
 * component.
 *
 * The actual work of this is done by pm_scan_dev(), which cycles through each
 * component of a device, checking its idleness against its current threshold,
 * and calling pm_set_power() as appropriate to change the power level.
 * This function also indicates when it would next be profitable to scan the
 * device again, and a new scan is scheduled after that time.
 *
 * Dependencies:
 * It is possible to establish a dependency between the power states of two
 * otherwise unrelated devices.  This is currently done to ensure that the
 * cdrom is always up whenever the console framebuffer is up, so that the user
 * can insert a cdrom and see a popup as a result.
 *
 * The dependency terminology used in power.conf(5) is not easy to understand,
 * so we've adopted a different terminology in the implementation.  We write
 * of a "keeps up" and a "kept up" device.  A relationship can be established
 * where one device keeps up another.  That means that if the "keeps up"
 * device has any component that is at a non-zero power level, all components
 * of the "kept up" device must be brought to full power.  This relationship
 * is asynchronous.  When the keeping device is powered up, a request is
 * queued to a worker thread to bring up the kept device.  The caller does
 * not wait.  Scan will not turn down a kept up device.
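 *
 * For example, the stock power.conf(5) typically expresses the
 * cdrom/framebuffer relationship above with a line such as
 *
 *	device-dependency-property removable-media /dev/fb
 *
 * which makes every device exporting a removable-media property a "kept up"
 * device of the console frame buffer, the "keeps up" device.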
 *
 * Direct PM:
 * A device may be directly power managed by a process.  If a device is
 * directly pm'd, then it will not be scanned, and dependencies will not be
 * enforced.  If a directly pm'd device's driver requests a power change (via
 * pm_raise_power(9F)), then the request is blocked and notification is sent
 * to the controlling process, which must issue the requested power change for
 * the driver to proceed.
 *
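 * Purely as a sketch (userland code, not part of this file, and the exact
 * ioctl sequence is an assumption): a controlling process claims a device
 * and services such requests roughly like
 *
 *	fd = open("/dev/pm", O_RDWR);
 *	ioctl(fd, PM_DIRECT_PM, &req);			claim the device
 *	ioctl(fd, PM_GET_STATE_CHANGE_WAIT, &req);	wait for a request
 *	ioctl(fd, PM_SET_CURRENT_POWER, &req);		grant the level change
 *
 * with req a pm_req_t identifying the device by physical path.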
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/callb.h>		/* callback registration during CPR */
#include <sys/conf.h>		/* driver flags and functions */
#include <sys/open.h>		/* OTYP_CHR definition */
#include <sys/stat.h>		/* S_IFCHR definition */
#include <sys/pathname.h>	/* name -> dev_info xlation */
#include <sys/ddi_impldefs.h>	/* dev_info node fields */
#include <sys/kmem.h>		/* memory alloc stuff */
#include <sys/debug.h>
#include <sys/archsystm.h>
#include <sys/pm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/sunpm.h>
#include <sys/epm.h>
#include <sys/vfs.h>
#include <sys/mode.h>
#include <sys/mkdev.h>
#include <sys/promif.h>
#include <sys/consdev.h>
#include <sys/esunddi.h>
#include <sys/modctl.h>
#include <sys/fs/ufs_fs.h>
#include <sys/note.h>
#include <sys/taskq.h>
#include <sys/bootconf.h>
#include <sys/reboot.h>
#include <sys/spl.h>
#include <sys/disp.h>
#include <sys/sobject.h>
#include <sys/sunmdi.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/cyclic.h>
#include <sys/uadmin.h>
#include <sys/srn.h>


/*
 * PM LOCKING
 *	The list of locks:
 *	Global pm mutex locks.
 *
 *	pm_scan_lock:
 *		It protects the timeout id of the scan thread, and the value
 *		of autopm_enabled and cpupm.  This lock is not held
 *		concurrently with any other PM locks.
 *
 *	pm_clone_lock:	Protects the clone list and count of poll events
 *		pending for the pm driver.
 *		Lock ordering:
 *			pm_clone_lock -> pm_pscc_interest_rwlock,
 *			pm_clone_lock -> pm_pscc_direct_rwlock.
 *
 *	pm_rsvp_lock:
 *		Used to synchronize the data structures used for processes
 *		to rendezvous with state change information when doing
 *		direct PM.
 *		Lock ordering:
 *			pm_rsvp_lock -> pm_pscc_interest_rwlock,
 *			pm_rsvp_lock -> pm_pscc_direct_rwlock,
 *			pm_rsvp_lock -> pm_clone_lock.
 *
 *	ppm_lock:	protects the list of registered ppm drivers
 *		Lock ordering:
 *			ppm_lock -> ppm driver unit_lock
 *
 *	pm_compcnt_lock:
 *		Protects count of components that are not at their lowest
 *		power level.
 *		Lock ordering:
 *			pm_compcnt_lock -> ppm_lock.
 *
 *	pm_dep_thread_lock:
 *		Protects work list for pm_dep_thread.  Not taken concurrently
 *		with any other pm lock.
 *
 *	pm_remdrv_lock:
 *		Serializes the operation of removing noinvol data structure
 *		entries for a branch of the tree when a driver has been
 *		removed from the system (modctl_rem_major).
 *		Lock ordering:
 *			pm_remdrv_lock -> pm_noinvol_rwlock.
 *
 *	pm_cfb_lock: (High level spin lock)
 *		Protects the count of how many components of the console
 *		frame buffer are off (so we know if we have to bring up the
 *		console as a result of a prom_printf, etc.).
 *		No other locks are taken while holding this lock.
 *
 *	pm_loan_lock:
 *		Protects the lock_loan list.  List is used to record that one
 *		thread has acquired a power lock but has launched another
 *		thread to complete its processing.  An entry in the list
 *		indicates that the worker thread can borrow the lock held by
 *		the other thread, which must block on the completion of the
 *		worker.  Use is specific to module loading.
 *		No other locks are taken while holding this lock.
 *
 *	Global PM rwlocks
 *
 *	pm_thresh_rwlock:
 *		Protects the list of thresholds recorded for future use (when
 *		devices attach).
 *		Lock ordering:
 *			pm_thresh_rwlock -> devi_pm_lock
 *
 *	pm_noinvol_rwlock:
 *		Protects list of detached nodes that had noinvol registered.
 *		No other PM locks are taken while holding pm_noinvol_rwlock.
 *
 *	pm_pscc_direct_rwlock:
 *		Protects the list that maps devices being directly power
 *		managed to the processes that manage them.
 *		Lock ordering:
 *			pm_pscc_direct_rwlock -> psce_lock
 *
 *	pm_pscc_interest_rwlock:
 *		Protects the list that maps state change events to processes
 *		that want to know about them.
 *		Lock ordering:
 *			pm_pscc_interest_rwlock -> psce_lock
 *
 *	per-dip locks:
 *
 *	Each node has these per-dip locks, which are only used if the device
 *	is a candidate for power management (e.g. has pm components).
 *
 *	devi_pm_lock:
 *		Protects all power management state of the node except for
 *		power level, which is protected by ndi_devi_enter().
 *		Encapsulated in macros PM_LOCK_DIP()/PM_UNLOCK_DIP().
 *		Lock ordering:
 *			devi_pm_lock -> pm_rsvp_lock,
 *			devi_pm_lock -> pm_dep_thread_lock,
 *			devi_pm_lock -> pm_noinvol_rwlock,
 *			devi_pm_lock -> power lock
 *
 *	power lock (ndi_devi_enter()):
 *		Since changing power level is possibly a slow operation (30
 *		seconds to spin up a disk drive), this is locked separately.
 *		Since a call into the driver to change the power level of one
 *		component may result in a call back into the framework to
 *		change the power level of another, this lock allows
 *		re-entrancy by the same thread (ndi_devi_enter is used for
 *		this because the USB framework uses ndi_devi_enter in its
 *		power entry point, and use of any other lock would produce
 *		a deadlock).
 *
 *	devi_pm_busy_lock:
 *		This lock protects the integrity of the busy count.  It is
 *		only taken by pm_busy_component() and pm_idle_component() and
 *		some code that adjusts the busy time after the timer gets set
 *		up or after a CPR operation.  It is per-dip to keep from
 *		single-threading all the disk drivers on a system.
 *		It could be per-component instead, but most devices have
 *		only one component.
 *		No other PM locks are taken while holding this lock.
 *
 */

static int stdout_is_framebuffer;
static kmutex_t	e_pm_power_lock;
static kmutex_t pm_loan_lock;
kmutex_t	pm_scan_lock;
callb_id_t	pm_cpr_cb_id;
callb_id_t	pm_panic_cb_id;
callb_id_t	pm_halt_cb_id;
int		pm_comps_notlowest;	/* no. of comps not at lowest power */
int		pm_powering_down;	/* cpr is source of DDI_SUSPEND calls */

clock_t pm_id_ticks = 5;	/* ticks to wait before scan during idle-down */
clock_t pm_default_min_scan = PM_DEFAULT_MIN_SCAN;
clock_t pm_cpu_min_scan = PM_CPU_MIN_SCAN;

#define	PM_MIN_SCAN(dip)	(PM_ISCPU(dip) ? pm_cpu_min_scan : \
				    pm_default_min_scan)

static int pm_busop_set_power(dev_info_t *,
    void *, pm_bus_power_op_t, void *, void *);
static int pm_busop_match_request(dev_info_t *, void *);
static int pm_all_to_normal_nexus(dev_info_t *, pm_canblock_t);
static void e_pm_set_max_power(dev_info_t *, int, int);
static int e_pm_get_max_power(dev_info_t *, int);

/*
 * Dependency Processing is done through a separate thread.
 */
kmutex_t	pm_dep_thread_lock;
kcondvar_t	pm_dep_thread_cv;
pm_dep_wk_t	*pm_dep_thread_workq = NULL;
pm_dep_wk_t	*pm_dep_thread_tail = NULL;

/*
 * Autopm must be turned on by a PM_START_PM ioctl, so we don't end up
 * power managing things in single user mode that have been suppressed via
 * power.conf entries.  Protected by pm_scan_lock.
 */
int		autopm_enabled;

/*
 * cpupm is turned on and off, by the PM_START_CPUPM and PM_STOP_CPUPM ioctls,
 * to define the power management behavior of CPU devices separate from
 * autopm.  Protected by pm_scan_lock.
 */
pm_cpupm_t	cpupm = PM_CPUPM_NOTSET;

/*
 * Defines the default mode of operation for CPU power management,
 * either the polling implementation, or the event based dispatcher driven
 * implementation.
 */
pm_cpupm_t	cpupm_default_mode = PM_CPUPM_EVENT;

/*
 * AutoS3 depends on autopm being enabled, and must be enabled by the
 * PM_START_AUTOS3 command.
 */
int		autoS3_enabled;

#if !defined(__sparc)
/*
 * on sparc these live in fillsysinfo.c
 *
 * If this variable is non-zero, cpr should return "not supported" when
 * it is queried even though it would normally be supported on this platform.
 */
int cpr_supported_override;

/*
 * Some platforms may need to support CPR even in the absence of
 * having the correct platform id information.  If this
 * variable is non-zero, cpr should proceed even in the absence
 * of otherwise being qualified.
 */
int cpr_platform_enable = 0;

#endif

/*
 * pm_S3_enabled indicates that we believe the platform can support S3,
 * which we get from pmconfig(8).
 */
int	pm_S3_enabled;

/*
 * This flag is true while processes are stopped for a checkpoint/resume.
 * Controlling processes of direct pm'd devices are not available to
 * participate in power level changes, so we bypass them when this is set.
 */
static int	pm_processes_stopped;

#ifdef	DEBUG

/*
 * see common/sys/epm.h for PMD_* values
 */

uint_t	pm_debug = 0;

/*
 * If pm_divertdebug is set, then no prom_printf calls will be made by
 * PMD(), which will prevent debug output from bringing up the console
 * frame buffer.  Clearing this variable before setting pm_debug will result
 * in PMD output going to the console.
 *
 * pm_divertdebug is incremented in pm_set_power() if dip == cfb_dip to avoid
 * deadlocks and decremented at the end of pm_set_power().
 */
uint_t	pm_divertdebug = 1;
volatile uint_t pm_debug_to_console = 0;
kmutex_t pm_debug_lock;		/* protects pm_divertdebug */

void prdeps(char *);
#endif

/* Globals */

/*
 * List of recorded thresholds and dependencies
 */
pm_thresh_rec_t *pm_thresh_head;
krwlock_t pm_thresh_rwlock;

pm_pdr_t *pm_dep_head;
static int pm_unresolved_deps = 0;
static int pm_prop_deps = 0;

/*
 * List of devices that exported no-involuntary-power-cycles property
 */
pm_noinvol_t *pm_noinvol_head;

/*
 * Locks used in noinvol processing
 */
krwlock_t	pm_noinvol_rwlock;
kmutex_t	pm_remdrv_lock;

int pm_default_idle_threshold = PM_DEFAULT_SYS_IDLENESS;
int pm_system_idle_threshold;
int pm_cpu_idle_threshold;

/*
 * By default nexus has 0 threshold, and depends on its children to keep it up
 */
int pm_default_nexus_threshold = 0;

/*
 * Data structures shared with common/io/pm.c
 */
kmutex_t	pm_clone_lock;
kcondvar_t	pm_clones_cv[PM_MAX_CLONE];
uint_t		pm_poll_cnt[PM_MAX_CLONE];	/* count of events for poll */
unsigned char	pm_interest[PM_MAX_CLONE];
struct pollhead	pm_pollhead;

/*
 * Data structures shared with common/io/srn.c
 */
kmutex_t	srn_clone_lock;		/* protects srn_signal, srn_inuse */
void (*srn_signal)(int type, int event);
int srn_inuse;				/* stop srn detach */

extern int	hz;
extern char	*platform_module_list[];

/*
 * Wrappers for use in ddi_walk_devs
 */

static int	pm_set_dev_thr_walk(dev_info_t *, void *);
static int	pm_restore_direct_lvl_walk(dev_info_t *, void *);
static int	pm_save_direct_lvl_walk(dev_info_t *, void *);
static int	pm_discard_dep_walk(dev_info_t *, void *);
#ifdef DEBUG
static int	pm_desc_pwrchk_walk(dev_info_t *, void *);
#endif

/*
 * Routines for managing noinvol devices
 */
int	pm_noinvol_update(int, int, int, char *, dev_info_t *);
void	pm_noinvol_update_node(dev_info_t *,
	    pm_bp_noinvol_t *req);

kmutex_t pm_rsvp_lock;
kmutex_t pm_compcnt_lock;
krwlock_t pm_pscc_direct_rwlock;
krwlock_t pm_pscc_interest_rwlock;

#define	PSC_INTEREST	0	/* belongs to interest psc list */
#define	PSC_DIRECT	1	/* belongs to direct psc list */

pscc_t *pm_pscc_interest;
pscc_t *pm_pscc_direct;
#define	PM_MAJOR(dip)	ddi_driver_major(dip)
#define	PM_IS_NEXUS(dip) ((PM_MAJOR(dip) == DDI_MAJOR_T_NONE) ? 0 : \
	NEXUS_DRV(devopsp[PM_MAJOR(dip)]))
#define	POWERING_ON(old, new)	((old) == 0 && (new) != 0)
#define	POWERING_OFF(old, new)	((old) != 0 && (new) == 0)

#define	PM_INCR_NOTLOWEST(dip) {					\
	mutex_enter(&pm_compcnt_lock);					\
	if (!PM_IS_NEXUS(dip) ||					\
	    (DEVI(dip)->devi_pm_flags & (PMC_DEV_THRESH|PMC_COMP_THRESH))) {\
		if (pm_comps_notlowest == 0)				\
			pm_ppm_notify_all_lowest(dip, PM_NOT_ALL_LOWEST);\
		pm_comps_notlowest++;					\
		PMD(PMD_LEVEL, ("%s: %s@%s(%s#%d) incr notlowest->%d\n",\
		    pmf, PM_DEVICE(dip), pm_comps_notlowest))		\
	}								\
	mutex_exit(&pm_compcnt_lock);					\
}
#define	PM_DECR_NOTLOWEST(dip) {					\
	mutex_enter(&pm_compcnt_lock);					\
	if (!PM_IS_NEXUS(dip) ||					\
	    (DEVI(dip)->devi_pm_flags & (PMC_DEV_THRESH|PMC_COMP_THRESH))) {\
		ASSERT(pm_comps_notlowest);				\
		pm_comps_notlowest--;					\
		PMD(PMD_LEVEL, ("%s: %s@%s(%s#%d) decr notlowest to "	\
		    "%d\n", pmf, PM_DEVICE(dip), pm_comps_notlowest))	\
		if (pm_comps_notlowest == 0)				\
			pm_ppm_notify_all_lowest(dip, PM_ALL_LOWEST);	\
	}								\
	mutex_exit(&pm_compcnt_lock);					\
}

/*
 * Console frame-buffer power-management is not enabled when
 * debugging services are present.  To override, set pm_cfb_override
 * to non-zero.
 */
uint_t pm_cfb_comps_off = 0;	/* PM_LEVEL_UNKNOWN is considered on */
kmutex_t pm_cfb_lock;
int pm_cfb_enabled = 1;		/* non-zero allows pm of console frame buffer */
#ifdef DEBUG
int pm_cfb_override = 1;	/* non-zero allows pm of cfb with debuggers */
#else
int pm_cfb_override = 0;	/* non-zero allows pm of cfb with debuggers */
#endif

static dev_info_t *cfb_dip = 0;
static dev_info_t *cfb_dip_detaching = 0;
uint_t cfb_inuse = 0;
static ddi_softintr_t pm_soft_id;
static boolean_t pm_soft_pending;
int	pm_scans_disabled = 0;

/*
 * A structure to record the fact that one thread has borrowed a lock held
 * by another thread.  The context requires that the lender block on the
 * completion of the borrower.
 */
typedef struct lock_loan {
	struct lock_loan	*pmlk_next;
	kthread_t		*pmlk_borrower;
	kthread_t		*pmlk_lender;
	dev_info_t		*pmlk_dip;
} lock_loan_t;
static lock_loan_t lock_loan_head;	/* list head is a dummy element */

#ifdef	DEBUG
#ifdef	PMDDEBUG
#define	PMD_FUNC(func, name)	char *(func) = (name);
#else	/* !PMDDEBUG */
#define	PMD_FUNC(func, name)
#endif	/* PMDDEBUG */
#else	/* !DEBUG */
#define	PMD_FUNC(func, name)
#endif	/* DEBUG */


/*
 * Must be called before first device (including pseudo) attach
 */
void
pm_init_locks(void)
{
	mutex_init(&pm_scan_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pm_rsvp_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pm_compcnt_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pm_dep_thread_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pm_remdrv_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pm_loan_lock, NULL, MUTEX_DRIVER, NULL);
	rw_init(&pm_thresh_rwlock, NULL, RW_DEFAULT, NULL);
	rw_init(&pm_noinvol_rwlock, NULL, RW_DEFAULT, NULL);
	cv_init(&pm_dep_thread_cv, NULL, CV_DEFAULT, NULL);
}

static int pm_reset_timestamps(dev_info_t *, void *);

static boolean_t
pm_cpr_callb(void *arg, int code)
{
	_NOTE(ARGUNUSED(arg))
	static int auto_save;
	static pm_cpupm_t cpupm_save;

	switch (code) {
	case CB_CODE_CPR_CHKPT:
		/*
		 * Cancel scan or wait for scan in progress to finish.
		 * Other threads may be trying to restart the scan, so we
		 * have to keep at it until it sticks.
		 */
		mutex_enter(&pm_scan_lock);
		ASSERT(!pm_scans_disabled);
		pm_scans_disabled = 1;
		auto_save = autopm_enabled;
		autopm_enabled = 0;
		cpupm_save = cpupm;
		cpupm = PM_CPUPM_NOTSET;
		mutex_exit(&pm_scan_lock);
		ddi_walk_devs(ddi_root_node(), pm_scan_stop_walk, NULL);
		break;

	case CB_CODE_CPR_RESUME:
		ASSERT(!autopm_enabled);
		ASSERT(cpupm == PM_CPUPM_NOTSET);
		ASSERT(pm_scans_disabled);
		pm_scans_disabled = 0;
		/*
		 * Call pm_reset_timestamps to reset timestamps of each
		 * device to the time when the system is resumed so that their
		 * idleness can be re-calculated.  That's to avoid devices
		 * from being powered down right after resume if the system
		 * was in suspended mode long enough.
		 */
		ddi_walk_devs(ddi_root_node(), pm_reset_timestamps, NULL);

		autopm_enabled = auto_save;
		cpupm = cpupm_save;
		/*
		 * If there is any auto-pm device, get the scanning
		 * going.  Otherwise don't bother.
		 */
		ddi_walk_devs(ddi_root_node(), pm_rescan_walk, NULL);
		break;
	}
	return (B_TRUE);
}
/*
 * This callback routine is called when there is a system panic.  This
 * function exists for prototype matching.
 */
static boolean_t
pm_panic_callb(void *arg, int code)
{
	_NOTE(ARGUNUSED(arg, code))
	void pm_cfb_check_and_powerup(void);
	PMD(PMD_CFB, ("pm_panic_callb\n"))
	pm_cfb_check_and_powerup();
	return (B_TRUE);
}

static boolean_t
pm_halt_callb(void *arg, int code)
{
	_NOTE(ARGUNUSED(arg, code))
	return (B_TRUE);
}

static void pm_dep_thread(void);

/*
 * This needs to be called after the root and platform drivers are loaded,
 * and must be single-threaded with respect to driver attach/detach.
 */
void
pm_init(void)
{
	PMD_FUNC(pmf, "pm_init")
	char **mod;
	extern pri_t minclsyspri;

	pm_comps_notlowest = 0;
	pm_system_idle_threshold = pm_default_idle_threshold;
	pm_cpu_idle_threshold = 0;

	pm_cpr_cb_id = callb_add(pm_cpr_callb, (void *)NULL,
	    CB_CL_CPR_PM, "pm_cpr");
	pm_panic_cb_id = callb_add(pm_panic_callb, (void *)NULL,
	    CB_CL_PANIC, "pm_panic");
	pm_halt_cb_id = callb_add(pm_halt_callb, (void *)NULL,
	    CB_CL_HALT, "pm_halt");

	/*
	 * Create a thread to do dependency processing.
	 */
	(void) thread_create(NULL, 0, (void (*)())pm_dep_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);

	/*
	 * loadrootmodules already loaded these ppm drivers, now get them
	 * attached so they can claim the root drivers as they attach
	 */
	for (mod = platform_module_list; *mod; mod++) {
		if (i_ddi_attach_hw_nodes(*mod) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "!cannot load platform pm driver %s\n",
			    *mod);
		} else {
			PMD(PMD_DHR, ("%s: %s (%s)\n", pmf, *mod,
			    ddi_major_to_name(ddi_name_to_major(*mod))))
		}
	}
}

/*
 * pm_scan_init - create pm scan data structure.  Called (if autopm or cpupm
 * is enabled) when a device becomes power managed or after a failed detach,
 * when autopm is started via the PM_START_PM or PM_START_CPUPM ioctls, and
 * after a CPR resume to get all the devices scanning again.
 */
void
pm_scan_init(dev_info_t *dip)
{
	PMD_FUNC(pmf, "scan_init")
	pm_scan_t	*scanp;

	ASSERT(!PM_ISBC(dip));

	PM_LOCK_DIP(dip);
	scanp = PM_GET_PM_SCAN(dip);
	if (!scanp) {
		PMD(PMD_SCAN, ("%s: %s@%s(%s#%d): create scan data\n",
		    pmf, PM_DEVICE(dip)))
		scanp = kmem_zalloc(sizeof (pm_scan_t), KM_SLEEP);
		DEVI(dip)->devi_pm_scan = scanp;
	} else if (scanp->ps_scan_flags & PM_SCAN_STOP) {
		PMD(PMD_SCAN, ("%s: %s@%s(%s#%d): "
		    "clear PM_SCAN_STOP flag\n", pmf, PM_DEVICE(dip)))
		scanp->ps_scan_flags &= ~PM_SCAN_STOP;
	}
	PM_UNLOCK_DIP(dip);
}

/*
 * pm_scan_fini - remove pm scan data structure when stopping pm on the device
 */
void
pm_scan_fini(dev_info_t *dip)
{
	PMD_FUNC(pmf, "scan_fini")
	pm_scan_t	*scanp;

	PMD(PMD_SCAN, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip)))
	ASSERT(!PM_ISBC(dip));
	PM_LOCK_DIP(dip);
	scanp = PM_GET_PM_SCAN(dip);
	if (!scanp) {
		PM_UNLOCK_DIP(dip);
		return;
	}

	ASSERT(!scanp->ps_scan_id && !(scanp->ps_scan_flags &
	    (PM_SCANNING | PM_SCAN_DISPATCHED | PM_SCAN_AGAIN)));

	kmem_free(scanp, sizeof (pm_scan_t));
	DEVI(dip)->devi_pm_scan = NULL;
	PM_UNLOCK_DIP(dip);
}

/*
 * Given a pointer to a component struct, return the current power level
 * (struct contains index unless it is a continuous level).
 * Located here in hopes of getting both this and dev_is_needed into the
 * cache together.
 */
static int
cur_power(pm_component_t *cp)
{
	if (cp->pmc_cur_pwr == PM_LEVEL_UNKNOWN)
		return (cp->pmc_cur_pwr);

	return (cp->pmc_comp.pmc_lvals[cp->pmc_cur_pwr]);
}
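
/*
 * Example with made-up values: a component whose pmc_lvals array is
 * {0, 10, 25} and whose pmc_cur_pwr index is 1 is at level 10; cur_power()
 * returns level values, not indices (power_val_to_index(), later in this
 * file, performs the inverse mapping).
 */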

static char *
pm_decode_direction(int direction)
{
	switch (direction) {
	case PM_LEVEL_UPONLY:
		return ("up");

	case PM_LEVEL_EXACT:
		return ("exact");

	case PM_LEVEL_DOWNONLY:
		return ("down");

	default:
		return ("INVALID DIRECTION");
	}
}

char *
pm_decode_op(pm_bus_power_op_t op)
{
	switch (op) {
	case BUS_POWER_CHILD_PWRCHG:
		return ("CHILD_PWRCHG");
	case BUS_POWER_NEXUS_PWRUP:
		return ("NEXUS_PWRUP");
	case BUS_POWER_PRE_NOTIFICATION:
		return ("PRE_NOTIFICATION");
	case BUS_POWER_POST_NOTIFICATION:
		return ("POST_NOTIFICATION");
	case BUS_POWER_HAS_CHANGED:
		return ("HAS_CHANGED");
	case BUS_POWER_NOINVOL:
		return ("NOINVOL");
	default:
		return ("UNKNOWN OP");
	}
}

/*
 * Returns true if level is a possible (valid) power level for component
 */
int
e_pm_valid_power(dev_info_t *dip, int cmpt, int level)
{
	PMD_FUNC(pmf, "e_pm_valid_power")
	pm_component_t *cp = PM_CP(dip, cmpt);
	int i;
	int *ip = cp->pmc_comp.pmc_lvals;
	int limit = cp->pmc_comp.pmc_numlevels;

	if (level < 0)
		return (0);
	for (i = 0; i < limit; i++) {
		if (level == *ip++)
			return (1);
	}
#ifdef DEBUG
	if (pm_debug & PMD_FAIL) {
		ip = cp->pmc_comp.pmc_lvals;

		for (i = 0; i < limit; i++)
			PMD(PMD_FAIL, ("%s: index=%d, level=%d\n",
			    pmf, i, *ip++))
	}
#endif
	return (0);
}
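
/*
 * Continuing the hypothetical pm-components example from the top of this
 * file (levels 0 and 1): e_pm_valid_power(dip, 0, 1) returns 1, while
 * e_pm_valid_power(dip, 0, 2) returns 0 because 2 is not in pmc_lvals.
 */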

static int pm_start(dev_info_t *dip);
/*
 * Returns true if device is pm'd (after calling pm_start if need be)
 */
int
e_pm_valid_info(dev_info_t *dip, pm_info_t **infop)
{
	pm_info_t *info;

	/*
	 * Check if the device is power managed, and if not, make it so.
	 * To make the common case (device is power managed already)
	 * fast, we check without the lock.  If the device is not already
	 * power managed, then we take the lock and go the long route
	 * to get it managed.  Devices never go unmanaged until they
	 * detach.
	 */
	info = PM_GET_PM_INFO(dip);
	if (!info) {
		if (!DEVI_IS_ATTACHING(dip)) {
			return (0);
		}
		if (pm_start(dip) != DDI_SUCCESS) {
			return (0);
		}
		info = PM_GET_PM_INFO(dip);
	}
	ASSERT(info);
	if (infop != NULL)
		*infop = info;
	return (1);
}

int
e_pm_valid_comp(dev_info_t *dip, int cmpt, pm_component_t **cpp)
{
	if (cmpt >= 0 && cmpt < PM_NUMCMPTS(dip)) {
		if (cpp != NULL)
			*cpp = PM_CP(dip, cmpt);
		return (1);
	} else {
		return (0);
	}
}

/*
 * Internal guts of ddi_dev_is_needed and pm_raise/lower_power
 */
static int
dev_is_needed(dev_info_t *dip, int cmpt, int level, int direction)
{
	PMD_FUNC(pmf, "din")
	pm_component_t *cp;
	char *pathbuf;
	int result;

	ASSERT(direction == PM_LEVEL_UPONLY || direction == PM_LEVEL_DOWNONLY);
	if (!e_pm_valid_info(dip, NULL) || !e_pm_valid_comp(dip, cmpt, &cp) ||
	    !e_pm_valid_power(dip, cmpt, level))
		return (DDI_FAILURE);

	PMD(PMD_DIN, ("%s: %s@%s(%s#%d) cmpt=%d, dir=%s, new=%d, cur=%d\n",
	    pmf, PM_DEVICE(dip), cmpt, pm_decode_direction(direction),
	    level, cur_power(cp)))

	if (pm_set_power(dip, cmpt, level, direction,
	    PM_CANBLOCK_BLOCK, 0, &result) != DDI_SUCCESS) {
		if (direction == PM_LEVEL_UPONLY) {
			pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			(void) ddi_pathname(dip, pathbuf);
			cmn_err(CE_WARN, "Device %s failed to power up.",
			    pathbuf);
			kmem_free(pathbuf, MAXPATHLEN);
		}
		PMD(PMD_DIN | PMD_FAIL, ("%s: %s@%s(%s#%d) [%d] %s->%d failed, "
		    "errno %d\n", pmf, PM_DEVICE(dip), cmpt,
		    pm_decode_direction(direction), level, result))
		return (DDI_FAILURE);
	}

	PMD(PMD_RESCAN | PMD_DIN, ("%s: pm_rescan %s@%s(%s#%d)\n", pmf,
	    PM_DEVICE(dip)))
	pm_rescan(dip);
	return (DDI_SUCCESS);
}
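
/*
 * For context, a sketch only (driver code, not part of this file): a driver
 * reaches dev_is_needed() through pm_raise_power(9F) before touching
 * hardware that may be powered down, e.g.
 *
 *	if (pm_raise_power(dip, 0, MYDRV_FULL_POWER) != DDI_SUCCESS)
 *		return (EIO);
 *
 * where MYDRV_FULL_POWER is a hypothetical driver-defined level from its
 * pm-components(9P) property.
 */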

/*
 * We can get multiple pm_rescan() threads; if one of them discovers
 * that no scan is running at the moment, it kicks it into action.
 * Otherwise, it tells the current scanning thread to scan again when
 * it is done by asserting the PM_SCAN_AGAIN flag.  The PM_SCANNING and
 * PM_SCAN_AGAIN flags are used to regulate scan, to make sure only one
 * thread at a time runs the pm_scan_dev() code.
 */
void
pm_rescan(void *arg)
{
	PMD_FUNC(pmf, "rescan")
	dev_info_t	*dip = (dev_info_t *)arg;
	pm_info_t	*info;
	pm_scan_t	*scanp;
	timeout_id_t	scanid;

	PMD(PMD_SCAN, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip)))
	PM_LOCK_DIP(dip);
	info = PM_GET_PM_INFO(dip);
	scanp = PM_GET_PM_SCAN(dip);
	if (pm_scans_disabled || !PM_SCANABLE(dip) || !info || !scanp ||
	    (scanp->ps_scan_flags & PM_SCAN_STOP)) {
		PM_UNLOCK_DIP(dip);
		return;
	}
	if (scanp->ps_scan_flags & PM_SCANNING) {
		scanp->ps_scan_flags |= PM_SCAN_AGAIN;
		PM_UNLOCK_DIP(dip);
		return;
	} else if (scanp->ps_scan_id) {
		scanid = scanp->ps_scan_id;
		scanp->ps_scan_id = 0;
		PMD(PMD_SCAN, ("%s: %s@%s(%s#%d): cancel timeout scanid %lx\n",
		    pmf, PM_DEVICE(dip), (ulong_t)scanid))
		PM_UNLOCK_DIP(dip);
		(void) untimeout(scanid);
		PM_LOCK_DIP(dip);
	}

	/*
	 * Dispatching pm_scan during attach time is risky due to the fact
	 * that attach might soon fail and the dip be dissolved, and a panic
	 * may happen while attempting to stop scan.  So schedule a pm_rescan
	 * instead.  (Note that if either of the first two terms are true,
	 * taskq_dispatch will not be invoked).
	 *
	 * Multiple pm_scan dispatching is unnecessary and costly to keep
	 * track of.  The PM_SCAN_DISPATCHED flag is used between pm_rescan
	 * and pm_scan to regulate the dispatching.
	 *
	 * Scan is stopped before the device is detached (in pm_detaching())
	 * but it may get re-started during the post_detach processing if the
	 * driver fails to detach.
	 */
	if (DEVI_IS_ATTACHING(dip) ||
	    (scanp->ps_scan_flags & PM_SCAN_DISPATCHED) ||
	    taskq_dispatch(system_taskq, pm_scan, (void *)dip, TQ_NOSLEEP) ==
	    TASKQID_INVALID) {
		PMD(PMD_SCAN, ("%s: %s@%s(%s#%d): attaching, pm_scan already "
		    "dispatched or dispatching failed\n", pmf, PM_DEVICE(dip)))
		if (scanp->ps_scan_id) {
			scanid = scanp->ps_scan_id;
			scanp->ps_scan_id = 0;
			PM_UNLOCK_DIP(dip);
			(void) untimeout(scanid);
			PM_LOCK_DIP(dip);
			if (scanp->ps_scan_id) {
				PMD(PMD_SCAN, ("%s: %s@%s(%s#%d): a competing "
				    "thread scheduled pm_rescan, scanid %lx\n",
				    pmf, PM_DEVICE(dip),
				    (ulong_t)scanp->ps_scan_id))
				PM_UNLOCK_DIP(dip);
				return;
			}
		}
		scanp->ps_scan_id = timeout(pm_rescan, (void *)dip,
		    (scanp->ps_idle_down ? pm_id_ticks :
		    (PM_MIN_SCAN(dip) * hz)));
		PMD(PMD_SCAN, ("%s: %s@%s(%s#%d): scheduled next pm_rescan, "
		    "scanid %lx\n", pmf, PM_DEVICE(dip),
		    (ulong_t)scanp->ps_scan_id))
	} else {
		PMD(PMD_SCAN, ("%s: dispatched pm_scan for %s@%s(%s#%d)\n",
		    pmf, PM_DEVICE(dip)))
		scanp->ps_scan_flags |= PM_SCAN_DISPATCHED;
	}
	PM_UNLOCK_DIP(dip);
}
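
/*
 * Scenario, for illustration: thread A is inside pm_scan_dev() when thread
 * B calls pm_rescan().  B sees PM_SCANNING set, sets PM_SCAN_AGAIN, and
 * returns immediately; the do-while loop in pm_scan() below then has A run
 * pm_scan_dev() once more rather than a second thread being dispatched.
 */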

void
pm_scan(void *arg)
{
	PMD_FUNC(pmf, "scan")
	dev_info_t	*dip = (dev_info_t *)arg;
	pm_scan_t	*scanp;
	time_t		nextscan;

	PMD(PMD_SCAN, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip)))

	PM_LOCK_DIP(dip);
	scanp = PM_GET_PM_SCAN(dip);
	ASSERT(scanp && PM_GET_PM_INFO(dip));

	if (pm_scans_disabled || !PM_SCANABLE(dip) ||
	    (scanp->ps_scan_flags & PM_SCAN_STOP)) {
		scanp->ps_scan_flags &= ~(PM_SCAN_AGAIN | PM_SCAN_DISPATCHED);
		PM_UNLOCK_DIP(dip);
		return;
	}

	if (scanp->ps_idle_down) {
		/*
		 * make sure we remember idledown was in effect until
		 * we've completed the scan
		 */
		PMID_SET_SCANS(scanp->ps_idle_down)
		PMD(PMD_IDLEDOWN, ("%s: %s@%s(%s#%d): idledown starts "
		    "(pmid %x)\n", pmf, PM_DEVICE(dip), scanp->ps_idle_down))
	}

	/* it is possible for two threads to be running pm_scan() */
	if (scanp->ps_scan_flags & PM_SCANNING) {
		scanp->ps_scan_flags |= PM_SCAN_AGAIN;
		PMD(PMD_SCAN, ("%s: scanning, will scan %s@%s(%s#%d) again\n",
		    pmf, PM_DEVICE(dip)))
		scanp->ps_scan_flags &= ~PM_SCAN_DISPATCHED;
		PM_UNLOCK_DIP(dip);
		return;
	}

	scanp->ps_scan_flags |= PM_SCANNING;
	scanp->ps_scan_flags &= ~PM_SCAN_DISPATCHED;
	do {
		scanp->ps_scan_flags &= ~PM_SCAN_AGAIN;
		PM_UNLOCK_DIP(dip);
		nextscan = pm_scan_dev(dip);
		PM_LOCK_DIP(dip);
	} while (scanp->ps_scan_flags & PM_SCAN_AGAIN);

	ASSERT(scanp->ps_scan_flags & PM_SCANNING);
	scanp->ps_scan_flags &= ~PM_SCANNING;

	if (scanp->ps_idle_down) {
		scanp->ps_idle_down &= ~PMID_SCANS;
		PMD(PMD_IDLEDOWN, ("%s: %s@%s(%s#%d): idledown ends "
		    "(pmid %x)\n", pmf, PM_DEVICE(dip), scanp->ps_idle_down))
	}

	/* schedule for next idle check */
	if (nextscan != LONG_MAX) {
		if (nextscan > (LONG_MAX / hz))
			nextscan = (LONG_MAX - 1) / hz;
		if (scanp->ps_scan_id) {
			PMD(PMD_SCAN, ("%s: %s@%s(%s#%d): while scanning "
			    "another rescan scheduled scanid(%lx)\n", pmf,
			    PM_DEVICE(dip), (ulong_t)scanp->ps_scan_id))
			PM_UNLOCK_DIP(dip);
			return;
		} else if (!(scanp->ps_scan_flags & PM_SCAN_STOP)) {
			scanp->ps_scan_id = timeout(pm_rescan, (void *)dip,
			    (clock_t)(nextscan * hz));
			PMD(PMD_SCAN, ("%s: nextscan for %s@%s(%s#%d) in "
			    "%lx sec, scanid(%lx)\n", pmf, PM_DEVICE(dip),
			    (ulong_t)nextscan, (ulong_t)scanp->ps_scan_id))
		}
	}
	PM_UNLOCK_DIP(dip);
}

void
pm_get_timestamps(dev_info_t *dip, time_t *valuep)
{
	int components = PM_NUMCMPTS(dip);
	int i;

	ASSERT(components > 0);
	PM_LOCK_BUSY(dip);	/* so we get a consistent view */
	for (i = 0; i < components; i++) {
		valuep[i] = PM_CP(dip, i)->pmc_timestamp;
	}
	PM_UNLOCK_BUSY(dip);
}

/*
 * Returns true if the device needs to be kept up because it exported the
 * "no-involuntary-power-cycles" property or we're pretending it did (console
 * fb case) or it is an ancestor of such a device and has used up the "one
 * free cycle" allowed when all such leaf nodes have voluntarily powered down
 * upon detach.
 */
int
pm_noinvol(dev_info_t *dip)
{
	PMD_FUNC(pmf, "noinvol")

	/*
	 * This doesn't change over the life of a driver, so no locking needed
	 */
	if (PM_IS_CFB(dip)) {
		PMD(PMD_NOINVOL | PMD_CFB, ("%s: inhibits CFB %s@%s(%s#%d)\n",
		    pmf, PM_DEVICE(dip)))
		return (1);
	}
	/*
	 * Not an issue if no such kids
	 */
	if (DEVI(dip)->devi_pm_noinvolpm == 0) {
#ifdef DEBUG
		if (DEVI(dip)->devi_pm_volpmd != 0) {
			dev_info_t *pdip = dip;
			do {
				PMD(PMD_NOINVOL, ("%s: %s@%s(%s#%d) noinvol %d "
				    "volpmd %d\n", pmf, PM_DEVICE(pdip),
				    DEVI(pdip)->devi_pm_noinvolpm,
				    DEVI(pdip)->devi_pm_volpmd))
				pdip = ddi_get_parent(pdip);
			} while (pdip);
		}
#endif
		ASSERT(DEVI(dip)->devi_pm_volpmd == 0);
		return (0);
	}

	/*
	 * Since we now maintain the counts correct at every node, we no
	 * longer need to look up the tree.  An ancestor cannot use up the
	 * free cycle without the children getting their counts adjusted.
	 */

#ifdef	DEBUG
	if (DEVI(dip)->devi_pm_noinvolpm != DEVI(dip)->devi_pm_volpmd)
		PMD(PMD_NOINVOL, ("%s: (%d != %d) inhibits %s@%s(%s#%d)\n", pmf,
		    DEVI(dip)->devi_pm_noinvolpm, DEVI(dip)->devi_pm_volpmd,
		    PM_DEVICE(dip)))
#endif
	return (DEVI(dip)->devi_pm_noinvolpm != DEVI(dip)->devi_pm_volpmd);
}
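
/*
 * Worked example (invented counts): if a subtree root has
 * devi_pm_noinvolpm == 3 and devi_pm_volpmd == 3, all three noinvol
 * descendants powered down voluntarily at detach time, the "free cycle"
 * has not yet been consumed, and pm_noinvol() returns false for the root.
 */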

static int cur_threshold(dev_info_t *, int);
static int pm_next_lower_power(pm_component_t *, int);

/*
 * This function performs the actual scanning of the device.
 * It attempts to power off the indicated device's components if they have
 * been idle and other restrictions are met.
 * pm_scan_dev calculates and returns when the next scan should happen for
 * this device.
 */
time_t
pm_scan_dev(dev_info_t *dip)
{
	PMD_FUNC(pmf, "scan_dev")
	pm_scan_t	*scanp;
	time_t		*timestamp, idletime, now, thresh;
	time_t		timeleft = 0;
#ifdef PMDDEBUG
	int		curpwr;
#endif
	int		i, nxtpwr, pwrndx, unused;
	size_t		size;
	pm_component_t	*cp;
	dev_info_t	*pdip = ddi_get_parent(dip);
	clock_t		min_scan = pm_default_min_scan;

	/*
	 * skip attaching device
	 */
	if (DEVI_IS_ATTACHING(dip)) {
		PMD(PMD_SCAN, ("%s: %s@%s(%s#%d) is attaching, timeleft(%lx)\n",
		    pmf, PM_DEVICE(dip), min_scan))
		return (min_scan);
	}

	PM_LOCK_DIP(dip);
	scanp = PM_GET_PM_SCAN(dip);
	min_scan = PM_MIN_SCAN(dip);
	ASSERT(scanp && PM_GET_PM_INFO(dip));

	PMD(PMD_SCAN, ("%s: [BEGIN %s@%s(%s#%d)]\n", pmf, PM_DEVICE(dip)))
	PMD(PMD_SCAN, ("%s: %s@%s(%s#%d): kuc is %d\n", pmf, PM_DEVICE(dip),
	    PM_KUC(dip)))

	/* no scan under the following conditions */
	if (pm_scans_disabled || !PM_SCANABLE(dip) ||
	    (scanp->ps_scan_flags & PM_SCAN_STOP) ||
	    (PM_KUC(dip) != 0) ||
	    PM_ISDIRECT(dip) || pm_noinvol(dip)) {
		PM_UNLOCK_DIP(dip);
		PMD(PMD_SCAN, ("%s: [END, %s@%s(%s#%d)] no scan, "
		    "scan_disabled(%d), apm_enabled(%d), cpupm(%d), "
		    "kuc(%d), %s directpm, %s pm_noinvol\n",
		    pmf, PM_DEVICE(dip), pm_scans_disabled, autopm_enabled,
		    cpupm, PM_KUC(dip),
		    PM_ISDIRECT(dip) ? "is" : "is not",
		    pm_noinvol(dip) ? "is" : "is not"))
		return (LONG_MAX);
	}
	PM_UNLOCK_DIP(dip);

	if (!ndi_devi_tryenter(pdip)) {
		PMD(PMD_SCAN, ("%s: %s@%s(%s#%d) can't hold pdip",
		    pmf, PM_DEVICE(pdip)))
		return ((time_t)1);
	}
	now = gethrestime_sec();
	size = PM_NUMCMPTS(dip) * sizeof (time_t);
	timestamp = kmem_alloc(size, KM_SLEEP);
	pm_get_timestamps(dip, timestamp);

	/*
	 * Since we removed support for backwards compatible devices
	 * (see big comment at top of file),
	 * it is no longer required to deal with component 0 last.
	 */
	for (i = 0; i < PM_NUMCMPTS(dip); i++) {
		/*
		 * If already off (an optimization, perhaps)
		 */
		cp = PM_CP(dip, i);
		pwrndx = cp->pmc_cur_pwr;
#ifdef PMDDEBUG
		curpwr = (pwrndx == PM_LEVEL_UNKNOWN) ?
		    PM_LEVEL_UNKNOWN :
		    cp->pmc_comp.pmc_lvals[pwrndx];
#endif

		if (pwrndx == 0) {
			PMD(PMD_SCAN, ("%s: %s@%s(%s#%d) comp %d off or "
			    "lowest\n", pmf, PM_DEVICE(dip), i))
			/* skip device if off or at its lowest */
			continue;
		}

		thresh = cur_threshold(dip, i);		/* comp i threshold */
		if ((timestamp[i] == 0) || (cp->pmc_busycount > 0)) {
			/* was busy or newly became busy by another thread */
			if (timeleft == 0)
				timeleft = max(thresh, min_scan);
			else
				timeleft = min(
				    timeleft, max(thresh, min_scan));
			continue;
		}

		idletime = now - timestamp[i];		/* idle time */
		PMD(PMD_SCAN, ("%s: %s@%s(%s#%d) comp %d idle time %lx\n",
		    pmf, PM_DEVICE(dip), i, idletime))
		if (idletime >= thresh || PM_IS_PID(dip)) {
			nxtpwr = pm_next_lower_power(cp, pwrndx);
			PMD(PMD_SCAN, ("%s: %s@%s(%s#%d) comp %d, %d->%d\n",
			    pmf, PM_DEVICE(dip), i, curpwr, nxtpwr))
			if (pm_set_power(dip, i, nxtpwr, PM_LEVEL_DOWNONLY,
			    PM_CANBLOCK_FAIL, 1, &unused) != DDI_SUCCESS &&
			    PM_CURPOWER(dip, i) != nxtpwr) {
				PMD(PMD_SCAN, ("%s: %s@%s(%s#%d) comp %d, "
				    "%d->%d Failed\n", pmf, PM_DEVICE(dip),
				    i, curpwr, nxtpwr))
				timeleft = min_scan;
				continue;
			} else {
				PMD(PMD_SCAN, ("%s: %s@%s(%s#%d) comp %d, "
				    "%d->%d, GOOD curpwr %d\n", pmf,
				    PM_DEVICE(dip), i, curpwr, nxtpwr,
				    cur_power(cp)))

				if (nxtpwr == 0)	/* component went off */
					continue;

				/*
				 * scan to next lower level
				 */
				if (timeleft == 0)
					timeleft = max(
					    1, cur_threshold(dip, i));
				else
					timeleft = min(timeleft,
					    max(1, cur_threshold(dip, i)));
				PMD(PMD_SCAN, ("%s: %s@%s(%s#%d) comp %d, "
				    "timeleft(%lx)\n", pmf, PM_DEVICE(dip),
				    i, timeleft))
			}
		} else {	/* comp not idle long enough */
			if (timeleft == 0)
				timeleft = thresh - idletime;
			else
				timeleft = min(timeleft, (thresh - idletime));
			PMD(PMD_SCAN, ("%s: %s@%s(%s#%d) comp %d, timeleft="
			    "%lx\n", pmf, PM_DEVICE(dip), i, timeleft))
		}
	}
	ndi_devi_exit(pdip);
	kmem_free(timestamp, size);
	PMD(PMD_SCAN, ("%s: [END %s@%s(%s#%d)] timeleft(%lx)\n", pmf,
	    PM_DEVICE(dip), timeleft))

	/*
	 * if components are already at lowest level, timeleft is left 0
	 */
	return ((timeleft == 0) ? LONG_MAX : timeleft);
}
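
/*
 * Worked example (made-up numbers): a component with a 60 second threshold
 * that has so far been idle 45 seconds contributes thresh - idletime = 15
 * seconds to timeleft, so pm_scan() reschedules pm_rescan() via timeout(9F)
 * roughly 15 seconds from now instead of polling at min_scan granularity.
 */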

/*
 * pm_scan_stop - cancel scheduled pm_rescan,
 *	wait for termination of dispatched pm_scan thread
 *	and active pm_scan_dev thread.
 */
void
pm_scan_stop(dev_info_t *dip)
{
	PMD_FUNC(pmf, "scan_stop")
	pm_scan_t	*scanp;
	timeout_id_t	scanid;

	PMD(PMD_SCAN, ("%s: [BEGIN %s@%s(%s#%d)]\n", pmf, PM_DEVICE(dip)))
	PM_LOCK_DIP(dip);
	scanp = PM_GET_PM_SCAN(dip);
	if (!scanp) {
		PMD(PMD_SCAN, ("%s: [END %s@%s(%s#%d)] scan not initialized\n",
		    pmf, PM_DEVICE(dip)))
		PM_UNLOCK_DIP(dip);
		return;
	}
	scanp->ps_scan_flags |= PM_SCAN_STOP;

	/* cancel scheduled scan taskq */
	while (scanp->ps_scan_id) {
		scanid = scanp->ps_scan_id;
		scanp->ps_scan_id = 0;
		PM_UNLOCK_DIP(dip);
		(void) untimeout(scanid);
		PM_LOCK_DIP(dip);
	}

	while (scanp->ps_scan_flags & (PM_SCANNING | PM_SCAN_DISPATCHED)) {
		PM_UNLOCK_DIP(dip);
		delay(1);
		PM_LOCK_DIP(dip);
	}
	PM_UNLOCK_DIP(dip);
	PMD(PMD_SCAN, ("%s: [END %s@%s(%s#%d)]\n", pmf, PM_DEVICE(dip)))
}

int
pm_scan_stop_walk(dev_info_t *dip, void *arg)
{
	_NOTE(ARGUNUSED(arg))

	if (!PM_GET_PM_SCAN(dip))
		return (DDI_WALK_CONTINUE);
	ASSERT(!PM_ISBC(dip));
	pm_scan_stop(dip);
	return (DDI_WALK_CONTINUE);
}

/*
 * Converts a power level value to its index
 */
static int
power_val_to_index(pm_component_t *cp, int val)
{
	int limit, i, *ip;

	ASSERT(val != PM_LEVEL_UPONLY && val != PM_LEVEL_DOWNONLY &&
	    val != PM_LEVEL_EXACT);
	/* convert power value into index (i) */
	limit = cp->pmc_comp.pmc_numlevels;
	ip = cp->pmc_comp.pmc_lvals;
	for (i = 0; i < limit; i++)
		if (val == *ip++)
			return (i);
	return (-1);
}

/*
 * Converts a numeric power level to a printable string
 */
static char *
power_val_to_string(pm_component_t *cp, int val)
{
	int index;

	if (val == PM_LEVEL_UPONLY)
		return ("<UPONLY>");

	if (val == PM_LEVEL_UNKNOWN ||
	    (index = power_val_to_index(cp, val)) == -1)
		return ("<LEVEL_UNKNOWN>");

	return (cp->pmc_comp.pmc_lnames[index]);
}

/*
 * Return true if this node has been claimed by a ppm.
 */
static int
pm_ppm_claimed(dev_info_t *dip)
{
	return (PPM(dip) != NULL);
}

/*
 * A node which was voluntarily power managed has just used up its "free
 * cycle" and needs its volpmd field cleared, and the same done to all its
 * descendants.
 */
static void
pm_clear_volpm_dip(dev_info_t *dip)
{
	PMD_FUNC(pmf, "clear_volpm_dip")

	if (dip == NULL)
		return;
	PMD(PMD_NOINVOL, ("%s: clear volpm from %s@%s(%s#%d)\n", pmf,
	    PM_DEVICE(dip)))
	DEVI(dip)->devi_pm_volpmd = 0;
	for (dip = ddi_get_child(dip); dip; dip = ddi_get_next_sibling(dip)) {
		pm_clear_volpm_dip(dip);
	}
}

/*
 * A node which was voluntarily power managed has used up the "free cycles"
 * for the subtree that it is the root of.  Scan through the list of detached
 * nodes and adjust the counts of any that are descendants of the node.
 */
static void
pm_clear_volpm_list(dev_info_t *dip)
{
	PMD_FUNC(pmf, "clear_volpm_list")
	char	*pathbuf;
	size_t	len;
	pm_noinvol_t *ip;

	pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, pathbuf);
	len = strlen(pathbuf);
	PMD(PMD_NOINVOL, ("%s: clear volpm list %s\n", pmf, pathbuf))
	rw_enter(&pm_noinvol_rwlock, RW_WRITER);
	for (ip = pm_noinvol_head; ip; ip = ip->ni_next) {
		PMD(PMD_NOINVOL, ("%s: clear volpm: ni_path %s\n", pmf,
		    ip->ni_path))
		if (strncmp(pathbuf, ip->ni_path, len) == 0 &&
		    ip->ni_path[len] == '/') {
			PMD(PMD_NOINVOL, ("%s: clear volpm: %s\n", pmf,
			    ip->ni_path))
			ip->ni_volpmd = 0;
			ip->ni_wasvolpmd = 0;
		}
	}
	kmem_free(pathbuf, MAXPATHLEN);
	rw_exit(&pm_noinvol_rwlock);
}

/*
 * Powers a device, suspending or resuming the driver if it is a backward
 * compatible device, calling into ppm to change power level.
 * Called with the component's power lock held.
 */
static int
power_dev(dev_info_t *dip, int comp, int level, int old_level,
    pm_canblock_t canblock, pm_ppm_devlist_t **devlist)
{
	PMD_FUNC(pmf, "power_dev")
	power_req_t power_req;
	int		power_op_ret;	/* DDI_SUCCESS or DDI_FAILURE */
	int		resume_needed = 0;
	int		suspended = 0;
	int		result;
#ifdef PMDDEBUG
	struct pm_component *cp = PM_CP(dip, comp);
#endif
	int		bc = PM_ISBC(dip);
	int pm_all_components_off(dev_info_t *);
	int		clearvolpmd = 0;
	char		pathbuf[MAXNAMELEN];
#ifdef PMDDEBUG
	char *ppmname, *ppmaddr;
#endif
	/*
	 * If this is comp 0 of a backwards compat device and we are
	 * going to take the power away, we need to detach it with the
	 * DDI_PM_SUSPEND command.
	 */
	if (bc && comp == 0 && POWERING_OFF(old_level, level)) {
		if (devi_detach(dip, DDI_PM_SUSPEND) != DDI_SUCCESS) {
			/* We could not suspend before turning cmpt zero off */
			PMD(PMD_ERROR, ("%s: could not suspend %s@%s(%s#%d)\n",
			    pmf, PM_DEVICE(dip)))
			return (DDI_FAILURE);
		} else {
			DEVI(dip)->devi_pm_flags |= PMC_SUSPENDED;
			suspended++;
		}
	}
	power_req.request_type = PMR_PPM_SET_POWER;
	power_req.req.ppm_set_power_req.who = dip;
	power_req.req.ppm_set_power_req.cmpt = comp;
	power_req.req.ppm_set_power_req.old_level = old_level;
	power_req.req.ppm_set_power_req.new_level = level;
	power_req.req.ppm_set_power_req.canblock = canblock;
	power_req.req.ppm_set_power_req.cookie = NULL;
#ifdef PMDDEBUG
	if (pm_ppm_claimed(dip)) {
		ppmname = PM_NAME(PPM(dip));
		ppmaddr = PM_ADDR(PPM(dip));

	} else {
		ppmname = "noppm";
		ppmaddr = "0";
	}
	PMD(PMD_PPM, ("%s: %s@%s(%s#%d):%s[%d] %s (%d) -> %s (%d) via %s@%s\n",
	    pmf, PM_DEVICE(dip), cp->pmc_comp.pmc_name, comp,
	    power_val_to_string(cp, old_level), old_level,
	    power_val_to_string(cp, level), level, ppmname, ppmaddr))
#endif
	/*
	 * If a non-bc noinvolpm device is turning its first comp on, or a
	 * noinvolpm bc device's comp 0 is powering on, then we count it as a
	 * power cycle against its voluntary count.
	 */
	if (DEVI(dip)->devi_pm_volpmd &&
	    (!bc && pm_all_components_off(dip) && level != 0) ||
	    (bc && comp == 0 && POWERING_ON(old_level, level)))
		clearvolpmd = 1;
	if ((power_op_ret = pm_ctlops(PPM(dip), dip, DDI_CTLOPS_POWER,
	    &power_req, &result)) == DDI_SUCCESS) {
		/*
		 * Now do involuntary pm accounting; if we've just cycled
		 * power on a voluntarily pm'd node, and by inference on its
		 * entire subtree, we need to set the subtree (including those
		 * nodes already detached) volpmd counts to 0, and subtract
		 * out the value of the current node's volpmd count from the
		 * ancestors.
		 */
		if (clearvolpmd) {
			int volpmd = DEVI(dip)->devi_pm_volpmd;
			pm_clear_volpm_dip(dip);
			pm_clear_volpm_list(dip);
			if (volpmd) {
				(void) ddi_pathname(dip, pathbuf);
				(void) pm_noinvol_update(PM_BP_NOINVOL_POWER,
				    volpmd, 0, pathbuf, dip);
			}
		}
	} else {
		PMD(PMD_FAIL, ("%s: can't set comp %d (%s) of %s@%s(%s#%d) "
		    "to level %d (%s)\n", pmf, comp, cp->pmc_comp.pmc_name,
		    PM_DEVICE(dip), level, power_val_to_string(cp, level)))
	}
	/*
	 * If some other devices were also powered up (e.g. other cpus in
	 * the same domain) return a pointer to that list
	 */
	if (devlist) {
		*devlist = (pm_ppm_devlist_t *)
		    power_req.req.ppm_set_power_req.cookie;
	}
	/*
	 * We will have to resume the device if the device is a backwards
	 * compat device and either of the following is true:
	 * -This is comp 0 and we have successfully powered it up
	 * -This is comp 0 and we have failed to power it down.  Resume is
	 *  needed because we have suspended it above
	 */

	if (bc && comp == 0) {
		ASSERT(PM_ISDIRECT(dip) || DEVI_IS_DETACHING(dip));
		if (power_op_ret == DDI_SUCCESS) {
			if (POWERING_ON(old_level, level)) {
				/*
				 * It must be either suspended or resumed
				 * via pm_power_has_changed path
				 */
				ASSERT((DEVI(dip)->devi_pm_flags &
				    PMC_SUSPENDED) ||
				    (PM_CP(dip, comp)->pmc_flags &
				    PM_PHC_WHILE_SET_POWER));

				resume_needed = suspended;
			}
		} else {
			if (POWERING_OFF(old_level, level)) {
				/*
				 * It must be either suspended or resumed
				 * via pm_power_has_changed path
				 */
				ASSERT((DEVI(dip)->devi_pm_flags &
				    PMC_SUSPENDED) ||
				    (PM_CP(dip, comp)->pmc_flags &
				    PM_PHC_WHILE_SET_POWER));

				resume_needed = suspended;
			}
		}
	}
	if (resume_needed) {
		ASSERT(DEVI(dip)->devi_pm_flags & PMC_SUSPENDED);
		/* ppm is not interested in DDI_PM_RESUME */
		if ((power_op_ret = devi_attach(dip, DDI_PM_RESUME)) ==
		    DDI_SUCCESS) {
			DEVI(dip)->devi_pm_flags &= ~PMC_SUSPENDED;
		} else
			cmn_err(CE_WARN, "!pm: Can't resume %s@%s(%s#%d)",
			    PM_DEVICE(dip));
	}
	return (power_op_ret);
}

/*
 * Return true if we are the owner or a borrower of the devi lock.  See
 * pm_lock_power_single() about borrowing the lock.
 */
static int
pm_devi_lock_held(dev_info_t *dip)
{
	lock_loan_t *cur;

	if (DEVI_BUSY_OWNED(dip))
		return (1);

	/* return false if no locks borrowed */
	if (lock_loan_head.pmlk_next == NULL)
		return (0);

	mutex_enter(&pm_loan_lock);
	/* see if our thread is registered as a lock borrower. */
	for (cur = lock_loan_head.pmlk_next; cur; cur = cur->pmlk_next)
		if (cur->pmlk_borrower == curthread)
			break;
	mutex_exit(&pm_loan_lock);

	return (cur != NULL && cur->pmlk_lender == DEVI(dip)->devi_busy_thread);
}

/*
 * pm_set_power: adjusts power level of device.  Assumes device is power
 * manageable & component exists.
 *
 * Cases which require us to bring up devices we keep up ("wekeepups") for
 * backwards compatible devices:
 * component 0 is off and we're bringing it up from 0
 *	bring up wekeepup first
 * and recursively when component 0 is off and we bring some other
 * component up from 0
 * For devices which are not backward compatible, our dependency notion is
 * much simpler: unless all components are off, the wekeepups must be on.
 * We don't treat component 0 differently.
 * Canblock tells how to deal with a direct pm'd device.
 * Scan arg tells us if we were called from scan, in which case we don't need
 * to go back to the root node and walk down to change power.
 */
int
pm_set_power(dev_info_t *dip, int comp, int level, int direction,
    pm_canblock_t canblock, int scan, int *retp)
{
	PMD_FUNC(pmf, "set_power")
	char		*pathbuf;
	pm_bp_child_pwrchg_t bpc;
	pm_sp_misc_t	pspm;
	int		ret = DDI_SUCCESS;
	int		unused = DDI_SUCCESS;
	dev_info_t	*pdip = ddi_get_parent(dip);

#ifdef DEBUG
	int		diverted = 0;

	/*
	 * This prevents operations on the console from calling prom_printf
	 * and either deadlocking or bringing up the console because of debug
	 * output
	 */
	if (dip == cfb_dip) {
		diverted++;
		mutex_enter(&pm_debug_lock);
		pm_divertdebug++;
		mutex_exit(&pm_debug_lock);
	}
#endif
	ASSERT(direction == PM_LEVEL_UPONLY || direction == PM_LEVEL_DOWNONLY ||
	    direction == PM_LEVEL_EXACT);
	PMD(PMD_SET, ("%s: %s@%s(%s#%d), comp=%d, dir=%s, new=%d\n",
	    pmf, PM_DEVICE(dip), comp, pm_decode_direction(direction), level))
	pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, pathbuf);
	bpc.bpc_dip = dip;
	bpc.bpc_path = pathbuf;
	bpc.bpc_comp = comp;
	bpc.bpc_olevel = PM_CURPOWER(dip, comp);
	bpc.bpc_nlevel = level;
	pspm.pspm_direction = direction;
	pspm.pspm_errnop = retp;
	pspm.pspm_canblock = canblock;
	pspm.pspm_scan = scan;
	bpc.bpc_private = &pspm;

	/*
	 * If a config operation is being done (we've locked the parent) or
	 * we already hold the power lock (we've locked the node)
	 * then we can operate directly on the node because we have already
	 * brought up all the ancestors, otherwise, we have to go back to the
	 * top of the tree.
	 */
	if (pm_devi_lock_held(pdip) || pm_devi_lock_held(dip))
		ret = pm_busop_set_power(dip, NULL, BUS_POWER_CHILD_PWRCHG,
		    (void *)&bpc, (void *)&unused);
	else
		ret = pm_busop_bus_power(ddi_root_node(), NULL,
		    BUS_POWER_CHILD_PWRCHG, (void *)&bpc, (void *)&unused);
#ifdef DEBUG
	if (ret != DDI_SUCCESS || *retp != DDI_SUCCESS) {
		PMD(PMD_ERROR, ("%s: %s@%s(%s#%d) can't change power, ret=%d, "
		    "errno=%d\n", pmf, PM_DEVICE(dip), ret, *retp))
	}
	if (diverted) {
		mutex_enter(&pm_debug_lock);
		pm_divertdebug--;
		mutex_exit(&pm_debug_lock);
	}
#endif
	kmem_free(pathbuf, MAXPATHLEN);
	return (ret);
}
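
/*
 * Note, for example, how the two callers in this file choose canblock:
 * dev_is_needed() passes PM_CANBLOCK_BLOCK, since a driver thread may wait
 * for the process direct-pm'ing the device, while pm_scan_dev() passes
 * PM_CANBLOCK_FAIL so that the scan thread never blocks behind one.
 */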
1742 */ 1743 if (pm_devi_lock_held(pdip) || pm_devi_lock_held(dip)) 1744 ret = pm_busop_set_power(dip, NULL, BUS_POWER_CHILD_PWRCHG, 1745 (void *)&bpc, (void *)&unused); 1746 else 1747 ret = pm_busop_bus_power(ddi_root_node(), NULL, 1748 BUS_POWER_CHILD_PWRCHG, (void *)&bpc, (void *)&unused); 1749 #ifdef DEBUG 1750 if (ret != DDI_SUCCESS || *retp != DDI_SUCCESS) { 1751 PMD(PMD_ERROR, ("%s: %s@%s(%s#%d) can't change power, ret=%d, " 1752 "errno=%d\n", pmf, PM_DEVICE(dip), ret, *retp)) 1753 } 1754 if (diverted) { 1755 mutex_enter(&pm_debug_lock); 1756 pm_divertdebug--; 1757 mutex_exit(&pm_debug_lock); 1758 } 1759 #endif 1760 kmem_free(pathbuf, MAXPATHLEN); 1761 return (ret); 1762 } 1763 1764 /* 1765 * If holddip is set and a dip is found, we return with the node held. 1766 * 1767 * This code uses the same locking scheme as e_ddi_hold_devi_by_path 1768 * (resolve_pathname), but it does not drive attach. 1769 */ 1770 dev_info_t * 1771 pm_name_to_dip(char *pathname, int holddip) 1772 { 1773 struct pathname pn; 1774 char *component; 1775 dev_info_t *parent, *child; 1776 1777 if ((pathname == NULL) || (*pathname != '/')) 1778 return (NULL); 1779 1780 /* setup pathname and allocate component */ 1781 if (pn_get(pathname, UIO_SYSSPACE, &pn)) 1782 return (NULL); 1783 component = kmem_alloc(MAXNAMELEN, KM_SLEEP); 1784 1785 /* start at top, process '/' component */ 1786 parent = child = ddi_root_node(); 1787 ndi_hold_devi(parent); 1788 pn_skipslash(&pn); 1789 ASSERT(i_ddi_devi_attached(parent)); 1790 1791 /* process components of pathname */ 1792 while (pn_pathleft(&pn)) { 1793 (void) pn_getcomponent(&pn, component); 1794 1795 /* enter parent and search for component child */ 1796 ndi_devi_enter(parent); 1797 child = ndi_devi_findchild(parent, component); 1798 if ((child == NULL) || !i_ddi_devi_attached(child)) { 1799 child = NULL; 1800 ndi_devi_exit(parent); 1801 ndi_rele_devi(parent); 1802 goto out; 1803 } 1804 1805 /* attached child found, hold child and release parent */ 1806 ndi_hold_devi(child); 1807 ndi_devi_exit(parent); 1808 ndi_rele_devi(parent); 1809 1810 /* child becomes parent, and process next component */ 1811 parent = child; 1812 pn_skipslash(&pn); 1813 1814 /* loop with active ndi_devi_hold of child->parent */ 1815 } 1816 1817 out: 1818 pn_free(&pn); 1819 kmem_free(component, MAXNAMELEN); 1820 1821 /* if we are not asked to return with hold, drop current hold */ 1822 if (child && !holddip) 1823 ndi_rele_devi(child); 1824 return (child); 1825 } 1826 1827 /* 1828 * Search for a dependency and mark it unsatisfied 1829 */ 1830 static void 1831 pm_unsatisfy(char *keeper, char *kept) 1832 { 1833 PMD_FUNC(pmf, "unsatisfy") 1834 pm_pdr_t *dp; 1835 1836 PMD(PMD_KEEPS, ("%s: keeper=%s, kept=%s\n", pmf, keeper, kept)) 1837 for (dp = pm_dep_head; dp; dp = dp->pdr_next) { 1838 if (!dp->pdr_isprop) { 1839 if (strcmp(dp->pdr_keeper, keeper) == 0 && 1840 (dp->pdr_kept_count > 0) && 1841 strcmp(dp->pdr_kept_paths[0], kept) == 0) { 1842 if (dp->pdr_satisfied) { 1843 dp->pdr_satisfied = 0; 1844 pm_unresolved_deps++; 1845 PMD(PMD_KEEPS, ("%s: clear satisfied, " 1846 "pm_unresolved_deps now %d\n", pmf, 1847 pm_unresolved_deps)) 1848 } 1849 } 1850 } 1851 } 1852 } 1853 1854 /* 1855 * Device dip is being un-power-managed; it keeps up "count" other devices. 1856 * We need to release any hold we have on the kept devices, and also 1857 * mark the dependency no longer satisfied.
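 *
 * As an illustration (the paths here are hypothetical): for a keeper at
 * "/pci@0,0/display@1" that keeps up one device at
 * "/pci@0,0/ide@1f,1/sd@0,0", pm_unkeeps(1, keeper, keptpaths, pwr)
 * releases the power hold on the sd node (unless PMC_SKIP_BRINGUP says
 * the bringup was skipped) and calls pm_unsatisfy() so the dependency
 * must be re-established before it is honored again.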
1858 */ 1859 static void 1860 pm_unkeeps(int count, char *keeper, char **keptpaths, int pwr) 1861 { 1862 PMD_FUNC(pmf, "unkeeps") 1863 int i, j; 1864 dev_info_t *kept; 1865 dev_info_t *dip; 1866 struct pm_component *cp; 1867 int keeper_on = 0; 1868 1869 PMD(PMD_KEEPS, ("%s: count=%d, keeper=%s, keptpaths=%p\n", pmf, count, 1870 keeper, (void *)keptpaths)) 1871 /* 1872 * Try to grab the keeper. The keeper may have gone away by now; 1873 * in that case, use the passed-in value pwr 1874 */ 1875 dip = pm_name_to_dip(keeper, 1); 1876 for (i = 0; i < count; i++) { 1877 /* Release power hold */ 1878 kept = pm_name_to_dip(keptpaths[i], 1); 1879 if (kept) { 1880 PMD(PMD_KEEPS, ("%s: %s@%s(%s#%d)[%d]\n", pmf, 1881 PM_DEVICE(kept), i)) 1882 /* 1883 * We need to check if we skipped a bringup here, 1884 * because we could have failed the bringup 1885 * (i.e. a DIRECT PM device) and not have 1886 * incremented the count. 1887 */ 1888 if ((dip != NULL) && (PM_GET_PM_INFO(dip) != NULL)) { 1889 keeper_on = 0; 1890 PM_LOCK_POWER(dip); 1891 for (j = 0; j < PM_NUMCMPTS(dip); j++) { 1892 cp = &DEVI(dip)->devi_pm_components[j]; 1893 if (cur_power(cp)) { 1894 keeper_on++; 1895 break; 1896 } 1897 } 1898 if (keeper_on && (PM_SKBU(kept) == 0)) { 1899 pm_rele_power(kept); 1900 DEVI(kept)->devi_pm_flags 1901 &= ~PMC_SKIP_BRINGUP; 1902 } 1903 PM_UNLOCK_POWER(dip); 1904 } else if (pwr) { 1905 if (PM_SKBU(kept) == 0) { 1906 pm_rele_power(kept); 1907 DEVI(kept)->devi_pm_flags 1908 &= ~PMC_SKIP_BRINGUP; 1909 } 1910 } 1911 ddi_release_devi(kept); 1912 } 1913 /* 1914 * mark this dependency not satisfied 1915 */ 1916 pm_unsatisfy(keeper, keptpaths[i]); 1917 } 1918 if (dip) 1919 ddi_release_devi(dip); 1920 } 1921 1922 /* 1923 * Device kept is being un-power-managed; it is kept up by keeper. 1924 * We need to mark the dependency no longer satisfied. 1925 */ 1926 static void 1927 pm_unkepts(char *kept, char *keeper) 1928 { 1929 PMD_FUNC(pmf, "unkepts") 1930 PMD(PMD_KEEPS, ("%s: kept=%s, keeper=%s\n", pmf, kept, keeper)) 1931 ASSERT(keeper != NULL); 1932 /* 1933 * mark this dependency not satisfied 1934 */ 1935 pm_unsatisfy(keeper, kept); 1936 } 1937 1938 /* 1939 * Removes dependency information and holds on the kepts, if the path is a 1940 * path of a keeper. 1941 */ 1942 static void 1943 pm_free_keeper(char *path, int pwr) 1944 { 1945 pm_pdr_t *dp; 1946 int i; 1947 size_t length; 1948 1949 for (dp = pm_dep_head; dp; dp = dp->pdr_next) { 1950 if (strcmp(dp->pdr_keeper, path) != 0) 1951 continue; 1952 /* 1953 * Remove all our kept holds and the dependency records, 1954 * then free up the kept lists.
1955 */ 1956 pm_unkeeps(dp->pdr_kept_count, path, dp->pdr_kept_paths, pwr); 1957 if (dp->pdr_kept_count) { 1958 for (i = 0; i < dp->pdr_kept_count; i++) { 1959 length = strlen(dp->pdr_kept_paths[i]); 1960 kmem_free(dp->pdr_kept_paths[i], length + 1); 1961 } 1962 kmem_free(dp->pdr_kept_paths, 1963 dp->pdr_kept_count * sizeof (char **)); 1964 dp->pdr_kept_paths = NULL; 1965 dp->pdr_kept_count = 0; 1966 } 1967 } 1968 } 1969 1970 /* 1971 * Removes the device represented by path from the list of kepts, if the 1972 * path is a path of a kept 1973 */ 1974 static void 1975 pm_free_kept(char *path) 1976 { 1977 pm_pdr_t *dp; 1978 int i; 1979 int j, count; 1980 size_t length; 1981 char **paths; 1982 1983 paths = NULL; 1984 for (dp = pm_dep_head; dp; dp = dp->pdr_next) { 1985 if (dp->pdr_kept_count == 0) 1986 continue; 1987 count = dp->pdr_kept_count; 1988 /* Remove this device from the kept path lists */ 1989 for (i = 0; i < count; i++) { 1990 if (strcmp(dp->pdr_kept_paths[i], path) == 0) { 1991 pm_unkepts(path, dp->pdr_keeper); 1992 length = strlen(dp->pdr_kept_paths[i]) + 1; 1993 kmem_free(dp->pdr_kept_paths[i], length); 1994 dp->pdr_kept_paths[i] = NULL; 1995 dp->pdr_kept_count--; 1996 } 1997 } 1998 /* Compact the kept paths array */ 1999 if (dp->pdr_kept_count) { 2000 length = dp->pdr_kept_count * sizeof (char **); 2001 paths = kmem_zalloc(length, KM_SLEEP); 2002 j = 0; 2003 for (i = 0; i < count; i++) { 2004 if (dp->pdr_kept_paths[i] != NULL) { 2005 paths[j] = dp->pdr_kept_paths[i]; 2006 j++; 2007 } 2008 } 2009 ASSERT(j == dp->pdr_kept_count); 2010 } else paths = NULL; /* all kepts removed; don't reuse a stale array */ 2011 /* Now free the old array and point to the new one */ 2012 kmem_free(dp->pdr_kept_paths, count * sizeof (char **)); 2013 dp->pdr_kept_paths = paths; 2014 } 2015 } 2016 2017 /* 2018 * Free the dependency information for a device. 2019 */ 2020 void 2021 pm_free_keeps(char *path, int pwr) 2022 { 2023 PMD_FUNC(pmf, "free_keeps") 2024 2025 #ifdef DEBUG 2026 int doprdeps = 0; 2027 void prdeps(char *); 2028 2029 PMD(PMD_KEEPS, ("%s: %s\n", pmf, path)) 2030 if (pm_debug & PMD_KEEPS) { 2031 doprdeps = 1; 2032 prdeps("pm_free_keeps before"); 2033 } 2034 #endif 2035 /* 2036 * First assume we are a keeper and remove all our kepts. 2037 */ 2038 pm_free_keeper(path, pwr); 2039 /* 2040 * Now assume we are a kept device, and remove all our records.
2041 */ 2042 pm_free_kept(path); 2043 #ifdef DEBUG 2044 if (doprdeps) { 2045 prdeps("pm_free_keeps after"); 2046 } 2047 #endif 2048 } 2049 2050 static int 2051 pm_is_kept(char *path) 2052 { 2053 pm_pdr_t *dp; 2054 int i; 2055 2056 for (dp = pm_dep_head; dp; dp = dp->pdr_next) { 2057 if (dp->pdr_kept_count == 0) 2058 continue; 2059 for (i = 0; i < dp->pdr_kept_count; i++) { 2060 if (strcmp(dp->pdr_kept_paths[i], path) == 0) 2061 return (1); 2062 } 2063 } 2064 return (0); 2065 } 2066 2067 static void 2068 e_pm_hold_rele_power(dev_info_t *dip, int cnt) 2069 { 2070 PMD_FUNC(pmf, "hold_rele_power") 2071 2072 if ((dip == NULL) || 2073 (PM_GET_PM_INFO(dip) == NULL) || PM_ISBC(dip)) 2074 return; 2075 2076 PM_LOCK_POWER(dip); 2077 ASSERT(cnt >= 0 || (cnt < 0 && PM_KUC(dip) > 0)); 2078 PMD(PMD_KIDSUP, ("%s: kidsupcnt for %s@%s(%s#%d) %d->%d\n", pmf, 2079 PM_DEVICE(dip), PM_KUC(dip), (PM_KUC(dip) + cnt))) 2080 2081 PM_KUC(dip) += cnt; 2082 2083 PM_UNLOCK_POWER(dip); 2084 2085 if (cnt < 0 && PM_KUC(dip) == 0) 2086 pm_rescan(dip); 2087 } 2088 2089 #define MAX_PPM_HANDLERS 4 2090 2091 kmutex_t ppm_lock; /* in case we ever do multi-threaded startup */ 2092 2093 struct ppm_callbacks { 2094 int (*ppmc_func)(dev_info_t *); 2095 dev_info_t *ppmc_dip; 2096 } ppm_callbacks[MAX_PPM_HANDLERS + 1]; 2097 2098 2099 /* 2100 * This routine calls into all the registered ppms to notify them 2101 * that either all components of power-managed devices are at their 2102 * lowest levels or no longer all are at their lowest levels. 2103 */ 2104 static void 2105 pm_ppm_notify_all_lowest(dev_info_t *dip, int mode) 2106 { 2107 struct ppm_callbacks *ppmcp; 2108 power_req_t power_req; 2109 int result = 0; 2110 2111 power_req.request_type = PMR_PPM_ALL_LOWEST; 2112 power_req.req.ppm_all_lowest_req.mode = mode; 2113 mutex_enter(&ppm_lock); 2114 for (ppmcp = ppm_callbacks; ppmcp->ppmc_func; ppmcp++) 2115 (void) pm_ctlops((dev_info_t *)ppmcp->ppmc_dip, dip, 2116 DDI_CTLOPS_POWER, &power_req, &result); 2117 mutex_exit(&ppm_lock); 2118 if (mode == PM_ALL_LOWEST) { 2119 if (autoS3_enabled) { 2120 PMD(PMD_SX, ("pm_ppm_notify_all_lowest triggering " 2121 "autos3\n")) 2122 mutex_enter(&srn_clone_lock); 2123 if (srn_signal) { 2124 srn_inuse++; 2125 PMD(PMD_SX, ("(*srn_signal)(AUTOSX, 3)\n")) 2126 (*srn_signal)(SRN_TYPE_AUTOSX, 3); 2127 srn_inuse--; 2128 } else { 2129 PMD(PMD_SX, ("srn_signal NULL\n")) 2130 } 2131 mutex_exit(&srn_clone_lock); 2132 } else { 2133 PMD(PMD_SX, ("pm_ppm_notify_all_lowest autos3 " 2134 "disabled\n")); 2135 } 2136 } 2137 } 2138 2139 static void 2140 pm_set_pm_info(dev_info_t *dip, void *value) 2141 { 2142 DEVI(dip)->devi_pm_info = value; 2143 } 2144 2145 pm_rsvp_t *pm_blocked_list; 2146 2147 /* 2148 * Look up an entry in the blocked list by dip and component 2149 */ 2150 static pm_rsvp_t * 2151 pm_rsvp_lookup(dev_info_t *dip, int comp) 2152 { 2153 pm_rsvp_t *p; 2154 ASSERT(MUTEX_HELD(&pm_rsvp_lock)); 2155 for (p = pm_blocked_list; p; p = p->pr_next) 2156 if (p->pr_dip == dip && p->pr_comp == comp) { 2157 return (p); 2158 } 2159 return (NULL); 2160 } 2161 2162 /* 2163 * Called when a device which is direct power managed (or the parent or 2164 * dependent of such a device) changes power, or when a pm clone is closed 2165 * that was direct power managing a device. This call results in pm_blocked() 2166 * (below) returning. 
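 *
 * A sketch of the rendezvous, inferred from the code below (pm_blocked()
 * itself appears later in this file): the thread doing the power change
 * queues a pm_rsvp_t on pm_blocked_list and waits on its pr_cv; this
 * routine finds the matching entry, stores PMP_RELEASE, PMP_SUCCEED or
 * PMP_FAIL in pr_retval, and signals pr_cv, which is what makes the
 * blocked pm_blocked() call return.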
2167 */ 2168 void 2169 pm_proceed(dev_info_t *dip, int cmd, int comp, int newlevel) 2170 { 2171 PMD_FUNC(pmf, "proceed") 2172 pm_rsvp_t *found = NULL; 2173 pm_rsvp_t *p; 2174 2175 mutex_enter(&pm_rsvp_lock); 2176 switch (cmd) { 2177 /* 2178 * we're giving up control, let any pending op continue 2179 */ 2180 case PMP_RELEASE: 2181 for (p = pm_blocked_list; p; p = p->pr_next) { 2182 if (dip == p->pr_dip) { 2183 p->pr_retval = PMP_RELEASE; 2184 PMD(PMD_DPM, ("%s: RELEASE %s@%s(%s#%d)\n", 2185 pmf, PM_DEVICE(dip))) 2186 cv_signal(&p->pr_cv); 2187 } 2188 } 2189 break; 2190 2191 /* 2192 * process has done PM_SET_CURRENT_POWER; let a matching request 2193 * succeed and a non-matching request for the same device fail 2194 */ 2195 case PMP_SETPOWER: 2196 found = pm_rsvp_lookup(dip, comp); 2197 if (!found) /* if driver not waiting */ 2198 break; 2199 /* 2200 * This cannot be pm_lower_power, since that can only happen 2201 * during detach or probe 2202 */ 2203 if (found->pr_newlevel <= newlevel) { 2204 found->pr_retval = PMP_SUCCEED; 2205 PMD(PMD_DPM, ("%s: SUCCEED %s@%s(%s#%d)\n", pmf, 2206 PM_DEVICE(dip))) 2207 } else { 2208 found->pr_retval = PMP_FAIL; 2209 PMD(PMD_DPM, ("%s: FAIL %s@%s(%s#%d)\n", pmf, 2210 PM_DEVICE(dip))) 2211 } 2212 cv_signal(&found->pr_cv); 2213 break; 2214 2215 default: 2216 panic("pm_proceed unknown cmd %d", cmd); 2217 } 2218 mutex_exit(&pm_rsvp_lock); 2219 } 2220 2221 /* 2222 * This routine dispatches new work to the dependency thread. Caller must 2223 * be prepared to block for memory if necessary. 2224 */ 2225 void 2226 pm_dispatch_to_dep_thread(int cmd, char *keeper, char *kept, int wait, 2227 int *res, int cached_pwr) 2228 { 2229 pm_dep_wk_t *new_work; 2230 2231 new_work = kmem_zalloc(sizeof (pm_dep_wk_t), KM_SLEEP); 2232 new_work->pdw_type = cmd; 2233 new_work->pdw_wait = wait; 2234 new_work->pdw_done = 0; 2235 new_work->pdw_ret = 0; 2236 new_work->pdw_pwr = cached_pwr; 2237 cv_init(&new_work->pdw_cv, NULL, CV_DEFAULT, NULL); 2238 if (keeper != NULL) { 2239 new_work->pdw_keeper = kmem_zalloc(strlen(keeper) + 1, 2240 KM_SLEEP); 2241 (void) strcpy(new_work->pdw_keeper, keeper); 2242 } 2243 if (kept != NULL) { 2244 new_work->pdw_kept = kmem_zalloc(strlen(kept) + 1, KM_SLEEP); 2245 (void) strcpy(new_work->pdw_kept, kept); 2246 } 2247 mutex_enter(&pm_dep_thread_lock); 2248 if (pm_dep_thread_workq == NULL) { 2249 pm_dep_thread_workq = new_work; 2250 pm_dep_thread_tail = new_work; 2251 new_work->pdw_next = NULL; 2252 } else { 2253 pm_dep_thread_tail->pdw_next = new_work; 2254 pm_dep_thread_tail = new_work; 2255 new_work->pdw_next = NULL; 2256 } 2257 cv_signal(&pm_dep_thread_cv); 2258 /* If caller asked for it, wait till it is done. */ 2259 if (wait) { 2260 while (!new_work->pdw_done) 2261 cv_wait(&new_work->pdw_cv, &pm_dep_thread_lock); 2262 /* 2263 * Pass return status, if any, back. 2264 */ 2265 if (res != NULL) 2266 *res = new_work->pdw_ret; 2267 /* 2268 * If we asked to wait, it is our job to free the request 2269 * structure. 2270 */ 2271 if (new_work->pdw_keeper) 2272 kmem_free(new_work->pdw_keeper, 2273 strlen(new_work->pdw_keeper) + 1); 2274 if (new_work->pdw_kept) 2275 kmem_free(new_work->pdw_kept, 2276 strlen(new_work->pdw_kept) + 1); 2277 kmem_free(new_work, sizeof (pm_dep_wk_t)); 2278 } 2279 mutex_exit(&pm_dep_thread_lock); 2280 } 2281 2282 /* 2283 * Release the pm resource for this device. 
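 * In outline (matching the code below): release any direct-PM
 * controlling process, adjust the parent's kidsupcnt for components that
 * are still powered up, dispatch a PM_DEP_WK_DETACH request to the
 * dependency thread, fix up the pm_comps_notlowest count, and finally
 * clear and free the pm_info.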
2284 */ 2285 void 2286 pm_rem_info(dev_info_t *dip) 2287 { 2288 PMD_FUNC(pmf, "rem_info") 2289 int i, count = 0; 2290 pm_info_t *info = PM_GET_PM_INFO(dip); 2291 dev_info_t *pdip = ddi_get_parent(dip); 2292 char *pathbuf; 2293 int work_type = PM_DEP_WK_DETACH; 2294 2295 ASSERT(info); 2296 2297 ASSERT(!PM_IAM_LOCKING_DIP(dip)); 2298 if (PM_ISDIRECT(dip)) { 2299 info->pmi_dev_pm_state &= ~PM_DIRECT; 2300 ASSERT(info->pmi_clone); 2301 info->pmi_clone = 0; 2302 pm_proceed(dip, PMP_RELEASE, -1, -1); 2303 } 2304 ASSERT(!PM_GET_PM_SCAN(dip)); 2305 2306 /* 2307 * Now adjust parent's kidsupcnt. BC nodes we check only comp 0, 2308 * Others we check all components. BC node that has already 2309 * called pm_destroy_components() has zero component count. 2310 * Parents that get notification are not adjusted because their 2311 * kidsupcnt is always 0 (or 1 during configuration). 2312 */ 2313 PMD(PMD_KEEPS, ("%s: %s@%s(%s#%d) has %d components\n", pmf, 2314 PM_DEVICE(dip), PM_NUMCMPTS(dip))) 2315 2316 /* node is detached, so we can examine power without locking */ 2317 if (PM_ISBC(dip)) { 2318 count = (PM_CURPOWER(dip, 0) != 0); 2319 } else { 2320 for (i = 0; i < PM_NUMCMPTS(dip); i++) 2321 count += (PM_CURPOWER(dip, i) != 0); 2322 } 2323 2324 if (PM_NUMCMPTS(dip) && pdip && !PM_WANTS_NOTIFICATION(pdip)) 2325 e_pm_hold_rele_power(pdip, -count); 2326 2327 /* Schedule a request to clean up dependency records */ 2328 pathbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 2329 (void) ddi_pathname(dip, pathbuf); 2330 pm_dispatch_to_dep_thread(work_type, pathbuf, pathbuf, 2331 PM_DEP_NOWAIT, NULL, (count > 0)); 2332 kmem_free(pathbuf, MAXPATHLEN); 2333 2334 /* 2335 * Adjust the pm_comps_notlowest count since this device is 2336 * not being power-managed anymore. 2337 */ 2338 for (i = 0; i < PM_NUMCMPTS(dip); i++) { 2339 pm_component_t *cp = PM_CP(dip, i); 2340 if (cp->pmc_cur_pwr != 0) 2341 PM_DECR_NOTLOWEST(dip) 2342 } 2343 /* 2344 * Once we clear the info pointer, it looks like it is not power 2345 * managed to everybody else. 2346 */ 2347 pm_set_pm_info(dip, NULL); 2348 kmem_free(info, sizeof (pm_info_t)); 2349 } 2350 2351 int 2352 pm_get_norm_pwrs(dev_info_t *dip, int **valuep, size_t *length) 2353 { 2354 int components = PM_NUMCMPTS(dip); 2355 int *bufp; 2356 size_t size; 2357 int i; 2358 2359 if (components <= 0) { 2360 cmn_err(CE_NOTE, "!pm: %s@%s(%s#%d) has no components, " 2361 "can't get normal power values\n", PM_DEVICE(dip)); 2362 return (DDI_FAILURE); 2363 } else { 2364 size = components * sizeof (int); 2365 bufp = kmem_alloc(size, KM_SLEEP); 2366 for (i = 0; i < components; i++) { 2367 bufp[i] = pm_get_normal_power(dip, i); 2368 } 2369 } 2370 *length = size; 2371 *valuep = bufp; 2372 return (DDI_SUCCESS); 2373 } 2374 2375 static int 2376 pm_reset_timestamps(dev_info_t *dip, void *arg) 2377 { 2378 _NOTE(ARGUNUSED(arg)) 2379 2380 int components; 2381 int i; 2382 2383 if (!PM_GET_PM_INFO(dip)) 2384 return (DDI_WALK_CONTINUE); 2385 components = PM_NUMCMPTS(dip); 2386 ASSERT(components > 0); 2387 PM_LOCK_BUSY(dip); 2388 for (i = 0; i < components; i++) { 2389 struct pm_component *cp; 2390 /* 2391 * If the component was not marked as busy, 2392 * reset its timestamp to now. 2393 */ 2394 cp = PM_CP(dip, i); 2395 if (cp->pmc_timestamp) 2396 cp->pmc_timestamp = gethrestime_sec(); 2397 } 2398 PM_UNLOCK_BUSY(dip); 2399 return (DDI_WALK_CONTINUE); 2400 } 2401 2402 /* 2403 * Convert a power level to an index into the levels array (or 2404 * just PM_LEVEL_UNKNOWN in that special case). 
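 *
 * For example (levels hypothetical): a component whose pm-components
 * entry defines the levels {0, 4, 15} maps level 15 to index 2 and level
 * 0 to index 0; a level that appears nowhere in pmc_lvals panics below.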
2405 */ 2406 static int 2407 pm_level_to_index(dev_info_t *dip, pm_component_t *cp, int level) 2408 { 2409 PMD_FUNC(pmf, "level_to_index") 2410 int i; 2411 int limit = cp->pmc_comp.pmc_numlevels; 2412 int *ip = cp->pmc_comp.pmc_lvals; 2413 2414 if (level == PM_LEVEL_UNKNOWN) 2415 return (level); 2416 2417 for (i = 0; i < limit; i++) { 2418 if (level == *ip++) { 2419 PMD(PMD_LEVEL, ("%s: %s@%s(%s#%d)[%d] to %x\n", 2420 pmf, PM_DEVICE(dip), 2421 (int)(cp - DEVI(dip)->devi_pm_components), level)) 2422 return (i); 2423 } 2424 } 2425 panic("pm_level_to_index: level %d not found for device " 2426 "%s@%s(%s#%d)", level, PM_DEVICE(dip)); 2427 /*NOTREACHED*/ 2428 } 2429 2430 /* 2431 * Internal function to set current power level 2432 */ 2433 static void 2434 e_pm_set_cur_pwr(dev_info_t *dip, pm_component_t *cp, int level) 2435 { 2436 PMD_FUNC(pmf, "set_cur_pwr") 2437 int curpwr = (cp->pmc_flags & PM_PHC_WHILE_SET_POWER ? 2438 cp->pmc_phc_pwr : cp->pmc_cur_pwr); 2439 2440 /* 2441 * Nothing to adjust if current & new levels are the same. 2442 */ 2443 if (curpwr != PM_LEVEL_UNKNOWN && 2444 level == cp->pmc_comp.pmc_lvals[curpwr]) 2445 return; 2446 2447 /* 2448 * Keep the count for comps doing transition to/from lowest 2449 * level. 2450 */ 2451 if (curpwr == 0) { 2452 PM_INCR_NOTLOWEST(dip); 2453 } else if (level == cp->pmc_comp.pmc_lvals[0]) { 2454 PM_DECR_NOTLOWEST(dip); 2455 } 2456 cp->pmc_phc_pwr = PM_LEVEL_UNKNOWN; 2457 cp->pmc_cur_pwr = pm_level_to_index(dip, cp, level); 2458 } 2459 2460 static int pm_phc_impl(dev_info_t *, int, int, int); 2461 2462 /* 2463 * This is the default method of setting the power of a device if no ppm 2464 * driver has claimed it. 2465 */ 2466 int 2467 pm_power(dev_info_t *dip, int comp, int level) 2468 { 2469 PMD_FUNC(pmf, "power") 2470 struct dev_ops *ops; 2471 int (*fn)(dev_info_t *, int, int); 2472 struct pm_component *cp = PM_CP(dip, comp); 2473 int retval; 2474 pm_info_t *info = PM_GET_PM_INFO(dip); 2475 2476 PMD(PMD_KIDSUP, ("%s: %s@%s(%s#%d), comp=%d, level=%d\n", pmf, 2477 PM_DEVICE(dip), comp, level)) 2478 if (!(ops = ddi_get_driver(dip))) { 2479 PMD(PMD_FAIL, ("%s: %s@%s(%s#%d) has no ops\n", pmf, 2480 PM_DEVICE(dip))) 2481 return (DDI_FAILURE); 2482 } 2483 if ((ops->devo_rev < 2) || !(fn = ops->devo_power)) { 2484 PMD(PMD_FAIL, ("%s: %s%s\n", pmf, 2485 (ops->devo_rev < 2 ? " wrong devo_rev" : ""), 2486 (!fn ? " devo_power NULL" : ""))) 2487 return (DDI_FAILURE); 2488 } 2489 cp->pmc_flags |= PM_POWER_OP; 2490 retval = (*fn)(dip, comp, level); 2491 cp->pmc_flags &= ~PM_POWER_OP; 2492 if (retval == DDI_SUCCESS) { 2493 e_pm_set_cur_pwr(dip, PM_CP(dip, comp), level); 2494 return (DDI_SUCCESS); 2495 } 2496 2497 /* 2498 * If pm_power_has_changed() detected a deadlock with pm_power(), it 2499 * updated only the power level of the component. If our attempt 2500 * above to set the device to a new power level has failed, we sync 2501 * the total power state via the phc code now.
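	 * (A sketch of the interleaving being handled: thread A, in
	 * pm_set_power(), holds the power lock and calls this driver's
	 * power(9E) entry point; thread B reports a level change via
	 * pm_power_has_changed(), finds itself deadlocked against A,
	 * and so only records the new level and sets
	 * PM_PHC_WHILE_SET_POWER. If A's power(9E) call then fails, the
	 * code below replays B's level through pm_phc_impl() so the
	 * framework's notion of the power state stays consistent.)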
2502 */ 2503 if (cp->pmc_flags & PM_PHC_WHILE_SET_POWER) { 2504 int phc_lvl = 2505 cp->pmc_comp.pmc_lvals[cp->pmc_cur_pwr]; 2506 2507 ASSERT(info); 2508 (void) pm_phc_impl(dip, comp, phc_lvl, 0); 2509 PMD(PMD_PHC, ("%s: phc %s@%s(%s#%d) comp=%d level=%d\n", 2510 pmf, PM_DEVICE(dip), comp, phc_lvl)) 2511 } 2512 2513 PMD(PMD_FAIL, ("%s: can't set comp=%d (%s) of %s@%s(%s#%d) to " 2514 "level=%d (%s)\n", pmf, comp, cp->pmc_comp.pmc_name, PM_DEVICE(dip), 2515 level, power_val_to_string(cp, level))); 2516 return (DDI_FAILURE); 2517 } 2518 2519 int 2520 pm_unmanage(dev_info_t *dip) 2521 { 2522 PMD_FUNC(pmf, "unmanage") 2523 power_req_t power_req; 2524 int result, retval = 0; 2525 2526 ASSERT(!PM_IAM_LOCKING_DIP(dip)); 2527 PMD(PMD_REMDEV | PMD_KIDSUP, ("%s: %s@%s(%s#%d)\n", pmf, 2528 PM_DEVICE(dip))) 2529 power_req.request_type = PMR_PPM_UNMANAGE; 2530 power_req.req.ppm_config_req.who = dip; 2531 if (pm_ppm_claimed(dip)) 2532 retval = pm_ctlops(PPM(dip), dip, DDI_CTLOPS_POWER, 2533 &power_req, &result); 2534 #ifdef DEBUG 2535 else 2536 retval = pm_ctlops(PPM(dip), dip, DDI_CTLOPS_POWER, 2537 &power_req, &result); 2538 #endif 2539 ASSERT(retval == DDI_SUCCESS); 2540 pm_rem_info(dip); 2541 return (retval); 2542 } 2543 2544 int 2545 pm_raise_power(dev_info_t *dip, int comp, int level) 2546 { 2547 if (level < 0) 2548 return (DDI_FAILURE); 2549 if (!e_pm_valid_info(dip, NULL) || !e_pm_valid_comp(dip, comp, NULL) || 2550 !e_pm_valid_power(dip, comp, level)) 2551 return (DDI_FAILURE); 2552 2553 return (dev_is_needed(dip, comp, level, PM_LEVEL_UPONLY)); 2554 } 2555 2556 int 2557 pm_lower_power(dev_info_t *dip, int comp, int level) 2558 { 2559 PMD_FUNC(pmf, "pm_lower_power") 2560 2561 if (!e_pm_valid_info(dip, NULL) || !e_pm_valid_comp(dip, comp, NULL) || 2562 !e_pm_valid_power(dip, comp, level)) { 2563 PMD(PMD_FAIL, ("%s: validation checks failed for %s@%s(%s#%d) " 2564 "comp=%d level=%d\n", pmf, PM_DEVICE(dip), comp, level)) 2565 return (DDI_FAILURE); 2566 } 2567 2568 if (!DEVI_IS_DETACHING(dip)) { 2569 PMD(PMD_FAIL, ("%s: %s@%s(%s#%d) not detaching\n", 2570 pmf, PM_DEVICE(dip))) 2571 return (DDI_FAILURE); 2572 } 2573 2574 /* 2575 * If we don't care about saving power, or we're treating this node 2576 * specially, then this is a no-op 2577 */ 2578 if (!PM_SCANABLE(dip) || pm_noinvol(dip)) { 2579 PMD(PMD_FAIL, ("%s: %s@%s(%s#%d) %s%s%s%s\n", 2580 pmf, PM_DEVICE(dip), 2581 !autopm_enabled ? "!autopm_enabled " : "", 2582 !PM_POLLING_CPUPM ? "!cpupm_polling " : "", 2583 PM_CPUPM_DISABLED ? "cpupm_disabled " : "", 2584 pm_noinvol(dip) ? 
"pm_noinvol()" : "")) 2585 return (DDI_SUCCESS); 2586 } 2587 2588 if (dev_is_needed(dip, comp, level, PM_LEVEL_DOWNONLY) != DDI_SUCCESS) { 2589 PMD(PMD_FAIL, ("%s: %s@%s(%s#%d) dev_is_needed failed\n", pmf, 2590 PM_DEVICE(dip))) 2591 return (DDI_FAILURE); 2592 } 2593 return (DDI_SUCCESS); 2594 } 2595 2596 /* 2597 * Find the entries struct for a given dip in the blocked list, return it locked 2598 */ 2599 static psce_t * 2600 pm_psc_dip_to_direct(dev_info_t *dip, pscc_t **psccp) 2601 { 2602 pscc_t *p; 2603 psce_t *psce; 2604 2605 rw_enter(&pm_pscc_direct_rwlock, RW_READER); 2606 for (p = pm_pscc_direct; p; p = p->pscc_next) { 2607 if (p->pscc_dip == dip) { 2608 *psccp = p; 2609 psce = p->pscc_entries; 2610 mutex_enter(&psce->psce_lock); 2611 ASSERT(psce); 2612 rw_exit(&pm_pscc_direct_rwlock); 2613 return (psce); 2614 } 2615 } 2616 rw_exit(&pm_pscc_direct_rwlock); 2617 panic("sunpm: no entry for dip %p in direct list", (void *)dip); 2618 /*NOTREACHED*/ 2619 } 2620 2621 /* 2622 * Write an entry indicating a power level change (to be passed to a process 2623 * later) in the given psce. 2624 * If we were called in the path that brings up the console fb in the 2625 * case of entering the prom, we don't want to sleep. If the alloc fails, then 2626 * we create a record that has a size of -1, a physaddr of NULL, and that 2627 * has the overflow flag set. 2628 */ 2629 static int 2630 psc_entry(ushort_t event, psce_t *psce, dev_info_t *dip, int comp, int new, 2631 int old, int which, pm_canblock_t canblock) 2632 { 2633 char buf[MAXNAMELEN]; 2634 pm_state_change_t *p; 2635 size_t size; 2636 caddr_t physpath = NULL; 2637 int overrun = 0; 2638 2639 ASSERT(MUTEX_HELD(&psce->psce_lock)); 2640 (void) ddi_pathname(dip, buf); 2641 size = strlen(buf) + 1; 2642 p = psce->psce_in; 2643 if (canblock == PM_CANBLOCK_BYPASS) { 2644 physpath = kmem_alloc(size, KM_NOSLEEP); 2645 if (physpath == NULL) { 2646 /* 2647 * mark current entry as overrun 2648 */ 2649 p->flags |= PSC_EVENT_LOST; 2650 size = (size_t)-1; 2651 } 2652 } else 2653 physpath = kmem_alloc(size, KM_SLEEP); 2654 if (p->size) { /* overflow; mark the next entry */ 2655 if (p->size != (size_t)-1) 2656 kmem_free(p->physpath, p->size); 2657 ASSERT(psce->psce_out == p); 2658 if (p == psce->psce_last) { 2659 psce->psce_first->flags |= PSC_EVENT_LOST; 2660 psce->psce_out = psce->psce_first; 2661 } else { 2662 (p + 1)->flags |= PSC_EVENT_LOST; 2663 psce->psce_out = (p + 1); 2664 } 2665 overrun++; 2666 } else if (physpath == NULL) { /* alloc failed, mark this entry */ 2667 p->flags |= PSC_EVENT_LOST; 2668 p->size = 0; 2669 p->physpath = NULL; 2670 } 2671 if (which == PSC_INTEREST) { 2672 mutex_enter(&pm_compcnt_lock); 2673 if (pm_comps_notlowest == 0) 2674 p->flags |= PSC_ALL_LOWEST; 2675 else 2676 p->flags &= ~PSC_ALL_LOWEST; 2677 mutex_exit(&pm_compcnt_lock); 2678 } 2679 p->event = event; 2680 p->timestamp = gethrestime_sec(); 2681 p->component = comp; 2682 p->old_level = old; 2683 p->new_level = new; 2684 p->physpath = physpath; 2685 p->size = size; 2686 if (physpath != NULL) 2687 (void) strcpy(p->physpath, buf); 2688 if (p == psce->psce_last) 2689 psce->psce_in = psce->psce_first; 2690 else 2691 psce->psce_in = ++p; 2692 mutex_exit(&psce->psce_lock); 2693 return (overrun); 2694 } 2695 2696 /* 2697 * Find the next entry on the interest list. We keep a pointer to the item we 2698 * last returned in the user's cooke. Returns a locked entries struct. 
2699 */ 2700 static psce_t * 2701 psc_interest(void **cookie, pscc_t **psccp) 2702 { 2703 pscc_t *pscc; 2704 pscc_t **cookiep = (pscc_t **)cookie; 2705 2706 if (*cookiep == NULL) 2707 pscc = pm_pscc_interest; 2708 else 2709 pscc = (*cookiep)->pscc_next; 2710 if (pscc) { 2711 *cookiep = pscc; 2712 *psccp = pscc; 2713 mutex_enter(&pscc->pscc_entries->psce_lock); 2714 return (pscc->pscc_entries); 2715 } else { 2716 return (NULL); 2717 } 2718 } 2719 2720 /* 2721 * Create an entry for a process to pick up indicating a power level change. 2722 */ 2723 static void 2724 pm_enqueue_notify(ushort_t cmd, dev_info_t *dip, int comp, 2725 int newlevel, int oldlevel, pm_canblock_t canblock) 2726 { 2727 PMD_FUNC(pmf, "enqueue_notify") 2728 pscc_t *pscc; 2729 psce_t *psce; 2730 void *cookie = NULL; 2731 int overrun; 2732 2733 ASSERT(MUTEX_HELD(&pm_rsvp_lock)); 2734 switch (cmd) { 2735 case PSC_PENDING_CHANGE: /* only for controlling process */ 2736 PMD(PMD_DPM, ("%s: PENDING %s@%s(%s#%d), comp %d, %d -> %d\n", 2737 pmf, PM_DEVICE(dip), comp, oldlevel, newlevel)) 2738 psce = pm_psc_dip_to_direct(dip, &pscc); 2739 ASSERT(psce); 2740 PMD(PMD_IOCTL, ("%s: PENDING: %s@%s(%s#%d) pm_poll_cnt[%d] " 2741 "%d\n", pmf, PM_DEVICE(dip), pscc->pscc_clone, 2742 pm_poll_cnt[pscc->pscc_clone])) 2743 overrun = psc_entry(cmd, psce, dip, comp, newlevel, oldlevel, 2744 PSC_DIRECT, canblock); 2745 PMD(PMD_DPM, ("%s: sig %d\n", pmf, pscc->pscc_clone)) 2746 mutex_enter(&pm_clone_lock); 2747 if (!overrun) 2748 pm_poll_cnt[pscc->pscc_clone]++; 2749 cv_signal(&pm_clones_cv[pscc->pscc_clone]); 2750 pollwakeup(&pm_pollhead, (POLLRDNORM | POLLIN)); 2751 mutex_exit(&pm_clone_lock); 2752 break; 2753 case PSC_HAS_CHANGED: 2754 PMD(PMD_DPM, ("%s: HAS %s@%s(%s#%d), comp %d, %d -> %d\n", 2755 pmf, PM_DEVICE(dip), comp, oldlevel, newlevel)) 2756 if (PM_ISDIRECT(dip) && canblock != PM_CANBLOCK_BYPASS) { 2757 psce = pm_psc_dip_to_direct(dip, &pscc); 2758 PMD(PMD_IOCTL, ("%s: HAS: %s@%s(%s#%d) pm_poll_cnt[%d] " 2759 "%d\n", pmf, PM_DEVICE(dip), pscc->pscc_clone, 2760 pm_poll_cnt[pscc->pscc_clone])) 2761 overrun = psc_entry(cmd, psce, dip, comp, newlevel, 2762 oldlevel, PSC_DIRECT, canblock); 2763 PMD(PMD_DPM, ("%s: sig %d\n", pmf, pscc->pscc_clone)) 2764 mutex_enter(&pm_clone_lock); 2765 if (!overrun) 2766 pm_poll_cnt[pscc->pscc_clone]++; 2767 cv_signal(&pm_clones_cv[pscc->pscc_clone]); 2768 pollwakeup(&pm_pollhead, (POLLRDNORM | POLLIN)); 2769 mutex_exit(&pm_clone_lock); 2770 } 2771 mutex_enter(&pm_clone_lock); 2772 rw_enter(&pm_pscc_interest_rwlock, RW_READER); 2773 while ((psce = psc_interest(&cookie, &pscc)) != NULL) { 2774 (void) psc_entry(cmd, psce, dip, comp, newlevel, 2775 oldlevel, PSC_INTEREST, canblock); 2776 cv_signal(&pm_clones_cv[pscc->pscc_clone]); 2777 } 2778 rw_exit(&pm_pscc_interest_rwlock); 2779 mutex_exit(&pm_clone_lock); 2780 break; 2781 #ifdef DEBUG 2782 default: 2783 ASSERT(0); 2784 #endif 2785 } 2786 } 2787 2788 static void 2789 pm_enqueue_notify_others(pm_ppm_devlist_t **listp, pm_canblock_t canblock) 2790 { 2791 if (listp) { 2792 pm_ppm_devlist_t *p, *next = NULL; 2793 2794 for (p = *listp; p; p = next) { 2795 next = p->ppd_next; 2796 pm_enqueue_notify(PSC_HAS_CHANGED, p->ppd_who, 2797 p->ppd_cmpt, p->ppd_new_level, p->ppd_old_level, 2798 canblock); 2799 kmem_free(p, sizeof (pm_ppm_devlist_t)); 2800 } 2801 *listp = NULL; 2802 } 2803 } 2804 2805 /* 2806 * Try to get the power locks of the parent node and target (child) 2807 * node. Return true if successful (with both locks held) or false 2808 * (with no locks held). 
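 *
 * A minimal sketch of the retry pattern used by pm_power_has_changed()
 * below:
 *
 *	while (!pm_try_parent_child_locks(pdip, dip)) {
 *		check for deadlock, or delay(1) and retry
 *	}
 *	on success both locks are held; they are dropped with
 *	PM_UNLOCK_POWER(dip) and ndi_devi_exit(pdip)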
2809 */ 2810 static int 2811 pm_try_parent_child_locks(dev_info_t *pdip, dev_info_t *dip) 2812 { 2813 if (ndi_devi_tryenter(pdip)) { 2814 if (PM_TRY_LOCK_POWER(dip)) { 2815 return (1); 2816 } 2817 ndi_devi_exit(pdip); 2818 } 2819 return (0); 2820 } 2821 2822 /* 2823 * Determine if the power lock owner is blocked by current thread. 2824 * returns : 2825 * 1 - If the thread owning the effective power lock (the first lock on 2826 * which a thread blocks when it does PM_LOCK_POWER) is blocked by 2827 * a mutex held by the current thread. 2828 * 2829 * 0 - otherwise 2830 * 2831 * Note : This function is called by pm_power_has_changed to determine whether 2832 * it is executing in parallel with pm_set_power. 2833 */ 2834 static int 2835 pm_blocked_by_us(dev_info_t *dip) 2836 { 2837 power_req_t power_req; 2838 kthread_t *owner; 2839 int result; 2840 kmutex_t *mp; 2841 dev_info_t *ppm = (dev_info_t *)DEVI(dip)->devi_pm_ppm; 2842 2843 power_req.request_type = PMR_PPM_POWER_LOCK_OWNER; 2844 power_req.req.ppm_power_lock_owner_req.who = dip; 2845 if (pm_ctlops(ppm, dip, DDI_CTLOPS_POWER, &power_req, &result) != 2846 DDI_SUCCESS) { 2847 /* 2848 * It is assumed that if the device is claimed by ppm, ppm 2849 * will always implement this request type and it'll always 2850 * return success. We panic here, if it fails. 2851 */ 2852 panic("pm: Can't determine power lock owner of %s@%s(%s#%d)\n", 2853 PM_DEVICE(dip)); 2854 /*NOTREACHED*/ 2855 } 2856 2857 if ((owner = power_req.req.ppm_power_lock_owner_req.owner) != NULL && 2858 owner->t_state == TS_SLEEP && 2859 owner->t_sobj_ops && 2860 SOBJ_TYPE(owner->t_sobj_ops) == SOBJ_MUTEX && 2861 (mp = (kmutex_t *)owner->t_wchan) && 2862 mutex_owner(mp) == curthread) 2863 return (1); 2864 2865 return (0); 2866 } 2867 2868 /* 2869 * Notify parent which wants to hear about a child's power changes. 2870 */ 2871 static void 2872 pm_notify_parent(dev_info_t *dip, 2873 dev_info_t *pdip, int comp, int old_level, int level) 2874 { 2875 pm_bp_has_changed_t bphc; 2876 pm_sp_misc_t pspm; 2877 char *pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2878 int result = DDI_SUCCESS; 2879 2880 bphc.bphc_dip = dip; 2881 bphc.bphc_path = ddi_pathname(dip, pathbuf); 2882 bphc.bphc_comp = comp; 2883 bphc.bphc_olevel = old_level; 2884 bphc.bphc_nlevel = level; 2885 pspm.pspm_canblock = PM_CANBLOCK_BLOCK; 2886 pspm.pspm_scan = 0; 2887 bphc.bphc_private = &pspm; 2888 (void) (*PM_BUS_POWER_FUNC(pdip))(pdip, NULL, 2889 BUS_POWER_HAS_CHANGED, (void *)&bphc, (void *)&result); 2890 kmem_free(pathbuf, MAXPATHLEN); 2891 } 2892 2893 /* 2894 * Check if we need to resume a BC device, and make the attach call as required. 2895 */ 2896 static int 2897 pm_check_and_resume(dev_info_t *dip, int comp, int old_level, int level) 2898 { 2899 int ret = DDI_SUCCESS; 2900 2901 if (PM_ISBC(dip) && comp == 0 && old_level == 0 && level != 0) { 2902 ASSERT(DEVI(dip)->devi_pm_flags & PMC_SUSPENDED); 2903 /* ppm is not interested in DDI_PM_RESUME */ 2904 if ((ret = devi_attach(dip, DDI_PM_RESUME)) != DDI_SUCCESS) 2905 /* XXX Should we mark it resumed, */ 2906 /* even though it failed? */ 2907 cmn_err(CE_WARN, "!pm: Can't resume %s@%s", 2908 PM_NAME(dip), PM_ADDR(dip)); 2909 DEVI(dip)->devi_pm_flags &= ~PMC_SUSPENDED; 2910 } 2911 2912 return (ret); 2913 } 2914 2915 /* 2916 * Tests outside the lock to see if we should bother to enqueue an entry 2917 * for any watching process. 
If yes, then caller will take the lock and 2918 * do the full protocol 2919 */ 2920 static int 2921 pm_watchers() 2922 { 2923 if (pm_processes_stopped) 2924 return (0); 2925 return (pm_pscc_direct || pm_pscc_interest); 2926 } 2927 2928 static int pm_phc_impl(dev_info_t *, int, int, int); 2929 2930 /* 2931 * A driver is reporting that the power of one of its device's components 2932 * has changed. Update the power state accordingly. 2933 */ 2934 int 2935 pm_power_has_changed(dev_info_t *dip, int comp, int level) 2936 { 2937 PMD_FUNC(pmf, "pm_power_has_changed") 2938 int ret; 2939 dev_info_t *pdip = ddi_get_parent(dip); 2940 struct pm_component *cp; 2941 int blocked, old_level; 2942 2943 if (level < 0) { 2944 PMD(PMD_FAIL, ("%s: %s@%s(%s#%d): bad level=%d\n", pmf, 2945 PM_DEVICE(dip), level)) 2946 return (DDI_FAILURE); 2947 } 2948 2949 PMD(PMD_KIDSUP | PMD_DEP, ("%s: %s@%s(%s#%d), comp=%d, level=%d\n", pmf, 2950 PM_DEVICE(dip), comp, level)) 2951 2952 if (!e_pm_valid_info(dip, NULL) || !e_pm_valid_comp(dip, comp, &cp) || 2953 !e_pm_valid_power(dip, comp, level)) 2954 return (DDI_FAILURE); 2955 2956 /* 2957 * A driver thread calling pm_power_has_changed and another thread 2958 * calling pm_set_power can deadlock. The problem is not resolvable 2959 * by changing lock order, so we use pm_blocked_by_us() to detect 2960 * this specific deadlock. If we can't get the lock immediately 2961 * and we are deadlocked, just update the component's level, do 2962 * notifications, and return. We intend to update the total power 2963 * state later (if the other thread fails to set power to the 2964 * desired level). If we were called because of a power change on a 2965 * component that isn't involved in a set_power op, update all state 2966 * immediately. 2967 */ 2968 cp = PM_CP(dip, comp); 2969 while (!pm_try_parent_child_locks(pdip, dip)) { 2970 if (((blocked = pm_blocked_by_us(dip)) != 0) && 2971 (cp->pmc_flags & PM_POWER_OP)) { 2972 if (pm_watchers()) { 2973 mutex_enter(&pm_rsvp_lock); 2974 pm_enqueue_notify(PSC_HAS_CHANGED, dip, comp, 2975 level, cur_power(cp), PM_CANBLOCK_BLOCK); 2976 mutex_exit(&pm_rsvp_lock); 2977 } 2978 if (pdip && PM_WANTS_NOTIFICATION(pdip)) 2979 pm_notify_parent(dip, 2980 pdip, comp, cur_power(cp), level); 2981 (void) pm_check_and_resume(dip, 2982 comp, cur_power(cp), level); 2983 2984 /* 2985 * Stash the old power index, update curpwr, and flag 2986 * that the total power state needs to be synched. 2987 */ 2988 cp->pmc_flags |= PM_PHC_WHILE_SET_POWER; 2989 /* 2990 * Several pm_power_has_changed calls could arrive 2991 * while the set power path remains blocked. Keep the 2992 * oldest old power and the newest new power of any 2993 * sequence of phc calls which arrive during deadlock. 2994 */ 2995 if (cp->pmc_phc_pwr == PM_LEVEL_UNKNOWN) 2996 cp->pmc_phc_pwr = cp->pmc_cur_pwr; 2997 cp->pmc_cur_pwr = 2998 pm_level_to_index(dip, cp, level); 2999 PMD(PMD_PHC, ("%s: deadlock for %s@%s(%s#%d), comp=%d, " 3000 "level=%d\n", pmf, PM_DEVICE(dip), comp, level)) 3001 return (DDI_SUCCESS); 3002 } else 3003 if (blocked) { /* blocked, but different cmpt? 
*/ 3004 if (!ndi_devi_tryenter(pdip)) { 3005 cmn_err(CE_NOTE, 3006 "!pm: parent kuc not updated due " 3007 "to possible deadlock.\n"); 3008 return (pm_phc_impl(dip, 3009 comp, level, 1)); 3010 } 3011 old_level = cur_power(cp); 3012 if (pdip && !PM_WANTS_NOTIFICATION(pdip) && 3013 (!PM_ISBC(dip) || comp == 0) && 3014 POWERING_ON(old_level, level)) 3015 pm_hold_power(pdip); 3016 ret = pm_phc_impl(dip, comp, level, 1); 3017 if (pdip && !PM_WANTS_NOTIFICATION(pdip)) { 3018 if ((!PM_ISBC(dip) || 3019 comp == 0) && level == 0 && 3020 old_level != PM_LEVEL_UNKNOWN) 3021 pm_rele_power(pdip); 3022 } 3023 ndi_devi_exit(pdip); 3024 /* child lock not held: deadlock */ 3025 return (ret); 3026 } 3027 delay(1); 3028 PMD(PMD_PHC, ("%s: try lock again\n", pmf)) 3029 } 3030 3031 /* non-deadlock case */ 3032 old_level = cur_power(cp); 3033 if (pdip && !PM_WANTS_NOTIFICATION(pdip) && 3034 (!PM_ISBC(dip) || comp == 0) && POWERING_ON(old_level, level)) 3035 pm_hold_power(pdip); 3036 ret = pm_phc_impl(dip, comp, level, 1); 3037 if (pdip && !PM_WANTS_NOTIFICATION(pdip)) { 3038 if ((!PM_ISBC(dip) || comp == 0) && level == 0 && 3039 old_level != PM_LEVEL_UNKNOWN) 3040 pm_rele_power(pdip); 3041 } 3042 PM_UNLOCK_POWER(dip); 3043 ndi_devi_exit(pdip); 3044 return (ret); 3045 } 3046 3047 /* 3048 * Account for power changes to a component of the console frame buffer. 3049 * If lowering power from full (or "unknown", which is treated as full) 3050 * we will increment the "components off" count of the fb device. 3051 * Subsequent lowering of the same component doesn't affect the count. If 3052 * raising a component back to full power, we will decrement the count. 3053 * 3054 * Return: the increment value for pm_cfb_comps_off (-1, 0, or 1) 3055 */ 3056 static int 3057 calc_cfb_comps_incr(dev_info_t *dip, int cmpt, int old, int new) 3058 { 3059 struct pm_component *cp = PM_CP(dip, cmpt); 3060 int on = (old == PM_LEVEL_UNKNOWN || old == cp->pmc_norm_pwr); 3061 int want_normal = (new == cp->pmc_norm_pwr); 3062 int incr = 0; 3063 3064 if (on && !want_normal) 3065 incr = 1; 3066 else if (!on && want_normal) 3067 incr = -1; 3068 return (incr); 3069 } 3070 3071 /* 3072 * Adjust the count of console frame buffer components < full power. 3073 */ 3074 static void 3075 update_comps_off(int incr, dev_info_t *dip) 3076 { 3077 mutex_enter(&pm_cfb_lock); 3078 pm_cfb_comps_off += incr; 3079 ASSERT(pm_cfb_comps_off <= PM_NUMCMPTS(dip)); 3080 mutex_exit(&pm_cfb_lock); 3081 } 3082 3083 /* 3084 * Update the power state in the framework (via the ppm). The 'notify' 3085 * argument tells whether to notify watchers. Power lock is already held. 3086 */ 3087 static int 3088 pm_phc_impl(dev_info_t *dip, int comp, int level, int notify) 3089 { 3090 PMD_FUNC(pmf, "phc_impl") 3091 power_req_t power_req; 3092 int i, dodeps = 0; 3093 dev_info_t *pdip = ddi_get_parent(dip); 3094 int result; 3095 int old_level; 3096 struct pm_component *cp; 3097 int incr = 0; 3098 dev_info_t *ppm = (dev_info_t *)DEVI(dip)->devi_pm_ppm; 3099 int work_type = 0; 3100 char *pathbuf; 3101 3102 /* Must use "official" power level for this test. */ 3103 cp = PM_CP(dip, comp); 3104 old_level = (cp->pmc_flags & PM_PHC_WHILE_SET_POWER ?
3105 cp->pmc_phc_pwr : cp->pmc_cur_pwr); 3106 if (old_level != PM_LEVEL_UNKNOWN) 3107 old_level = cp->pmc_comp.pmc_lvals[old_level]; 3108 3109 if (level == old_level) { 3110 PMD(PMD_SET, ("%s: %s@%s(%s#%d), comp=%d is already at " 3111 "level=%d\n", pmf, PM_DEVICE(dip), comp, level)) 3112 return (DDI_SUCCESS); 3113 } 3114 3115 /* 3116 * Tell ppm about this. 3117 */ 3118 power_req.request_type = PMR_PPM_POWER_CHANGE_NOTIFY; 3119 power_req.req.ppm_notify_level_req.who = dip; 3120 power_req.req.ppm_notify_level_req.cmpt = comp; 3121 power_req.req.ppm_notify_level_req.new_level = level; 3122 power_req.req.ppm_notify_level_req.old_level = old_level; 3123 if (pm_ctlops(ppm, dip, DDI_CTLOPS_POWER, &power_req, 3124 &result) == DDI_FAILURE) { 3125 PMD(PMD_FAIL, ("%s: pm_ctlops %s@%s(%s#%d) to %d failed\n", 3126 pmf, PM_DEVICE(dip), level)) 3127 return (DDI_FAILURE); 3128 } 3129 3130 if (PM_IS_CFB(dip)) { 3131 incr = calc_cfb_comps_incr(dip, comp, old_level, level); 3132 3133 if (incr) { 3134 update_comps_off(incr, dip); 3135 PMD(PMD_CFB, ("%s: %s@%s(%s#%d) comp=%d %d->%d " 3136 "cfb_comps_off->%d\n", pmf, PM_DEVICE(dip), 3137 comp, old_level, level, pm_cfb_comps_off)) 3138 } 3139 } 3140 e_pm_set_cur_pwr(dip, PM_CP(dip, comp), level); 3141 result = DDI_SUCCESS; 3142 3143 if (notify) { 3144 if (pdip && PM_WANTS_NOTIFICATION(pdip)) 3145 pm_notify_parent(dip, pdip, comp, old_level, level); 3146 (void) pm_check_and_resume(dip, comp, old_level, level); 3147 } 3148 3149 /* 3150 * Decrement the dependency kidsup count if we turn a device 3151 * off. 3152 */ 3153 if (POWERING_OFF(old_level, level)) { 3154 dodeps = 1; 3155 for (i = 0; i < PM_NUMCMPTS(dip); i++) { 3156 cp = PM_CP(dip, i); 3157 if (cur_power(cp)) { 3158 dodeps = 0; 3159 break; 3160 } 3161 } 3162 if (dodeps) 3163 work_type = PM_DEP_WK_POWER_OFF; 3164 } 3165 3166 /* 3167 * Increment if we turn it on. Check to see 3168 * if other comps are already on; if so, 3169 * don't increment. 3170 */ 3171 if (POWERING_ON(old_level, level)) { 3172 dodeps = 1; 3173 for (i = 0; i < PM_NUMCMPTS(dip); i++) { 3174 cp = PM_CP(dip, i); 3175 if (comp == i) 3176 continue; 3177 /* -1 also treated as 0 in this case */ 3178 if (cur_power(cp) > 0) { 3179 dodeps = 0; 3180 break; 3181 } 3182 } 3183 if (dodeps) 3184 work_type = PM_DEP_WK_POWER_ON; 3185 } 3186 3187 if (dodeps) { 3188 pathbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 3189 (void) ddi_pathname(dip, pathbuf); 3190 pm_dispatch_to_dep_thread(work_type, pathbuf, NULL, 3191 PM_DEP_NOWAIT, NULL, 0); 3192 kmem_free(pathbuf, MAXPATHLEN); 3193 } 3194 3195 if (notify && (level != old_level) && pm_watchers()) { 3196 mutex_enter(&pm_rsvp_lock); 3197 pm_enqueue_notify(PSC_HAS_CHANGED, dip, comp, level, old_level, 3198 PM_CANBLOCK_BLOCK); 3199 mutex_exit(&pm_rsvp_lock); 3200 } 3201 3202 PMD(PMD_RESCAN, ("%s: %s@%s(%s#%d): pm_rescan\n", pmf, PM_DEVICE(dip))) 3203 pm_rescan(dip); 3204 return (DDI_SUCCESS); 3205 } 3206 3207 /* 3208 * This function is called at startup time to notify pm of the existence 3209 * of any platform power managers for this platform. As a result of 3210 * this registration, each function provided will be called each time 3211 * a device node is attached, until one returns true; a function claims the 3212 * device node (by returning non-zero) if it wants to be involved in the 3213 * node's power management. If it does claim the node, then it will 3214 * subsequently be notified of attach and detach events.
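 *
 * An illustrative sketch of a registration (the driver "xppm" and its
 * "xppm-managed" property test are hypothetical):
 *
 *	static int
 *	xppm_claim(dev_info_t *dip)
 *	{
 *		return (ddi_prop_exists(DDI_DEV_T_ANY, dip,
 *		    DDI_PROP_DONTPASS, "xppm-managed"));
 *	}
 *
 *	if (pm_register_ppm(xppm_claim, xppm_dip) != DDI_SUCCESS)
 *		cmn_err(CE_WARN, "!xppm: too many ppm handlers");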
3215 * 3216 */ 3217 3218 int 3219 pm_register_ppm(int (*func)(dev_info_t *), dev_info_t *dip) 3220 { 3221 PMD_FUNC(pmf, "register_ppm") 3222 struct ppm_callbacks *ppmcp; 3223 pm_component_t *cp; 3224 int i, pwr, result; 3225 power_req_t power_req; 3226 struct ppm_notify_level_req *p = &power_req.req.ppm_notify_level_req; 3227 void pm_ppm_claim(dev_info_t *); 3228 3229 mutex_enter(&ppm_lock); 3230 ppmcp = ppm_callbacks; 3231 for (i = 0; i < MAX_PPM_HANDLERS; i++, ppmcp++) { 3232 if (ppmcp->ppmc_func == NULL) { 3233 ppmcp->ppmc_func = func; 3234 ppmcp->ppmc_dip = dip; 3235 break; 3236 } 3237 } 3238 mutex_exit(&ppm_lock); 3239 3240 if (i >= MAX_PPM_HANDLERS) 3241 return (DDI_FAILURE); 3242 while ((dip = ddi_get_parent(dip)) != NULL) { 3243 if (dip != ddi_root_node() && PM_GET_PM_INFO(dip) == NULL) 3244 continue; 3245 pm_ppm_claim(dip); 3246 /* don't bother with the not power-manageable nodes */ 3247 if (pm_ppm_claimed(dip) && PM_GET_PM_INFO(dip)) { 3248 /* 3249 * Tell ppm about this. 3250 */ 3251 power_req.request_type = PMR_PPM_POWER_CHANGE_NOTIFY; 3252 p->old_level = PM_LEVEL_UNKNOWN; 3253 p->who = dip; 3254 PM_LOCK_POWER(dip); 3255 for (i = 0; i < PM_NUMCMPTS(dip); i++) { 3256 cp = PM_CP(dip, i); 3257 pwr = cp->pmc_cur_pwr; 3258 if (pwr != PM_LEVEL_UNKNOWN) { 3259 p->cmpt = i; 3260 p->new_level = cur_power(cp); 3261 p->old_level = PM_LEVEL_UNKNOWN; 3262 if (pm_ctlops(PPM(dip), dip, 3263 DDI_CTLOPS_POWER, &power_req, 3264 &result) == DDI_FAILURE) { 3265 PMD(PMD_FAIL, ("%s: pc " 3266 "%s@%s(%s#%d) to %d " 3267 "fails\n", pmf, 3268 PM_DEVICE(dip), pwr)) 3269 } 3270 } 3271 } 3272 PM_UNLOCK_POWER(dip); 3273 } 3274 } 3275 return (DDI_SUCCESS); 3276 } 3277 3278 /* 3279 * Call the ppm's that have registered and adjust the devinfo struct as 3280 * appropriate. First one to claim it gets it. The sets of devices claimed 3281 * by each ppm are assumed to be disjoint. 3282 */ 3283 void 3284 pm_ppm_claim(dev_info_t *dip) 3285 { 3286 struct ppm_callbacks *ppmcp; 3287 3288 if (PPM(dip)) { 3289 return; 3290 } 3291 mutex_enter(&ppm_lock); 3292 for (ppmcp = ppm_callbacks; ppmcp->ppmc_func; ppmcp++) { 3293 if ((*ppmcp->ppmc_func)(dip)) { 3294 DEVI(dip)->devi_pm_ppm = 3295 (struct dev_info *)ppmcp->ppmc_dip; 3296 mutex_exit(&ppm_lock); 3297 return; 3298 } 3299 } 3300 mutex_exit(&ppm_lock); 3301 } 3302 3303 /* 3304 * Node is being detached so stop autopm until we see if it succeeds, in which 3305 * case pm_stop will be called. For backwards compatible devices we bring the 3306 * device up to full power on the assumption the detach will succeed. 3307 */ 3308 void 3309 pm_detaching(dev_info_t *dip) 3310 { 3311 PMD_FUNC(pmf, "detaching") 3312 pm_info_t *info = PM_GET_PM_INFO(dip); 3313 int iscons; 3314 3315 PMD(PMD_REMDEV, ("%s: %s@%s(%s#%d), %d comps\n", pmf, PM_DEVICE(dip), 3316 PM_NUMCMPTS(dip))) 3317 if (info == NULL) 3318 return; 3319 ASSERT(DEVI_IS_DETACHING(dip)); 3320 PM_LOCK_DIP(dip); 3321 info->pmi_dev_pm_state |= PM_DETACHING; 3322 PM_UNLOCK_DIP(dip); 3323 if (!PM_ISBC(dip)) 3324 pm_scan_stop(dip); 3325 3326 /* 3327 * console and old-style devices get brought up when detaching. 
3328 */ 3329 iscons = PM_IS_CFB(dip); 3330 if (iscons || PM_ISBC(dip)) { 3331 (void) pm_all_to_normal(dip, PM_CANBLOCK_BYPASS); 3332 if (iscons) { 3333 mutex_enter(&pm_cfb_lock); 3334 while (cfb_inuse) { 3335 mutex_exit(&pm_cfb_lock); 3336 PMD(PMD_CFB, ("%s: delay; cfb_inuse\n", pmf)) 3337 delay(1); 3338 mutex_enter(&pm_cfb_lock); 3339 } 3340 ASSERT(cfb_dip_detaching == NULL); 3341 ASSERT(cfb_dip); 3342 cfb_dip_detaching = cfb_dip; /* case detach fails */ 3343 cfb_dip = NULL; 3344 mutex_exit(&pm_cfb_lock); 3345 } 3346 } 3347 } 3348 3349 /* 3350 * Node failed to detach. If it used to be autopm'd, make it so again. 3351 */ 3352 void 3353 pm_detach_failed(dev_info_t *dip) 3354 { 3355 PMD_FUNC(pmf, "detach_failed") 3356 pm_info_t *info = PM_GET_PM_INFO(dip); 3357 int pm_all_at_normal(dev_info_t *); 3358 3359 if (info == NULL) 3360 return; 3361 ASSERT(DEVI_IS_DETACHING(dip)); 3362 if (info->pmi_dev_pm_state & PM_DETACHING) { 3363 info->pmi_dev_pm_state &= ~PM_DETACHING; 3364 if (info->pmi_dev_pm_state & PM_ALLNORM_DEFERRED) { 3365 /* Make sure the operation is still needed */ 3366 if (!pm_all_at_normal(dip)) { 3367 if (pm_all_to_normal(dip, 3368 PM_CANBLOCK_FAIL) != DDI_SUCCESS) { 3369 PMD(PMD_ERROR, ("%s: could not bring " 3370 "%s@%s(%s#%d) to normal\n", pmf, 3371 PM_DEVICE(dip))) 3372 } 3373 } 3374 info->pmi_dev_pm_state &= ~PM_ALLNORM_DEFERRED; 3375 } 3376 } 3377 if (!PM_ISBC(dip)) { 3378 mutex_enter(&pm_scan_lock); 3379 if (PM_SCANABLE(dip)) 3380 pm_scan_init(dip); 3381 mutex_exit(&pm_scan_lock); 3382 pm_rescan(dip); 3383 } 3384 } 3385 3386 /* generic Backwards Compatible component */ 3387 static char *bc_names[] = {"off", "on"}; 3388 3389 static pm_comp_t bc_comp = {"unknown", 2, NULL, NULL, &bc_names[0]}; 3390 3391 static void 3392 e_pm_default_levels(dev_info_t *dip, pm_component_t *cp, int norm) 3393 { 3394 pm_comp_t *pmc; 3395 pmc = &cp->pmc_comp; 3396 pmc->pmc_numlevels = 2; 3397 pmc->pmc_lvals[0] = 0; 3398 pmc->pmc_lvals[1] = norm; 3399 e_pm_set_cur_pwr(dip, cp, norm); 3400 } 3401 3402 static void 3403 e_pm_default_components(dev_info_t *dip, int cmpts) 3404 { 3405 int i; 3406 pm_component_t *p = DEVI(dip)->devi_pm_components; 3407 3408 for (i = 0; i < cmpts; i++, p++) { 3409 p->pmc_comp = bc_comp; /* struct assignment */ 3410 p->pmc_comp.pmc_lvals = kmem_zalloc(2 * sizeof (int), 3411 KM_SLEEP); 3412 p->pmc_comp.pmc_thresh = kmem_alloc(2 * sizeof (int), 3413 KM_SLEEP); 3414 p->pmc_comp.pmc_numlevels = 2; 3415 p->pmc_comp.pmc_thresh[0] = INT_MAX; 3416 p->pmc_comp.pmc_thresh[1] = INT_MAX; 3417 3418 } 3419 } 3420 3421 /* 3422 * Called from functions that require components to exist already, to allow 3423 * for their creation by parsing the pm-components property.
3424 * Device will not be power managed as a result of this call. 3425 * No locking is needed because we're single-threaded by the ndi_devi_enter 3426 * done while attaching, and the device isn't visible until after it has 3427 * attached 3428 */ 3429 int 3430 pm_premanage(dev_info_t *dip, int style) 3431 { 3432 PMD_FUNC(pmf, "premanage") 3433 pm_comp_t *pcp, *compp; 3434 int cmpts, i, norm, error; 3435 pm_component_t *p = DEVI(dip)->devi_pm_components; 3436 pm_comp_t *pm_autoconfig(dev_info_t *, int *); 3437 3438 ASSERT(!PM_IAM_LOCKING_DIP(dip)); 3439 /* 3440 * If this dip has already been processed, don't mess with it 3441 */ 3442 if (DEVI(dip)->devi_pm_flags & PMC_COMPONENTS_DONE) 3443 return (DDI_SUCCESS); 3444 if (DEVI(dip)->devi_pm_flags & PMC_COMPONENTS_FAILED) { 3445 return (DDI_FAILURE); 3446 } 3447 /* 3448 * Look up pm-components property and create components accordingly 3449 * If that fails, fall back to backwards compatibility 3450 */ 3451 if ((compp = pm_autoconfig(dip, &error)) == NULL) { 3452 /* 3453 * If error is set, the property existed but was not well formed 3454 */ 3455 if (error || (style == PM_STYLE_NEW)) { 3456 DEVI(dip)->devi_pm_flags |= PMC_COMPONENTS_FAILED; 3457 return (DDI_FAILURE); 3458 } 3459 /* 3460 * If they don't have the pm-components property, then we 3461 * want the old "no pm until PM_SET_DEVICE_THRESHOLDS ioctl" 3462 * behavior. The driver must have called pm_create_components, 3463 * and we need to flesh out dummy components 3464 */ 3465 if ((cmpts = PM_NUMCMPTS(dip)) == 0) { 3466 /* 3467 * Not really failure, but we don't want the 3468 * caller to treat it as success 3469 */ 3470 return (DDI_FAILURE); 3471 } 3472 DEVI(dip)->devi_pm_flags |= PMC_BC; 3473 e_pm_default_components(dip, cmpts); 3474 for (i = 0; i < cmpts; i++) { 3475 /* 3476 * if normal power is not set yet, we don't really know 3477 * what *ANY* of the power values are. If normal 3478 * power is set, then we assume for this backwards 3479 * compatible case that the values are 0, normal power. 3480 */ 3481 norm = pm_get_normal_power(dip, i); 3482 if (norm == (uint_t)-1) { 3483 PMD(PMD_ERROR, ("%s: %s@%s(%s#%d)[%d]\n", pmf, 3484 PM_DEVICE(dip), i)) 3485 return (DDI_FAILURE); 3486 } 3487 /* 3488 * Components of BC devices start at their normal power, 3489 * so count them to be not at their lowest power. 3490 */ 3491 PM_INCR_NOTLOWEST(dip); 3492 e_pm_default_levels(dip, PM_CP(dip, i), norm); 3493 } 3494 } else { 3495 /* 3496 * e_pm_create_components was called from pm_autoconfig(); it 3497 * creates components with no descriptions (or known levels) 3498 */ 3499 cmpts = PM_NUMCMPTS(dip); 3500 ASSERT(cmpts != 0); 3501 pcp = compp; 3502 p = DEVI(dip)->devi_pm_components; 3503 for (i = 0; i < cmpts; i++, p++) { 3504 p->pmc_comp = *pcp++; /* struct assignment */ 3505 ASSERT(PM_CP(dip, i)->pmc_cur_pwr == 0); 3506 e_pm_set_cur_pwr(dip, PM_CP(dip, i), PM_LEVEL_UNKNOWN); 3507 } 3508 if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH) 3509 pm_set_device_threshold(dip, pm_cpu_idle_threshold, 3510 PMC_CPU_THRESH); 3511 else 3512 pm_set_device_threshold(dip, pm_system_idle_threshold, 3513 PMC_DEF_THRESH); 3514 kmem_free(compp, cmpts * sizeof (pm_comp_t)); 3515 } 3516 return (DDI_SUCCESS); 3517 } 3518 3519 /* 3520 * Called during or after the device's attach to let us know it is ready 3521 * to play autopm. Look up the pm model and manage the device accordingly. 3522 * Returns system call errno value.
3523 * If DDI_ATTACH and DDI_DETACH were in same namespace, this would be 3524 * a little cleaner 3525 * 3526 * Called with the dip lock held; returns with the dip lock released. 3527 */ 3528 3529 int 3530 e_pm_manage(dev_info_t *dip, int style) 3531 { 3532 PMD_FUNC(pmf, "e_manage") 3533 pm_info_t *info; 3534 dev_info_t *pdip = ddi_get_parent(dip); 3535 int pm_thresh_specd(dev_info_t *); 3536 int count; 3537 char *pathbuf; 3538 3539 if (pm_premanage(dip, style) != DDI_SUCCESS) { 3540 return (DDI_FAILURE); 3541 } 3542 PMD(PMD_KIDSUP, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip))) 3543 ASSERT(PM_GET_PM_INFO(dip) == NULL); 3544 info = kmem_zalloc(sizeof (pm_info_t), KM_SLEEP); 3545 3546 /* 3547 * Now set up parent's kidsupcnt. BC nodes are assumed to start 3548 * out at their normal power, so they are "up"; others start out 3549 * unknown, which is effectively "up". Parents which want notification 3550 * always get a kidsupcnt of 0. 3551 */ 3552 count = (PM_ISBC(dip)) ? 1 : PM_NUMCMPTS(dip); 3553 if (count && pdip && !PM_WANTS_NOTIFICATION(pdip)) 3554 e_pm_hold_rele_power(pdip, count); 3555 3556 pm_set_pm_info(dip, info); 3557 /* 3558 * Apply any recorded thresholds 3559 */ 3560 (void) pm_thresh_specd(dip); 3561 3562 /* 3563 * Do dependency processing. 3564 */ 3565 pathbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 3566 (void) ddi_pathname(dip, pathbuf); 3567 pm_dispatch_to_dep_thread(PM_DEP_WK_ATTACH, pathbuf, pathbuf, 3568 PM_DEP_NOWAIT, NULL, 0); 3569 kmem_free(pathbuf, MAXPATHLEN); 3570 3571 if (!PM_ISBC(dip)) { 3572 mutex_enter(&pm_scan_lock); 3573 if (PM_SCANABLE(dip)) { 3574 pm_scan_init(dip); 3575 mutex_exit(&pm_scan_lock); 3576 pm_rescan(dip); 3577 } else { 3578 mutex_exit(&pm_scan_lock); 3579 } 3580 } 3581 return (0); 3582 } 3583 3584 /* 3585 * This is the obsolete exported interface for a driver to find out its 3586 * "normal" (max) power. 3587 * We only get components destroyed while no power management is 3588 * going on (and the device is detached), so we don't need a mutex here 3589 */ 3590 int 3591 pm_get_normal_power(dev_info_t *dip, int comp) 3592 { 3593 3594 if (comp >= 0 && comp < PM_NUMCMPTS(dip)) { 3595 return (PM_CP(dip, comp)->pmc_norm_pwr); 3596 } 3597 return (DDI_FAILURE); 3598 } 3599 3600 /* 3601 * Fetches the current power level. Return DDI_SUCCESS or DDI_FAILURE. 3602 */ 3603 int 3604 pm_get_current_power(dev_info_t *dip, int comp, int *levelp) 3605 { 3606 if (comp >= 0 && comp < PM_NUMCMPTS(dip)) { 3607 *levelp = PM_CURPOWER(dip, comp); 3608 return (DDI_SUCCESS); 3609 } 3610 return (DDI_FAILURE); 3611 } 3612 3613 /* 3614 * Returns current threshold of indicated component 3615 */ 3616 static int 3617 cur_threshold(dev_info_t *dip, int comp) 3618 { 3619 pm_component_t *cp = PM_CP(dip, comp); 3620 int pwr; 3621 3622 if (PM_ISBC(dip)) { 3623 /* 3624 * backwards compatible nodes only have one threshold 3625 */ 3626 return (cp->pmc_comp.pmc_thresh[1]); 3627 } 3628 pwr = cp->pmc_cur_pwr; 3629 if (pwr == PM_LEVEL_UNKNOWN) { 3630 int thresh; 3631 if (DEVI(dip)->devi_pm_flags & PMC_NEXDEF_THRESH) 3632 thresh = pm_default_nexus_threshold; 3633 else if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH) 3634 thresh = pm_cpu_idle_threshold; 3635 else 3636 thresh = pm_system_idle_threshold; 3637 return (thresh); 3638 } 3639 ASSERT(cp->pmc_comp.pmc_thresh); 3640 return (cp->pmc_comp.pmc_thresh[pwr]); 3641 } 3642 3643 /* 3644 * Compute next lower component power level given power index.
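 *
 * For example (levels hypothetical): with pmc_lvals = {0, 4, 15}, an
 * index of 2 yields the next lower level, 4, and PM_LEVEL_UNKNOWN yields
 * the lowest level, 0. An index of 0 has no lower level; the ASSERT
 * below expects callers to have ruled that out.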
 */
static int
pm_next_lower_power(pm_component_t *cp, int pwrndx)
{
	int nxt_pwr;

	if (pwrndx == PM_LEVEL_UNKNOWN) {
		nxt_pwr = cp->pmc_comp.pmc_lvals[0];
	} else {
		pwrndx--;
		ASSERT(pwrndx >= 0);
		nxt_pwr = cp->pmc_comp.pmc_lvals[pwrndx];
	}
	return (nxt_pwr);
}

/*
 * Update the max (normal) power of a component.  Note that the
 * component's power level is only changed if its current power level
 * is higher than the new max power.
 */
int
pm_update_maxpower(dev_info_t *dip, int comp, int level)
{
	PMD_FUNC(pmf, "update_maxpower")
	int old;
	int result;

	if (!e_pm_valid_info(dip, NULL) || !e_pm_valid_comp(dip, comp, NULL) ||
	    !e_pm_valid_power(dip, comp, level)) {
		PMD(PMD_FAIL, ("%s: validation checks failed for %s@%s(%s#%d) "
		    "comp=%d level=%d\n", pmf, PM_DEVICE(dip), comp, level))
		return (DDI_FAILURE);
	}
	old = e_pm_get_max_power(dip, comp);
	e_pm_set_max_power(dip, comp, level);

	if (pm_set_power(dip, comp, level, PM_LEVEL_DOWNONLY,
	    PM_CANBLOCK_BLOCK, 0, &result) != DDI_SUCCESS) {
		e_pm_set_max_power(dip, comp, old);
		PMD(PMD_FAIL, ("%s: %s@%s(%s#%d) pm_set_power failed\n", pmf,
		    PM_DEVICE(dip)))
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Bring all components of device to normal power
 */
int
pm_all_to_normal(dev_info_t *dip, pm_canblock_t canblock)
{
	PMD_FUNC(pmf, "all_to_normal")
	int		*normal;
	int		i, ncomps, result;
	size_t		size;
	int		changefailed = 0;

	PMD(PMD_ALLNORM, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip)))
	ASSERT(PM_GET_PM_INFO(dip));
	if (pm_get_norm_pwrs(dip, &normal, &size) != DDI_SUCCESS) {
		PMD(PMD_ALLNORM, ("%s: can't get norm pwrs for "
		    "%s@%s(%s#%d)\n", pmf, PM_DEVICE(dip)))
		return (DDI_FAILURE);
	}
	ncomps = PM_NUMCMPTS(dip);
	for (i = 0; i < ncomps; i++) {
		if (pm_set_power(dip, i, normal[i],
		    PM_LEVEL_UPONLY, canblock, 0, &result) != DDI_SUCCESS) {
			changefailed++;
			PMD(PMD_ALLNORM | PMD_FAIL, ("%s: failed to set "
			    "%s@%s(%s#%d)[%d] to %d, errno %d\n", pmf,
			    PM_DEVICE(dip), i, normal[i], result))
		}
	}
	kmem_free(normal, size);
	if (changefailed) {
		PMD(PMD_FAIL, ("%s: failed to set %d comps %s@%s(%s#%d) "
		    "to full power\n", pmf, changefailed, PM_DEVICE(dip)))
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Returns true if all components of device are at normal power
 */
int
pm_all_at_normal(dev_info_t *dip)
{
	PMD_FUNC(pmf, "all_at_normal")
	int		*normal;
	int		i;
	size_t		size;

	PMD(PMD_ALLNORM, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip)))
	if (pm_get_norm_pwrs(dip, &normal, &size) != DDI_SUCCESS) {
		PMD(PMD_ALLNORM, ("%s: can't get normal power\n", pmf))
		return (DDI_FAILURE);
	}
	for (i = 0; i < PM_NUMCMPTS(dip); i++) {
		int current = PM_CURPOWER(dip, i);
		if (normal[i] > current) {
			PMD(PMD_ALLNORM, ("%s: %s@%s(%s#%d) comp=%d, "
			    "norm=%d, cur=%d\n", pmf, PM_DEVICE(dip), i,
			    normal[i], current))
			break;
		}
	}
	kmem_free(normal, size);
	if (i != PM_NUMCMPTS(dip)) {
		return (0);
	}
	return (1);
}

static void bring_pmdep_up(dev_info_t *, int);

static void
bring_wekeeps_up(char *keeper)
{
	PMD_FUNC(pmf, "bring_wekeeps_up")
	int i;
	pm_pdr_t *dp;
	pm_info_t *wku_info;
	char *kept_path;
	dev_info_t *kept;

	if (panicstr) {
		return;
	}
	/*
	 * We process the request even if the keeper detaches because
	 * detach processing expects this to increment kidsupcnt of kept.
	 */
	PMD(PMD_BRING, ("%s: keeper= %s\n", pmf, keeper))
	for (dp = pm_dep_head; dp; dp = dp->pdr_next) {
		if (strcmp(dp->pdr_keeper, keeper) != 0)
			continue;
		for (i = 0; i < dp->pdr_kept_count; i++) {
			kept_path = dp->pdr_kept_paths[i];
			if (kept_path == NULL)
				continue;
			ASSERT(kept_path[0] != '\0');
			if ((kept = pm_name_to_dip(kept_path, 1)) == NULL)
				continue;
			wku_info = PM_GET_PM_INFO(kept);
			if (wku_info == NULL) {
				if (kept)
					ddi_release_devi(kept);
				continue;
			}
			/*
			 * Don't mess with it if it is being detached; it isn't
			 * safe to call its power entry point
			 */
			if (wku_info->pmi_dev_pm_state & PM_DETACHING) {
				if (kept)
					ddi_release_devi(kept);
				continue;
			}
			bring_pmdep_up(kept, 1);
			ddi_release_devi(kept);
		}
	}
}

/*
 * Bring up the 'kept' device passed as argument
 */
static void
bring_pmdep_up(dev_info_t *kept_dip, int hold)
{
	PMD_FUNC(pmf, "bring_pmdep_up")
	int is_all_at_normal = 0;

	/*
	 * If the kept device has been unmanaged, do nothing.
	 */
	if (!PM_GET_PM_INFO(kept_dip))
		return;

	/* Just ignore DIRECT PM devices till they are released. */
	if (!pm_processes_stopped && PM_ISDIRECT(kept_dip) &&
	    !(is_all_at_normal = pm_all_at_normal(kept_dip))) {
		PMD(PMD_BRING, ("%s: can't bring up PM_DIRECT %s@%s(%s#%d) "
		    "controlling process did something else\n", pmf,
		    PM_DEVICE(kept_dip)))
		DEVI(kept_dip)->devi_pm_flags |= PMC_SKIP_BRINGUP;
		return;
	}
	/* if we got here the keeper had a transition from OFF->ON */
	if (hold)
		pm_hold_power(kept_dip);

	if (!is_all_at_normal)
		(void) pm_all_to_normal(kept_dip, PM_CANBLOCK_FAIL);
}

/*
 * A bunch of stuff that belongs only to the next routine (or two)
 */

static const char namestr[] = "NAME=";
static const int nameln = sizeof (namestr) - 1;
static const char pmcompstr[] = "pm-components";

struct pm_comp_pkg {
	pm_comp_t		*comp;
	struct pm_comp_pkg	*next;
};

#define	isdigit(ch)	((ch) >= '0' && (ch) <= '9')

#define	isxdigit(ch)	(isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
			((ch) >= 'A' && (ch) <= 'F'))

/*
 * Rather than duplicate this code ...
3865 * (this code excerpted from the function that follows it) 3866 */ 3867 #define FINISH_COMP { \ 3868 ASSERT(compp); \ 3869 compp->pmc_lnames_sz = size; \ 3870 tp = compp->pmc_lname_buf = kmem_alloc(size, KM_SLEEP); \ 3871 compp->pmc_numlevels = level; \ 3872 compp->pmc_lnames = kmem_alloc(level * sizeof (char *), KM_SLEEP); \ 3873 compp->pmc_lvals = kmem_alloc(level * sizeof (int), KM_SLEEP); \ 3874 compp->pmc_thresh = kmem_alloc(level * sizeof (int), KM_SLEEP); \ 3875 /* copy string out of prop array into buffer */ \ 3876 for (j = 0; j < level; j++) { \ 3877 compp->pmc_thresh[j] = INT_MAX; /* only [0] sticks */ \ 3878 compp->pmc_lvals[j] = lvals[j]; \ 3879 (void) strcpy(tp, lnames[j]); \ 3880 compp->pmc_lnames[j] = tp; \ 3881 tp += lszs[j]; \ 3882 } \ 3883 ASSERT(tp > compp->pmc_lname_buf && tp <= \ 3884 compp->pmc_lname_buf + compp->pmc_lnames_sz); \ 3885 } 3886 3887 /* 3888 * Create (empty) component data structures. 3889 */ 3890 static void 3891 e_pm_create_components(dev_info_t *dip, int num_components) 3892 { 3893 struct pm_component *compp, *ocompp; 3894 int i, size = 0; 3895 3896 ASSERT(!PM_IAM_LOCKING_DIP(dip)); 3897 ASSERT(!DEVI(dip)->devi_pm_components); 3898 ASSERT(!(DEVI(dip)->devi_pm_flags & PMC_COMPONENTS_DONE)); 3899 size = sizeof (struct pm_component) * num_components; 3900 3901 compp = kmem_zalloc(size, KM_SLEEP); 3902 ocompp = compp; 3903 DEVI(dip)->devi_pm_comp_size = size; 3904 DEVI(dip)->devi_pm_num_components = num_components; 3905 PM_LOCK_BUSY(dip); 3906 for (i = 0; i < num_components; i++) { 3907 compp->pmc_timestamp = gethrestime_sec(); 3908 compp->pmc_norm_pwr = (uint_t)-1; 3909 compp++; 3910 } 3911 PM_UNLOCK_BUSY(dip); 3912 DEVI(dip)->devi_pm_components = ocompp; 3913 DEVI(dip)->devi_pm_flags |= PMC_COMPONENTS_DONE; 3914 } 3915 3916 /* 3917 * Parse hex or decimal value from char string 3918 */ 3919 static char * 3920 pm_parsenum(char *cp, int *valp) 3921 { 3922 int ch, offset; 3923 char numbuf[256]; 3924 char *np = numbuf; 3925 int value = 0; 3926 3927 ch = *cp++; 3928 if (isdigit(ch)) { 3929 if (ch == '0') { 3930 if ((ch = *cp++) == 'x' || ch == 'X') { 3931 ch = *cp++; 3932 while (isxdigit(ch)) { 3933 *np++ = (char)ch; 3934 ch = *cp++; 3935 } 3936 *np = 0; 3937 cp--; 3938 goto hexval; 3939 } else { 3940 goto digit; 3941 } 3942 } else { 3943 digit: 3944 while (isdigit(ch)) { 3945 *np++ = (char)ch; 3946 ch = *cp++; 3947 } 3948 *np = 0; 3949 cp--; 3950 goto decval; 3951 } 3952 } else 3953 return (NULL); 3954 3955 hexval: 3956 offset = 0; 3957 for (np = numbuf; *np; np++) { 3958 if (*np >= 'a' && *np <= 'f') 3959 offset = 'a' - 10; 3960 else if (*np >= 'A' && *np <= 'F') 3961 offset = 'A' - 10; 3962 else if (*np >= '0' && *np <= '9') 3963 offset = '0'; 3964 value *= 16; 3965 value += *np - offset; 3966 } 3967 *valp = value; 3968 return (cp); 3969 3970 decval: 3971 offset = '0'; 3972 for (np = numbuf; *np; np++) { 3973 value *= 10; 3974 value += *np - offset; 3975 } 3976 *valp = value; 3977 return (cp); 3978 } 3979 3980 /* 3981 * Set max (previously documented as "normal") power. 3982 */ 3983 static void 3984 e_pm_set_max_power(dev_info_t *dip, int component_number, int level) 3985 { 3986 PM_CP(dip, component_number)->pmc_norm_pwr = level; 3987 } 3988 3989 /* 3990 * Get max (previously documented as "normal") power. 
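 * For example, a component created from pm-components entries "0=off" and
 * "1=on" has a max power of 1; pm_autoconfig() records that value via
 * e_pm_set_max_power() when the property is parsed.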
3991 */ 3992 static int 3993 e_pm_get_max_power(dev_info_t *dip, int component_number) 3994 { 3995 return (PM_CP(dip, component_number)->pmc_norm_pwr); 3996 } 3997 3998 /* 3999 * Internal routine for destroying components 4000 * It is called even when there might not be any, so it must be forgiving. 4001 */ 4002 static void 4003 e_pm_destroy_components(dev_info_t *dip) 4004 { 4005 int i; 4006 struct pm_component *cp; 4007 4008 ASSERT(!PM_IAM_LOCKING_DIP(dip)); 4009 if (PM_NUMCMPTS(dip) == 0) 4010 return; 4011 cp = DEVI(dip)->devi_pm_components; 4012 ASSERT(cp); 4013 for (i = 0; i < PM_NUMCMPTS(dip); i++, cp++) { 4014 int nlevels = cp->pmc_comp.pmc_numlevels; 4015 kmem_free(cp->pmc_comp.pmc_lvals, nlevels * sizeof (int)); 4016 kmem_free(cp->pmc_comp.pmc_thresh, nlevels * sizeof (int)); 4017 /* 4018 * For BC nodes, the rest is static in bc_comp, so skip it 4019 */ 4020 if (PM_ISBC(dip)) 4021 continue; 4022 kmem_free(cp->pmc_comp.pmc_name, cp->pmc_comp.pmc_name_sz); 4023 kmem_free(cp->pmc_comp.pmc_lnames, nlevels * sizeof (char *)); 4024 kmem_free(cp->pmc_comp.pmc_lname_buf, 4025 cp->pmc_comp.pmc_lnames_sz); 4026 } 4027 kmem_free(DEVI(dip)->devi_pm_components, DEVI(dip)->devi_pm_comp_size); 4028 DEVI(dip)->devi_pm_components = NULL; 4029 DEVI(dip)->devi_pm_num_components = 0; 4030 DEVI(dip)->devi_pm_flags &= 4031 ~(PMC_COMPONENTS_DONE | PMC_COMPONENTS_FAILED); 4032 } 4033 4034 /* 4035 * Read the pm-components property (if there is one) and use it to set up 4036 * components. Returns a pointer to an array of component structures if 4037 * pm-components found and successfully parsed, else returns NULL. 4038 * Sets error return *errp to true to indicate a failure (as opposed to no 4039 * property being present). 4040 */ 4041 pm_comp_t * 4042 pm_autoconfig(dev_info_t *dip, int *errp) 4043 { 4044 PMD_FUNC(pmf, "autoconfig") 4045 uint_t nelems; 4046 char **pp; 4047 pm_comp_t *compp = NULL; 4048 int i, j, level, components = 0; 4049 size_t size = 0; 4050 struct pm_comp_pkg *p, *ptail; 4051 struct pm_comp_pkg *phead = NULL; 4052 int *lvals = NULL; 4053 int *lszs = NULL; 4054 int *np = NULL; 4055 int npi = 0; 4056 char **lnames = NULL; 4057 char *cp, *tp; 4058 pm_comp_t *ret = NULL; 4059 4060 ASSERT(!PM_IAM_LOCKING_DIP(dip)); 4061 *errp = 0; /* assume success */ 4062 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 4063 (char *)pmcompstr, &pp, &nelems) != DDI_PROP_SUCCESS) { 4064 return (NULL); 4065 } 4066 4067 if (nelems < 3) { /* need at least one name and two levels */ 4068 goto errout; 4069 } 4070 4071 /* 4072 * pm_create_components is no longer allowed 4073 */ 4074 if (PM_NUMCMPTS(dip) != 0) { 4075 PMD(PMD_ERROR, ("%s: %s@%s(%s#%d) has %d comps\n", 4076 pmf, PM_DEVICE(dip), PM_NUMCMPTS(dip))) 4077 goto errout; 4078 } 4079 4080 lvals = kmem_alloc(nelems * sizeof (int), KM_SLEEP); 4081 lszs = kmem_alloc(nelems * sizeof (int), KM_SLEEP); 4082 lnames = kmem_alloc(nelems * sizeof (char *), KM_SLEEP); 4083 np = kmem_alloc(nelems * sizeof (int), KM_SLEEP); 4084 4085 level = 0; 4086 phead = NULL; 4087 for (i = 0; i < nelems; i++) { 4088 cp = pp[i]; 4089 if (!isdigit(*cp)) { /* must be name */ 4090 if (strncmp(cp, namestr, nameln) != 0) { 4091 goto errout; 4092 } 4093 if (i != 0) { 4094 if (level == 0) { /* no level spec'd */ 4095 PMD(PMD_ERROR, ("%s: no level spec'd\n", 4096 pmf)) 4097 goto errout; 4098 } 4099 np[npi++] = lvals[level - 1]; 4100 /* finish up previous component levels */ 4101 FINISH_COMP; 4102 } 4103 cp += nameln; 4104 if (!*cp) { 4105 PMD(PMD_ERROR, ("%s: nsa\n", 
pmf)) 4106 goto errout; 4107 } 4108 p = kmem_zalloc(sizeof (*phead), KM_SLEEP); 4109 if (phead == NULL) { 4110 phead = ptail = p; 4111 } else { 4112 ptail->next = p; 4113 ptail = p; 4114 } 4115 compp = p->comp = kmem_zalloc(sizeof (pm_comp_t), 4116 KM_SLEEP); 4117 compp->pmc_name_sz = strlen(cp) + 1; 4118 compp->pmc_name = kmem_zalloc(compp->pmc_name_sz, 4119 KM_SLEEP); 4120 (void) strncpy(compp->pmc_name, cp, compp->pmc_name_sz); 4121 components++; 4122 level = 0; 4123 } else { /* better be power level <num>=<name> */ 4124 #ifdef DEBUG 4125 tp = cp; 4126 #endif 4127 if (i == 0 || 4128 (cp = pm_parsenum(cp, &lvals[level])) == NULL) { 4129 PMD(PMD_ERROR, ("%s: parsenum(%s)\n", pmf, tp)) 4130 goto errout; 4131 } 4132 #ifdef DEBUG 4133 tp = cp; 4134 #endif 4135 if (*cp++ != '=' || !*cp) { 4136 PMD(PMD_ERROR, ("%s: ex =, got %s\n", pmf, tp)) 4137 goto errout; 4138 } 4139 4140 lszs[level] = strlen(cp) + 1; 4141 size += lszs[level]; 4142 lnames[level] = cp; /* points into prop string */ 4143 level++; 4144 } 4145 } 4146 np[npi++] = lvals[level - 1]; 4147 if (level == 0) { /* ended with a name */ 4148 PMD(PMD_ERROR, ("%s: ewn\n", pmf)) 4149 goto errout; 4150 } 4151 FINISH_COMP; 4152 4153 4154 /* 4155 * Now we have a list of components--we have to return instead an 4156 * array of them, but we can just copy the top level and leave 4157 * the rest as is 4158 */ 4159 (void) e_pm_create_components(dip, components); 4160 for (i = 0; i < components; i++) 4161 e_pm_set_max_power(dip, i, np[i]); 4162 4163 ret = kmem_zalloc(components * sizeof (pm_comp_t), KM_SLEEP); 4164 for (i = 0, p = phead; i < components; i++) { 4165 ASSERT(p); 4166 /* 4167 * Now sanity-check values: levels must be monotonically 4168 * increasing 4169 */ 4170 if (p->comp->pmc_numlevels < 2) { 4171 PMD(PMD_ERROR, ("%s: comp %s of %s@%s(%s#%d) only %d " 4172 "levels\n", pmf, 4173 p->comp->pmc_name, PM_DEVICE(dip), 4174 p->comp->pmc_numlevels)) 4175 goto errout; 4176 } 4177 for (j = 0; j < p->comp->pmc_numlevels; j++) { 4178 if ((p->comp->pmc_lvals[j] < 0) || ((j > 0) && 4179 (p->comp->pmc_lvals[j] <= 4180 p->comp->pmc_lvals[j - 1]))) { 4181 PMD(PMD_ERROR, ("%s: comp %s of %s@%s(%s#%d) " 4182 "not mono. incr, %d follows %d\n", pmf, 4183 p->comp->pmc_name, PM_DEVICE(dip), 4184 p->comp->pmc_lvals[j], 4185 p->comp->pmc_lvals[j - 1])) 4186 goto errout; 4187 } 4188 } 4189 ret[i] = *p->comp; /* struct assignment */ 4190 for (j = 0; j < i; j++) { 4191 /* 4192 * Test for unique component names 4193 */ 4194 if (strcmp(ret[j].pmc_name, ret[i].pmc_name) == 0) { 4195 PMD(PMD_ERROR, ("%s: %s of %s@%s(%s#%d) not " 4196 "unique\n", pmf, ret[j].pmc_name, 4197 PM_DEVICE(dip))) 4198 goto errout; 4199 } 4200 } 4201 ptail = p; 4202 p = p->next; 4203 phead = p; /* errout depends on phead making sense */ 4204 kmem_free(ptail->comp, sizeof (*ptail->comp)); 4205 kmem_free(ptail, sizeof (*ptail)); 4206 } 4207 out: 4208 ddi_prop_free(pp); 4209 if (lvals) 4210 kmem_free(lvals, nelems * sizeof (int)); 4211 if (lszs) 4212 kmem_free(lszs, nelems * sizeof (int)); 4213 if (lnames) 4214 kmem_free(lnames, nelems * sizeof (char *)); 4215 if (np) 4216 kmem_free(np, nelems * sizeof (int)); 4217 return (ret); 4218 4219 errout: 4220 e_pm_destroy_components(dip); 4221 *errp = 1; /* signal failure */ 4222 cmn_err(CE_CONT, "!pm: %s property ", pmcompstr); 4223 for (i = 0; i < nelems - 1; i++) 4224 cmn_err(CE_CONT, "!'%s', ", pp[i]); 4225 if (nelems != 0) 4226 cmn_err(CE_CONT, "!'%s'", pp[nelems - 1]); 4227 cmn_err(CE_CONT, "! 
for %s@%s(%s#%d) is ill-formed.\n", PM_DEVICE(dip));
	for (p = phead; p; ) {
		pm_comp_t *pp;
		int n;

		ptail = p;
		/*
		 * Free component data structures
		 */
		pp = p->comp;
		n = pp->pmc_numlevels;
		if (pp->pmc_name_sz) {
			kmem_free(pp->pmc_name, pp->pmc_name_sz);
		}
		if (pp->pmc_lnames_sz) {
			kmem_free(pp->pmc_lname_buf, pp->pmc_lnames_sz);
		}
		if (pp->pmc_lnames) {
			kmem_free(pp->pmc_lnames, n * (sizeof (char *)));
		}
		if (pp->pmc_thresh) {
			kmem_free(pp->pmc_thresh, n * (sizeof (int)));
		}
		if (pp->pmc_lvals) {
			kmem_free(pp->pmc_lvals, n * (sizeof (int)));
		}
		p = ptail->next;
		kmem_free(ptail, sizeof (*ptail));
	}
	if (ret != NULL)
		kmem_free(ret, components * sizeof (pm_comp_t));
	ret = NULL;
	goto out;
}

/*
 * Set threshold values for a device's components by dividing the target
 * threshold (base) by the number of transitions and assigning each
 * transition that threshold.  This will get the entire device down in the
 * target time if all components are idle and even if there are dependencies
 * among components.
 *
 * For example, a device with one three-level component and one two-level
 * component has three transitions; a base of 30 seconds gives a target of
 * 28 seconds (95% of base), so each transition gets a 9 second threshold
 * and the 1 second remainder is distributed below.
 *
 * Devices may well get powered all the way down before the target time, but
 * at least the EPA will be happy.
 */
void
pm_set_device_threshold(dev_info_t *dip, int base, int flag)
{
	PMD_FUNC(pmf, "set_device_threshold")
	int target_threshold = (base * 95) / 100;
	int level, comp;		/* loop counters */
	int transitions = 0;
	int ncomp = PM_NUMCMPTS(dip);
	int thresh;
	int remainder;
	pm_comp_t *pmc;
	int i;

	ASSERT(!PM_IAM_LOCKING_DIP(dip));
	PM_LOCK_DIP(dip);
	/*
	 * First we handle the easy one.  If we're setting the default
	 * threshold for a node with children, then we set it to the
	 * default nexus threshold (currently 0) and mark it as default
	 * nexus threshold instead
	 */
	if (PM_IS_NEXUS(dip)) {
		if (flag == PMC_DEF_THRESH) {
			PMD(PMD_THRESH, ("%s: [%s@%s(%s#%d) NEXDEF]\n", pmf,
			    PM_DEVICE(dip)))
			thresh = pm_default_nexus_threshold;
			for (comp = 0; comp < ncomp; comp++) {
				pmc = &PM_CP(dip, comp)->pmc_comp;
				for (level = 1; level < pmc->pmc_numlevels;
				    level++) {
					pmc->pmc_thresh[level] = thresh;
				}
			}
			DEVI(dip)->devi_pm_dev_thresh =
			    pm_default_nexus_threshold;
			/*
			 * If the nexus node is being reconfigured back to
			 * the default threshold, adjust the notlowest count.
			 */
			if (DEVI(dip)->devi_pm_flags &
			    (PMC_DEV_THRESH|PMC_COMP_THRESH)) {
				PM_LOCK_POWER(dip);
				for (i = 0; i < PM_NUMCMPTS(dip); i++) {
					if (PM_CURPOWER(dip, i) == 0)
						continue;
					mutex_enter(&pm_compcnt_lock);
					ASSERT(pm_comps_notlowest);
					pm_comps_notlowest--;
					PMD(PMD_LEVEL, ("%s: %s@%s(%s#%d) decr "
					    "notlowest to %d\n", pmf,
					    PM_DEVICE(dip), pm_comps_notlowest))
					if (pm_comps_notlowest == 0)
						pm_ppm_notify_all_lowest(dip,
						    PM_ALL_LOWEST);
					mutex_exit(&pm_compcnt_lock);
				}
				PM_UNLOCK_POWER(dip);
			}
			DEVI(dip)->devi_pm_flags &= PMC_THRESH_NONE;
			DEVI(dip)->devi_pm_flags |= PMC_NEXDEF_THRESH;
			PM_UNLOCK_DIP(dip);
			return;
		} else if (DEVI(dip)->devi_pm_flags & PMC_NEXDEF_THRESH) {
			/*
			 * If the nexus node is being configured for a
			 * non-default threshold, include that node in
			 * the notlowest accounting.
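			 * (For example, a nexus component sitting at a
			 * non-zero level was excluded from
			 * pm_comps_notlowest while the node used the default
			 * nexus threshold, so it must be counted back in
			 * here.)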
			 */
			PM_LOCK_POWER(dip);
			for (i = 0; i < PM_NUMCMPTS(dip); i++) {
				if (PM_CURPOWER(dip, i) == 0)
					continue;
				mutex_enter(&pm_compcnt_lock);
				if (pm_comps_notlowest == 0)
					pm_ppm_notify_all_lowest(dip,
					    PM_NOT_ALL_LOWEST);
				pm_comps_notlowest++;
				PMD(PMD_LEVEL, ("%s: %s@%s(%s#%d) incr "
				    "notlowest to %d\n", pmf,
				    PM_DEVICE(dip), pm_comps_notlowest))
				mutex_exit(&pm_compcnt_lock);
			}
			PM_UNLOCK_POWER(dip);
		}
	}
	/*
	 * Compute the total number of transitions for all components
	 * of the device.  Distribute the threshold evenly over them
	 */
	for (comp = 0; comp < ncomp; comp++) {
		pmc = &PM_CP(dip, comp)->pmc_comp;
		ASSERT(pmc->pmc_numlevels > 1);
		transitions += pmc->pmc_numlevels - 1;
	}
	ASSERT(transitions);
	thresh = target_threshold / transitions;

	for (comp = 0; comp < ncomp; comp++) {
		pmc = &PM_CP(dip, comp)->pmc_comp;
		for (level = 1; level < pmc->pmc_numlevels; level++) {
			pmc->pmc_thresh[level] = thresh;
		}
	}

#ifdef DEBUG
	for (comp = 0; comp < ncomp; comp++) {
		pmc = &PM_CP(dip, comp)->pmc_comp;
		for (level = 1; level < pmc->pmc_numlevels; level++) {
			PMD(PMD_THRESH, ("%s: thresh before %s@%s(%s#%d) "
			    "comp=%d, level=%d, %d\n", pmf, PM_DEVICE(dip),
			    comp, level, pmc->pmc_thresh[level]))
		}
	}
#endif
	/*
	 * Distribute any remainder until it is all gone
	 */
	remainder = target_threshold - thresh * transitions;
	level = 1;
#ifdef DEBUG
	PMD(PMD_THRESH, ("%s: remainder=%d target_threshold=%d thresh=%d "
	    "trans=%d\n", pmf, remainder, target_threshold, thresh,
	    transitions))
#endif
	while (remainder > 0) {
		comp = 0;
		while (remainder && (comp < ncomp)) {
			pmc = &PM_CP(dip, comp)->pmc_comp;
			if (level < pmc->pmc_numlevels) {
				pmc->pmc_thresh[level] += 1;
				remainder--;
			}
			comp++;
		}
		level++;
	}
#ifdef DEBUG
	for (comp = 0; comp < ncomp; comp++) {
		pmc = &PM_CP(dip, comp)->pmc_comp;
		for (level = 1; level < pmc->pmc_numlevels; level++) {
			PMD(PMD_THRESH, ("%s: thresh after %s@%s(%s#%d) "
			    "comp=%d level=%d, %d\n", pmf, PM_DEVICE(dip),
			    comp, level, pmc->pmc_thresh[level]))
		}
	}
#endif
	ASSERT(PM_IAM_LOCKING_DIP(dip));
	DEVI(dip)->devi_pm_dev_thresh = base;
	DEVI(dip)->devi_pm_flags &= PMC_THRESH_NONE;
	DEVI(dip)->devi_pm_flags |= flag;
	PM_UNLOCK_DIP(dip);
}

/*
 * Called when there is no old-style platform power management driver
 */
static int
ddi_no_platform_power(power_req_t *req)
{
	_NOTE(ARGUNUSED(req))
	return (DDI_FAILURE);
}

/*
 * This function calls the entry point supplied by the platform-specific
 * pm driver to bring the device component 'pm_cmpt' to power level 'pm_level'.
 * The use of a global for getting the function name from the
 * platform-specific pm driver is not ideal, but it is simple and efficient.
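 * A platform pm driver claims the hook by pointing the global at its own
 * handler, e.g. from its attach (a hypothetical sketch; mypm_power is not
 * a function in this file):
 *
 *	static int mypm_power(power_req_t *reqp);
 *	...
 *	pm_platform_power = mypm_power;
 *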
4439 * The previous property lookup was being done in the idle loop on swift 4440 * systems without pmc chips and hurt deskbench performance as well as 4441 * violating scheduler locking rules 4442 */ 4443 int (*pm_platform_power)(power_req_t *) = ddi_no_platform_power; 4444 4445 /* 4446 * Old obsolete interface for a device to request a power change (but only 4447 * an increase in power) 4448 */ 4449 int 4450 ddi_dev_is_needed(dev_info_t *dip, int cmpt, int level) 4451 { 4452 return (pm_raise_power(dip, cmpt, level)); 4453 } 4454 4455 /* 4456 * The old obsolete interface to platform power management. Only used by 4457 * Gypsy platform and APM on X86. 4458 */ 4459 int 4460 ddi_power(dev_info_t *dip, int pm_cmpt, int pm_level) 4461 { 4462 power_req_t request; 4463 4464 request.request_type = PMR_SET_POWER; 4465 request.req.set_power_req.who = dip; 4466 request.req.set_power_req.cmpt = pm_cmpt; 4467 request.req.set_power_req.level = pm_level; 4468 return (ddi_ctlops(dip, dip, DDI_CTLOPS_POWER, &request, NULL)); 4469 } 4470 4471 /* 4472 * A driver can invoke this from its detach routine when DDI_SUSPEND is 4473 * passed. Returns true if subsequent processing could result in power being 4474 * removed from the device. The arg is not currently used because it is 4475 * implicit in the operation of cpr/DR. 4476 */ 4477 int 4478 ddi_removing_power(dev_info_t *dip) 4479 { 4480 _NOTE(ARGUNUSED(dip)) 4481 return (pm_powering_down); 4482 } 4483 4484 /* 4485 * Returns true if a device indicates that its parent handles suspend/resume 4486 * processing for it. 4487 */ 4488 int 4489 e_ddi_parental_suspend_resume(dev_info_t *dip) 4490 { 4491 return (DEVI(dip)->devi_pm_flags & PMC_PARENTAL_SR); 4492 } 4493 4494 /* 4495 * Called for devices which indicate that their parent does suspend/resume 4496 * handling for them 4497 */ 4498 int 4499 e_ddi_suspend(dev_info_t *dip, ddi_detach_cmd_t cmd) 4500 { 4501 power_req_t request; 4502 request.request_type = PMR_SUSPEND; 4503 request.req.suspend_req.who = dip; 4504 request.req.suspend_req.cmd = cmd; 4505 return (ddi_ctlops(dip, dip, DDI_CTLOPS_POWER, &request, NULL)); 4506 } 4507 4508 /* 4509 * Called for devices which indicate that their parent does suspend/resume 4510 * handling for them 4511 */ 4512 int 4513 e_ddi_resume(dev_info_t *dip, ddi_attach_cmd_t cmd) 4514 { 4515 power_req_t request; 4516 request.request_type = PMR_RESUME; 4517 request.req.resume_req.who = dip; 4518 request.req.resume_req.cmd = cmd; 4519 return (ddi_ctlops(dip, dip, DDI_CTLOPS_POWER, &request, NULL)); 4520 } 4521 4522 /* 4523 * Old obsolete exported interface for drivers to create components. 4524 * This is now handled by exporting the pm-components property. 4525 */ 4526 int 4527 pm_create_components(dev_info_t *dip, int num_components) 4528 { 4529 PMD_FUNC(pmf, "pm_create_components") 4530 4531 if (num_components < 1) 4532 return (DDI_FAILURE); 4533 4534 if (!DEVI_IS_ATTACHING(dip)) { 4535 return (DDI_FAILURE); 4536 } 4537 4538 /* don't need to lock dip because attach is single threaded */ 4539 if (DEVI(dip)->devi_pm_components) { 4540 PMD(PMD_ERROR, ("%s: %s@%s(%s#%d) already has %d\n", pmf, 4541 PM_DEVICE(dip), PM_NUMCMPTS(dip))) 4542 return (DDI_FAILURE); 4543 } 4544 e_pm_create_components(dip, num_components); 4545 DEVI(dip)->devi_pm_flags |= PMC_BC; 4546 e_pm_default_components(dip, num_components); 4547 return (DDI_SUCCESS); 4548 } 4549 4550 /* 4551 * Obsolete interface previously called by drivers to destroy their components 4552 * at detach time. This is now done automatically. 
However, we need to keep
 * this for the old drivers.
 */
void
pm_destroy_components(dev_info_t *dip)
{
	PMD_FUNC(pmf, "pm_destroy_components")
	dev_info_t *pdip = ddi_get_parent(dip);

	PMD(PMD_REMDEV | PMD_KIDSUP, ("%s: %s@%s(%s#%d)\n", pmf,
	    PM_DEVICE(dip)))
	ASSERT(DEVI_IS_DETACHING(dip));
#ifdef DEBUG
	if (!PM_ISBC(dip))
		cmn_err(CE_WARN, "!driver exporting pm-components property "
		    "(%s@%s) calls pm_destroy_components", PM_NAME(dip),
		    PM_ADDR(dip));
#endif
	/*
	 * We ignore this unless this is an old-style driver, except for
	 * printing the message above
	 */
	if (PM_NUMCMPTS(dip) == 0 || !PM_ISBC(dip)) {
		PMD(PMD_REMDEV, ("%s: ignore %s@%s(%s#%d)\n", pmf,
		    PM_DEVICE(dip)))
		return;
	}
	ASSERT(PM_GET_PM_INFO(dip));

	/*
	 * pm_unmanage will clear info pointer later, after dealing with
	 * dependencies
	 */
	ASSERT(!PM_GET_PM_SCAN(dip));	/* better be gone already */
	/*
	 * Now adjust parent's kidsupcnt.  We check only comp 0.
	 * Parents that get notification are not adjusted because their
	 * kidsupcnt is always 0 (or 1 during probe and attach).
	 */
	if ((PM_CURPOWER(dip, 0) != 0) && pdip && !PM_WANTS_NOTIFICATION(pdip))
		pm_rele_power(pdip);
#ifdef DEBUG
	else {
		PMD(PMD_KIDSUP, ("%s: kuc stays %s@%s(%s#%d) comps gone\n",
		    pmf, PM_DEVICE(dip)))
	}
#endif
	e_pm_destroy_components(dip);
	/*
	 * Forget we ever knew anything about the components of this device
	 */
	DEVI(dip)->devi_pm_flags &=
	    ~(PMC_BC | PMC_COMPONENTS_DONE | PMC_COMPONENTS_FAILED);
}

/*
 * Exported interface for a driver to set a component busy.  A driver
 * typically calls this before starting I/O on a component and calls
 * pm_idle_component(9F) when the I/O completes.
 */
int
pm_busy_component(dev_info_t *dip, int cmpt)
{
	struct pm_component *cp;

	ASSERT(dip != NULL);
	if (!e_pm_valid_info(dip, NULL) || !e_pm_valid_comp(dip, cmpt, &cp))
		return (DDI_FAILURE);
	PM_LOCK_BUSY(dip);
	cp->pmc_busycount++;
	cp->pmc_timestamp = 0;
	PM_UNLOCK_BUSY(dip);
	return (DDI_SUCCESS);
}

/*
 * Exported interface for a driver to set a component idle.
 */
int
pm_idle_component(dev_info_t *dip, int cmpt)
{
	PMD_FUNC(pmf, "pm_idle_component")
	struct pm_component *cp;
	pm_scan_t *scanp = PM_GET_PM_SCAN(dip);

	if (!e_pm_valid_info(dip, NULL) || !e_pm_valid_comp(dip, cmpt, &cp))
		return (DDI_FAILURE);

	PM_LOCK_BUSY(dip);
	if (cp->pmc_busycount) {
		if (--(cp->pmc_busycount) == 0)
			cp->pmc_timestamp = gethrestime_sec();
	} else {
		cp->pmc_timestamp = gethrestime_sec();
	}

	PM_UNLOCK_BUSY(dip);

	/*
	 * if the device becomes idle during the idle down period, try to
	 * scan it down
	 */
	if (scanp && PM_IS_PID(dip)) {
		PMD(PMD_IDLEDOWN, ("%s: %s@%s(%s#%d) idle.\n", pmf,
		    PM_DEVICE(dip)))
		pm_rescan(dip);
		return (DDI_SUCCESS);
	}

	/*
	 * handle scan not running with nexus threshold == 0
	 */
	if (PM_IS_NEXUS(dip) && (cp->pmc_busycount == 0)) {
		pm_rescan(dip);
	}

	return (DDI_SUCCESS);
}

/*
 * This is the old obsolete interface called by drivers to set their normal
 * power.  Thus we can't fix its behavior or return a value.
 * This functionality is replaced by the pm-components property.
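 * (A driver that once called pm_create_components(9F) and then this routine
 * instead exports, for example,
 *
 *	pm-components = "NAME=card", "0=off", "1=on";
 *
 * and the highest level listed becomes the component's normal power with
 * no further driver action.  The value shown is illustrative only.)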
4673 * We'll only get components destroyed while no power management is 4674 * going on (and the device is detached), so we don't need a mutex here 4675 */ 4676 void 4677 pm_set_normal_power(dev_info_t *dip, int comp, int level) 4678 { 4679 PMD_FUNC(pmf, "set_normal_power") 4680 #ifdef DEBUG 4681 if (!PM_ISBC(dip)) 4682 cmn_err(CE_WARN, "!call to pm_set_normal_power() by %s@%s " 4683 "(driver exporting pm-components property) ignored", 4684 PM_NAME(dip), PM_ADDR(dip)); 4685 #endif 4686 if (PM_ISBC(dip)) { 4687 PMD(PMD_NORM, ("%s: %s@%s(%s#%d) set normal power comp=%d, " 4688 "level=%d\n", pmf, PM_DEVICE(dip), comp, level)) 4689 e_pm_set_max_power(dip, comp, level); 4690 e_pm_default_levels(dip, PM_CP(dip, comp), level); 4691 } 4692 } 4693 4694 /* 4695 * Called on a successfully detached driver to free pm resources 4696 */ 4697 static void 4698 pm_stop(dev_info_t *dip) 4699 { 4700 PMD_FUNC(pmf, "stop") 4701 dev_info_t *pdip = ddi_get_parent(dip); 4702 4703 ASSERT(!PM_IAM_LOCKING_DIP(dip)); 4704 /* stopping scan, destroy scan data structure */ 4705 if (!PM_ISBC(dip)) { 4706 pm_scan_stop(dip); 4707 pm_scan_fini(dip); 4708 } 4709 4710 if (PM_GET_PM_INFO(dip) != NULL) { 4711 if (pm_unmanage(dip) == DDI_SUCCESS) { 4712 /* 4713 * Old style driver may have called 4714 * pm_destroy_components already, but just in case ... 4715 */ 4716 e_pm_destroy_components(dip); 4717 } else { 4718 PMD(PMD_FAIL, ("%s: can't pm_unmanage %s@%s(%s#%d)\n", 4719 pmf, PM_DEVICE(dip))) 4720 } 4721 } else { 4722 if (PM_NUMCMPTS(dip)) 4723 e_pm_destroy_components(dip); 4724 else { 4725 if (DEVI(dip)->devi_pm_flags & PMC_NOPMKID) { 4726 DEVI(dip)->devi_pm_flags &= ~PMC_NOPMKID; 4727 if (pdip && !PM_WANTS_NOTIFICATION(pdip)) { 4728 pm_rele_power(pdip); 4729 } else if (pdip && 4730 MDI_VHCI(pdip) && MDI_CLIENT(dip)) { 4731 (void) mdi_power(pdip, 4732 MDI_PM_RELE_POWER, 4733 (void *)dip, NULL, 0); 4734 } 4735 } 4736 } 4737 } 4738 } 4739 4740 /* 4741 * The node is the subject of a reparse pm props ioctl. Throw away the old 4742 * info and start over. 4743 */ 4744 int 4745 e_new_pm_props(dev_info_t *dip) 4746 { 4747 if (PM_GET_PM_INFO(dip) != NULL) { 4748 pm_stop(dip); 4749 4750 if (e_pm_manage(dip, PM_STYLE_NEW) != DDI_SUCCESS) { 4751 return (DDI_FAILURE); 4752 } 4753 } 4754 e_pm_props(dip); 4755 return (DDI_SUCCESS); 4756 } 4757 4758 /* 4759 * Device has been attached, so process its pm properties 4760 */ 4761 void 4762 e_pm_props(dev_info_t *dip) 4763 { 4764 char *pp; 4765 int len; 4766 int flags = 0; 4767 int propflag = DDI_PROP_DONTPASS|DDI_PROP_CANSLEEP; 4768 4769 /* 4770 * It doesn't matter if we do this more than once, we should always 4771 * get the same answers, and if not, then the last one in is the 4772 * best one. 4773 */ 4774 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, propflag, "pm-hardware-state", 4775 (caddr_t)&pp, &len) == DDI_PROP_SUCCESS) { 4776 if (strcmp(pp, "needs-suspend-resume") == 0) { 4777 flags = PMC_NEEDS_SR; 4778 } else if (strcmp(pp, "no-suspend-resume") == 0) { 4779 flags = PMC_NO_SR; 4780 } else if (strcmp(pp, "parental-suspend-resume") == 0) { 4781 flags = PMC_PARENTAL_SR; 4782 } else { 4783 cmn_err(CE_NOTE, "!device %s@%s has unrecognized " 4784 "%s property value '%s'", PM_NAME(dip), 4785 PM_ADDR(dip), "pm-hardware-state", pp); 4786 } 4787 kmem_free(pp, len); 4788 } 4789 /* 4790 * This next segment (PMC_WANTS_NOTIFY) is in 4791 * support of nexus drivers which will want to be involved in 4792 * (or at least notified of) their child node's power level transitions. 
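 * A nexus that wants this typically creates the boolean property in its
 * own attach, e.g. (hypothetical sketch):
 *
 *	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
 *	    "pm-want-child-notification?", NULL, 0);
 *
 * and must also supply the bus_power() support tested for by
 * PM_HAS_BUS_POWER below.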
4793 * "pm-want-child-notification?" is defined by the parent. 4794 */ 4795 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, propflag, 4796 "pm-want-child-notification?") && PM_HAS_BUS_POWER(dip)) 4797 flags |= PMC_WANTS_NOTIFY; 4798 ASSERT(PM_HAS_BUS_POWER(dip) || !ddi_prop_exists(DDI_DEV_T_ANY, 4799 dip, propflag, "pm-want-child-notification?")); 4800 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, propflag, 4801 "no-involuntary-power-cycles")) 4802 flags |= PMC_NO_INVOL; 4803 /* 4804 * Is the device a CPU device? 4805 */ 4806 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, propflag, "pm-class", 4807 (caddr_t)&pp, &len) == DDI_PROP_SUCCESS) { 4808 if (strcmp(pp, "CPU") == 0) { 4809 flags |= PMC_CPU_DEVICE; 4810 } else { 4811 cmn_err(CE_NOTE, "!device %s@%s has unrecognized " 4812 "%s property value '%s'", PM_NAME(dip), 4813 PM_ADDR(dip), "pm-class", pp); 4814 } 4815 kmem_free(pp, len); 4816 } 4817 /* devfs single threads us */ 4818 DEVI(dip)->devi_pm_flags |= flags; 4819 } 4820 4821 /* 4822 * This is the DDI_CTLOPS_POWER handler that is used when there is no ppm 4823 * driver which has claimed a node. 4824 * Sets old_power in arg struct. 4825 */ 4826 static int 4827 pm_default_ctlops(dev_info_t *dip, dev_info_t *rdip, 4828 ddi_ctl_enum_t ctlop, void *arg, void *result) 4829 { 4830 _NOTE(ARGUNUSED(dip)) 4831 PMD_FUNC(pmf, "ctlops") 4832 power_req_t *reqp = (power_req_t *)arg; 4833 int retval; 4834 dev_info_t *target_dip; 4835 int new_level, old_level, cmpt; 4836 #ifdef PMDDEBUG 4837 char *format; 4838 #endif 4839 4840 /* 4841 * The interface for doing the actual power level changes is now 4842 * through the DDI_CTLOPS_POWER bus_ctl, so that we can plug in 4843 * different platform-specific power control drivers. 4844 * 4845 * This driver implements the "default" version of this interface. 4846 * If no ppm driver has been installed then this interface is called 4847 * instead. 4848 */ 4849 ASSERT(dip == NULL); 4850 switch (ctlop) { 4851 case DDI_CTLOPS_POWER: 4852 switch (reqp->request_type) { 4853 case PMR_PPM_SET_POWER: 4854 { 4855 target_dip = reqp->req.ppm_set_power_req.who; 4856 ASSERT(target_dip == rdip); 4857 new_level = reqp->req.ppm_set_power_req.new_level; 4858 cmpt = reqp->req.ppm_set_power_req.cmpt; 4859 /* pass back old power for the PM_LEVEL_UNKNOWN case */ 4860 old_level = PM_CURPOWER(target_dip, cmpt); 4861 reqp->req.ppm_set_power_req.old_level = old_level; 4862 retval = pm_power(target_dip, cmpt, new_level); 4863 PMD(PMD_PPM, ("%s: PPM_SET_POWER %s@%s(%s#%d)[%d] %d->" 4864 "%d %s\n", pmf, PM_DEVICE(target_dip), cmpt, 4865 old_level, new_level, (retval == DDI_SUCCESS ? 
4866 "chd" : "no chg"))) 4867 return (retval); 4868 } 4869 4870 case PMR_PPM_PRE_DETACH: 4871 case PMR_PPM_POST_DETACH: 4872 case PMR_PPM_PRE_ATTACH: 4873 case PMR_PPM_POST_ATTACH: 4874 case PMR_PPM_PRE_PROBE: 4875 case PMR_PPM_POST_PROBE: 4876 case PMR_PPM_PRE_RESUME: 4877 case PMR_PPM_INIT_CHILD: 4878 case PMR_PPM_UNINIT_CHILD: 4879 #ifdef PMDDEBUG 4880 switch (reqp->request_type) { 4881 case PMR_PPM_PRE_DETACH: 4882 format = "%s: PMR_PPM_PRE_DETACH " 4883 "%s@%s(%s#%d)\n"; 4884 break; 4885 case PMR_PPM_POST_DETACH: 4886 format = "%s: PMR_PPM_POST_DETACH " 4887 "%s@%s(%s#%d) rets %d\n"; 4888 break; 4889 case PMR_PPM_PRE_ATTACH: 4890 format = "%s: PMR_PPM_PRE_ATTACH " 4891 "%s@%s(%s#%d)\n"; 4892 break; 4893 case PMR_PPM_POST_ATTACH: 4894 format = "%s: PMR_PPM_POST_ATTACH " 4895 "%s@%s(%s#%d) rets %d\n"; 4896 break; 4897 case PMR_PPM_PRE_PROBE: 4898 format = "%s: PMR_PPM_PRE_PROBE " 4899 "%s@%s(%s#%d)\n"; 4900 break; 4901 case PMR_PPM_POST_PROBE: 4902 format = "%s: PMR_PPM_POST_PROBE " 4903 "%s@%s(%s#%d) rets %d\n"; 4904 break; 4905 case PMR_PPM_PRE_RESUME: 4906 format = "%s: PMR_PPM_PRE_RESUME " 4907 "%s@%s(%s#%d) rets %d\n"; 4908 break; 4909 case PMR_PPM_INIT_CHILD: 4910 format = "%s: PMR_PPM_INIT_CHILD " 4911 "%s@%s(%s#%d)\n"; 4912 break; 4913 case PMR_PPM_UNINIT_CHILD: 4914 format = "%s: PMR_PPM_UNINIT_CHILD " 4915 "%s@%s(%s#%d)\n"; 4916 break; 4917 default: 4918 break; 4919 } 4920 PMD(PMD_PPM, (format, pmf, PM_DEVICE(rdip), 4921 reqp->req.ppm_config_req.result)) 4922 #endif 4923 return (DDI_SUCCESS); 4924 4925 case PMR_PPM_POWER_CHANGE_NOTIFY: 4926 /* 4927 * Nothing for us to do 4928 */ 4929 ASSERT(reqp->req.ppm_notify_level_req.who == rdip); 4930 PMD(PMD_PPM, ("%s: PMR_PPM_POWER_CHANGE_NOTIFY " 4931 "%s@%s(%s#%d)[%d] %d->%d\n", pmf, 4932 PM_DEVICE(reqp->req.ppm_notify_level_req.who), 4933 reqp->req.ppm_notify_level_req.cmpt, 4934 PM_CURPOWER(reqp->req.ppm_notify_level_req.who, 4935 reqp->req.ppm_notify_level_req.cmpt), 4936 reqp->req.ppm_notify_level_req.new_level)) 4937 return (DDI_SUCCESS); 4938 4939 case PMR_PPM_UNMANAGE: 4940 PMD(PMD_PPM, ("%s: PMR_PPM_UNMANAGE %s@%s(%s#%d)\n", 4941 pmf, PM_DEVICE(rdip))) 4942 return (DDI_SUCCESS); 4943 4944 case PMR_PPM_LOCK_POWER: 4945 pm_lock_power_single(reqp->req.ppm_lock_power_req.who); 4946 return (DDI_SUCCESS); 4947 4948 case PMR_PPM_UNLOCK_POWER: 4949 pm_unlock_power_single( 4950 reqp->req.ppm_unlock_power_req.who); 4951 return (DDI_SUCCESS); 4952 4953 case PMR_PPM_TRY_LOCK_POWER: 4954 *(int *)result = pm_try_locking_power_single( 4955 reqp->req.ppm_lock_power_req.who); 4956 return (DDI_SUCCESS); 4957 4958 case PMR_PPM_POWER_LOCK_OWNER: 4959 target_dip = reqp->req.ppm_power_lock_owner_req.who; 4960 ASSERT(target_dip == rdip); 4961 reqp->req.ppm_power_lock_owner_req.owner = 4962 DEVI(rdip)->devi_busy_thread; 4963 return (DDI_SUCCESS); 4964 default: 4965 PMD(PMD_ERROR, ("%s: default!\n", pmf)) 4966 return (DDI_FAILURE); 4967 } 4968 4969 default: 4970 PMD(PMD_ERROR, ("%s: unknown\n", pmf)) 4971 return (DDI_FAILURE); 4972 } 4973 } 4974 4975 /* 4976 * We overload the bus_ctl ops here--perhaps we ought to have a distinct 4977 * power_ops struct for this functionality instead? 4978 * However, we only ever do this on a ppm driver. 
4979 */ 4980 int 4981 pm_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v) 4982 { 4983 int (*fp)(); 4984 4985 /* if no ppm handler, call the default routine */ 4986 if (d == NULL) { 4987 return (pm_default_ctlops(d, r, op, a, v)); 4988 } 4989 if (!d || !r) 4990 return (DDI_FAILURE); 4991 ASSERT(DEVI(d)->devi_ops && DEVI(d)->devi_ops->devo_bus_ops && 4992 DEVI(d)->devi_ops->devo_bus_ops->bus_ctl); 4993 4994 fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl; 4995 return ((*fp)(d, r, op, a, v)); 4996 } 4997 4998 /* 4999 * Called on a node when attach completes or the driver makes its first pm 5000 * call (whichever comes first). 5001 * In the attach case, device may not be power manageable at all. 5002 * Don't need to lock the dip because we're single threaded by the devfs code 5003 */ 5004 static int 5005 pm_start(dev_info_t *dip) 5006 { 5007 PMD_FUNC(pmf, "start") 5008 int ret; 5009 dev_info_t *pdip = ddi_get_parent(dip); 5010 int e_pm_manage(dev_info_t *, int); 5011 void pm_noinvol_specd(dev_info_t *dip); 5012 5013 e_pm_props(dip); 5014 pm_noinvol_specd(dip); 5015 /* 5016 * If this dip has already been processed, don't mess with it 5017 * (but decrement the speculative count we did above, as whatever 5018 * code put it under pm already will have dealt with it) 5019 */ 5020 if (PM_GET_PM_INFO(dip)) { 5021 PMD(PMD_KIDSUP, ("%s: pm already done for %s@%s(%s#%d)\n", 5022 pmf, PM_DEVICE(dip))) 5023 return (0); 5024 } 5025 ret = e_pm_manage(dip, PM_STYLE_UNKNOWN); 5026 5027 if (PM_GET_PM_INFO(dip) == NULL) { 5028 /* 5029 * keep the kidsupcount increment as is 5030 */ 5031 DEVI(dip)->devi_pm_flags |= PMC_NOPMKID; 5032 if (pdip && !PM_WANTS_NOTIFICATION(pdip)) { 5033 pm_hold_power(pdip); 5034 } else if (pdip && MDI_VHCI(pdip) && MDI_CLIENT(dip)) { 5035 (void) mdi_power(pdip, MDI_PM_HOLD_POWER, 5036 (void *)dip, NULL, 0); 5037 } 5038 5039 PMD(PMD_KIDSUP, ("%s: pm of %s@%s(%s#%d) failed, parent " 5040 "left up\n", pmf, PM_DEVICE(dip))) 5041 } 5042 5043 return (ret); 5044 } 5045 5046 /* 5047 * Keep a list of recorded thresholds. For now we just keep a list and 5048 * search it linearly. We don't expect too many entries. Can always hash it 5049 * later if we need to. 
 */
void
pm_record_thresh(pm_thresh_rec_t *rp)
{
	pm_thresh_rec_t *pptr, *ptr;

	ASSERT(*rp->ptr_physpath);
	rw_enter(&pm_thresh_rwlock, RW_WRITER);
	for (pptr = NULL, ptr = pm_thresh_head;
	    ptr; pptr = ptr, ptr = ptr->ptr_next) {
		if (strcmp(rp->ptr_physpath, ptr->ptr_physpath) == 0) {
			/* replace this one */
			rp->ptr_next = ptr->ptr_next;
			if (pptr) {
				pptr->ptr_next = rp;
			} else {
				pm_thresh_head = rp;
			}
			rw_exit(&pm_thresh_rwlock);
			kmem_free(ptr, ptr->ptr_size);
			return;
		}
	}
	/*
	 * There was not a match in the list, insert this one in front
	 */
	if (pm_thresh_head) {
		rp->ptr_next = pm_thresh_head;
		pm_thresh_head = rp;
	} else {
		rp->ptr_next = NULL;
		pm_thresh_head = rp;
	}
	rw_exit(&pm_thresh_rwlock);
}

/*
 * Create a new dependency record and hang a new dependency entry off of it
 */
pm_pdr_t *
newpdr(char *kept, char *keeps, int isprop)
{
	size_t size = strlen(kept) + strlen(keeps) + 2 + sizeof (pm_pdr_t);
	pm_pdr_t *p = kmem_zalloc(size, KM_SLEEP);
	p->pdr_size = size;
	p->pdr_isprop = isprop;
	p->pdr_kept_paths = NULL;
	p->pdr_kept_count = 0;
	p->pdr_kept = (char *)((intptr_t)p + sizeof (pm_pdr_t));
	(void) strcpy(p->pdr_kept, kept);
	p->pdr_keeper = (char *)((intptr_t)p->pdr_kept + strlen(kept) + 1);
	(void) strcpy(p->pdr_keeper, keeps);
	ASSERT((intptr_t)p->pdr_keeper + strlen(p->pdr_keeper) + 1 <=
	    (intptr_t)p + size);
	ASSERT((intptr_t)p->pdr_kept + strlen(p->pdr_kept) + 1 <=
	    (intptr_t)p + size);
	return (p);
}

/*
 * Keep a list of recorded dependencies.  We only keep the
 * keeper -> kept list for simplification.  At this point we do not
 * care about whether the devices are attached yet; that is handled
 * in pm_keeper() and pm_kept().
 * If a PM_RESET_PM happens, then we tear down and forget the dependencies,
 * and it is up to the user to issue the ioctl again if they want it
 * (e.g. pmconfig)
 * Returns true if dependency already exists in the list.
 */
int
pm_record_keeper(char *kept, char *keeper, int isprop)
{
	PMD_FUNC(pmf, "record_keeper")
	pm_pdr_t *npdr, *ppdr, *pdr;

	PMD(PMD_KEEPS, ("%s: %s, %s\n", pmf, kept, keeper))
	ASSERT(kept && keeper);
#ifdef DEBUG
	if (pm_debug & PMD_KEEPS)
		prdeps("pm_record_keeper entry");
#endif
	for (ppdr = NULL, pdr = pm_dep_head; pdr;
	    ppdr = pdr, pdr = pdr->pdr_next) {
		PMD(PMD_KEEPS, ("%s: check %s, %s\n", pmf, pdr->pdr_kept,
		    pdr->pdr_keeper))
		if (strcmp(kept, pdr->pdr_kept) == 0 &&
		    strcmp(keeper, pdr->pdr_keeper) == 0) {
			PMD(PMD_KEEPS, ("%s: match\n", pmf))
			return (1);
		}
	}
	/*
	 * We did not find any match, so we have to make an entry
	 */
	npdr = newpdr(kept, keeper, isprop);
	if (ppdr) {
		ASSERT(ppdr->pdr_next == NULL);
		ppdr->pdr_next = npdr;
	} else {
		ASSERT(pm_dep_head == NULL);
		pm_dep_head = npdr;
	}
#ifdef DEBUG
	if (pm_debug & PMD_KEEPS)
		prdeps("pm_record_keeper after new record");
#endif
	if (!isprop)
		pm_unresolved_deps++;
	else
		pm_prop_deps++;
	return (0);
}

/*
 * Look up this device in the set of devices we've seen ioctls for
 * to see if we are holding a threshold spec for it.  If so, make it so.
 * At ioctl time, we were given the physical path of the device.
 */
int
pm_thresh_specd(dev_info_t *dip)
{
	void pm_apply_recorded_thresh(dev_info_t *, pm_thresh_rec_t *);
	char *path;
	char pathbuf[MAXNAMELEN];
	pm_thresh_rec_t *rp;

	path = ddi_pathname(dip, pathbuf);

	rw_enter(&pm_thresh_rwlock, RW_READER);
	for (rp = pm_thresh_head; rp; rp = rp->ptr_next) {
		if (strcmp(rp->ptr_physpath, path) != 0)
			continue;
		pm_apply_recorded_thresh(dip, rp);
		rw_exit(&pm_thresh_rwlock);
		return (1);
	}
	rw_exit(&pm_thresh_rwlock);
	return (0);
}

static int
pm_set_keeping(dev_info_t *keeper, dev_info_t *kept)
{
	PMD_FUNC(pmf, "set_keeping")
	int j, up = 0;
	void prdeps(char *);

	PMD(PMD_KEEPS, ("%s: keeper=%s@%s(%s#%d), kept=%s@%s(%s#%d)\n", pmf,
	    PM_DEVICE(keeper), PM_DEVICE(kept)))
#ifdef DEBUG
	if (pm_debug & PMD_KEEPS)
		prdeps("Before PAD\n");
#endif
	ASSERT(keeper != kept);
	if (PM_GET_PM_INFO(keeper) == NULL) {
		cmn_err(CE_CONT, "!device %s@%s(%s#%d) keeps up device "
		    "%s@%s(%s#%d), but the former is not power managed",
		    PM_DEVICE(keeper), PM_DEVICE(kept));
		PMD((PMD_FAIL | PMD_KEEPS), ("%s: keeper %s@%s(%s#%d) is "
		    "not power managed\n", pmf, PM_DEVICE(keeper)))
		return (0);
	}
	if (PM_GET_PM_INFO(kept) == NULL) {
		cmn_err(CE_CONT, "!device %s@%s(%s#%d) keeps up device "
		    "%s@%s(%s#%d), but the latter is not power managed",
		    PM_DEVICE(keeper), PM_DEVICE(kept));
		PMD((PMD_FAIL | PMD_KEEPS), ("%s: kept %s@%s(%s#%d) is "
		    "not power managed\n", pmf, PM_DEVICE(kept)))
		return (0);
	}

	PM_LOCK_POWER(keeper);
	for (j = 0; j < PM_NUMCMPTS(keeper); j++) {
		if (PM_CURPOWER(keeper, j)) {
			up++;
			break;
		}
	}
	if (up) {
		/* Bringup and maintain a hold on the kept */
		PMD(PMD_KEEPS, ("%s: place a hold on kept %s@%s(%s#%d)\n", pmf,
		    PM_DEVICE(kept)))
		bring_pmdep_up(kept, 1);
	}
	PM_UNLOCK_POWER(keeper);
#ifdef DEBUG
	if (pm_debug & PMD_KEEPS)
		prdeps("After PAD\n");
#endif
	return (1);
}

/*
 * Should this device keep up another device?
 * Look up this device in the set of devices we've seen ioctls for
 * to see if we are holding a dependency spec for it.  If so, make it so.
 * Because we require the kept device to be attached already in order to
 * make the list entry (and hold it), we only need to look for keepers.
 * At ioctl time, we were given the physical path of the device.
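 *
 * For example, a power.conf(5) entry such as
 *
 *	device-dependency-property removable-media /dev/fb
 *
 * causes pmconfig(8) to issue PM_ADD_DEPENDENT_PROPERTY, making the console
 * frame buffer the keeper of every device that exports the removable-media
 * property (e.g. a cdrom drive).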
 */
int
pm_keeper(char *keeper)
{
	PMD_FUNC(pmf, "keeper")
	int pm_apply_recorded_dep(dev_info_t *, pm_pdr_t *);
	dev_info_t *dip;
	pm_pdr_t *dp;
	dev_info_t *kept = NULL;
	int ret = 0;
	int i;

	if (!pm_unresolved_deps && !pm_prop_deps)
		return (0);
	ASSERT(keeper != NULL);
	dip = pm_name_to_dip(keeper, 1);
	if (dip == NULL)
		return (0);
	PMD(PMD_KEEPS, ("%s: keeper=%s\n", pmf, keeper))
	for (dp = pm_dep_head; dp; dp = dp->pdr_next) {
		if (!dp->pdr_isprop) {
			if (!pm_unresolved_deps)
				continue;
			PMD(PMD_KEEPS, ("%s: keeper %s\n", pmf, dp->pdr_keeper))
			if (dp->pdr_satisfied) {
				PMD(PMD_KEEPS, ("%s: satisfied\n", pmf))
				continue;
			}
			if (strcmp(dp->pdr_keeper, keeper) == 0) {
				ret += pm_apply_recorded_dep(dip, dp);
			}
		} else {
			if (strcmp(dp->pdr_keeper, keeper) != 0)
				continue;
			for (i = 0; i < dp->pdr_kept_count; i++) {
				if (dp->pdr_kept_paths[i] == NULL)
					continue;
				kept = pm_name_to_dip(dp->pdr_kept_paths[i], 1);
				if (kept == NULL)
					continue;
				ASSERT(ddi_prop_exists(DDI_DEV_T_ANY, kept,
				    DDI_PROP_DONTPASS, dp->pdr_kept));
				PMD(PMD_KEEPS, ("%s: keeper=%s@%s(%s#%d), "
				    "kept=%s@%s(%s#%d) keptcnt=%d\n",
				    pmf, PM_DEVICE(dip), PM_DEVICE(kept),
				    dp->pdr_kept_count))
				if (kept != dip) {
					ret += pm_set_keeping(dip, kept);
				}
				ddi_release_devi(kept);
			}
		}
	}
	ddi_release_devi(dip);
	return (ret);
}

/*
 * Should this device be kept up by another device?
 * Look up all dependencies recorded by the PM_ADD_DEPENDENT and
 * PM_ADD_DEPENDENT_PROPERTY ioctls, and record them on the keeper's
 * kept-device lists.
 */
static int
pm_kept(char *keptp)
{
	PMD_FUNC(pmf, "kept")
	pm_pdr_t *dp;
	int found = 0;
	int ret = 0;
	dev_info_t *keeper;
	dev_info_t *kept;
	size_t length;
	int i;
	char **paths;
	char *path;

	ASSERT(keptp != NULL);
	kept = pm_name_to_dip(keptp, 1);
	if (kept == NULL)
		return (0);
	PMD(PMD_KEEPS, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(kept)))
	for (dp = pm_dep_head; dp; dp = dp->pdr_next) {
		if (dp->pdr_isprop) {
			PMD(PMD_KEEPS, ("%s: property %s\n", pmf, dp->pdr_kept))
			if (ddi_prop_exists(DDI_DEV_T_ANY, kept,
			    DDI_PROP_DONTPASS, dp->pdr_kept)) {
				/*
				 * Don't allow self dependency.
				 */
				if (strcmp(dp->pdr_keeper, keptp) == 0)
					continue;
				keeper = pm_name_to_dip(dp->pdr_keeper, 1);
				if (keeper == NULL)
					continue;
				PMD(PMD_KEEPS, ("%s: adding to kepts path list "
				    "%p\n", pmf, (void *)kept))
#ifdef DEBUG
				if (pm_debug & PMD_DEP)
					prdeps("Before Adding from pm_kept\n");
#endif
				/*
				 * Add ourselves to the dip list.
				 */
				if (dp->pdr_kept_count == 0) {
					length = strlen(keptp) + 1;
					path = kmem_alloc(length, KM_SLEEP);
					paths = kmem_alloc(sizeof (char **),
					    KM_SLEEP);
					(void) strcpy(path, keptp);
					paths[0] = path;
					dp->pdr_kept_paths = paths;
					dp->pdr_kept_count++;
				} else {
					/* Check to see if already on list */
					for (i = 0; i < dp->pdr_kept_count;
					    i++) {
						if (strcmp(keptp,
						    dp->pdr_kept_paths[i])
						    == 0) {
							found++;
							break;
						}
					}
					if (found) {
						ddi_release_devi(keeper);
						continue;
					}
					length = dp->pdr_kept_count *
					    sizeof (char **);
					paths = kmem_alloc(
					    length + sizeof (char **),
					    KM_SLEEP);
					if (dp->pdr_kept_count) {
						bcopy(dp->pdr_kept_paths,
						    paths, length);
						kmem_free(dp->pdr_kept_paths,
						    length);
					}
					dp->pdr_kept_paths = paths;
					length = strlen(keptp) + 1;
					path = kmem_alloc(length, KM_SLEEP);
					(void) strcpy(path, keptp);
					dp->pdr_kept_paths[i] = path;
					dp->pdr_kept_count++;
				}
#ifdef DEBUG
				if (pm_debug & PMD_DEP)
					prdeps("After from pm_kept\n");
#endif
				if (keeper) {
					ret += pm_set_keeping(keeper, kept);
					ddi_release_devi(keeper);
				}
			}
		} else {
			/*
			 * pm_keeper would be called later to do
			 * the actual pm_set_keeping.
			 */
			PMD(PMD_KEEPS, ("%s: adding to kepts path list %p\n",
			    pmf, (void *)kept))
#ifdef DEBUG
			if (pm_debug & PMD_DEP)
				prdeps("Before Adding from pm_kept\n");
#endif
			if (strcmp(keptp, dp->pdr_kept) == 0) {
				if (dp->pdr_kept_paths == NULL) {
					length = strlen(keptp) + 1;
					path = kmem_alloc(length, KM_SLEEP);
					paths = kmem_alloc(sizeof (char **),
					    KM_SLEEP);
					(void) strcpy(path, keptp);
					paths[0] = path;
					dp->pdr_kept_paths = paths;
					dp->pdr_kept_count++;
				}
			}
#ifdef DEBUG
			if (pm_debug & PMD_DEP)
				prdeps("After from pm_kept\n");
#endif
		}
	}
	ddi_release_devi(kept);
	return (ret);
}

/*
 * Apply a recorded dependency.  dp specifies the dependency, and
 * keeper is already known to be the device that keeps up the other (kept) one.
 * We have to search the whole tree for the "kept" device, then apply
 * the dependency (which may already be applied).
 */
int
pm_apply_recorded_dep(dev_info_t *keeper, pm_pdr_t *dp)
{
	PMD_FUNC(pmf, "apply_recorded_dep")
	dev_info_t *kept = NULL;
	int ret = 0;
	char *keptp = NULL;

	/*
	 * A device-to-device dependency can only be 1 to 1.
	 */
	if (dp->pdr_kept_paths == NULL)
		return (0);
	keptp = dp->pdr_kept_paths[0];
	if (keptp == NULL)
		return (0);
	ASSERT(*keptp != '\0');
	kept = pm_name_to_dip(keptp, 1);
	if (kept == NULL)
		return (0);
	PMD(PMD_KEEPS, ("%s: keeper=%s, kept=%s\n", pmf,
	    dp->pdr_keeper, keptp))
	if (pm_set_keeping(keeper, kept)) {
		ASSERT(dp->pdr_satisfied == 0);
		dp->pdr_satisfied = 1;
		ASSERT(pm_unresolved_deps);
		pm_unresolved_deps--;
		ret++;
	}
	ddi_release_devi(kept);

	return (ret);
}

/*
 * Called from common/io/pm.c
 */
int
pm_cur_power(pm_component_t *cp)
{
	return (cur_power(cp));
}

/*
 * External interface to sanity-check a power level.
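 * For example, for a component whose pm-components entry defines levels
 * 0 and 1, pm_valid_power(dip, 0, 1) returns nonzero while
 * pm_valid_power(dip, 0, 2) returns 0.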
 */
int
pm_valid_power(dev_info_t *dip, int comp, int level)
{
	PMD_FUNC(pmf, "valid_power")

	if (comp >= 0 && comp < PM_NUMCMPTS(dip) && level >= 0)
		return (e_pm_valid_power(dip, comp, level));
	else {
		PMD(PMD_FAIL, ("%s: comp=%d, ncomp=%d, level=%d\n",
		    pmf, comp, PM_NUMCMPTS(dip), level))
		return (0);
	}
}

/*
 * Called when a device that is direct power managed needs to change state.
 * This routine arranges to block the request until the process managing
 * the device makes the change (or some other incompatible change) or
 * the process closes /dev/pm.
 */
static int
pm_block(dev_info_t *dip, int comp, int newpower, int oldpower)
{
	pm_rsvp_t *new = kmem_zalloc(sizeof (*new), KM_SLEEP);
	int ret = 0;
	void pm_dequeue_blocked(pm_rsvp_t *);
	void pm_enqueue_blocked(pm_rsvp_t *);

	ASSERT(!pm_processes_stopped);
	ASSERT(PM_IAM_LOCKING_DIP(dip));
	new->pr_dip = dip;
	new->pr_comp = comp;
	new->pr_newlevel = newpower;
	new->pr_oldlevel = oldpower;
	cv_init(&new->pr_cv, NULL, CV_DEFAULT, NULL);
	mutex_enter(&pm_rsvp_lock);
	pm_enqueue_blocked(new);
	pm_enqueue_notify(PSC_PENDING_CHANGE, dip, comp, newpower, oldpower,
	    PM_CANBLOCK_BLOCK);
	PM_UNLOCK_DIP(dip);
	/*
	 * truss may make the cv_wait_sig return prematurely
	 */
	while (ret == 0) {
		/*
		 * Normally there will be no user context involved, but if
		 * there is (e.g. we are here via an ioctl call to a driver)
		 * then we should allow the process to abort the request,
		 * or we get an unkillable process if the same thread does
		 * PM_DIRECT_PM and pm_raise_power
		 */
		if (cv_wait_sig(&new->pr_cv, &pm_rsvp_lock) == 0) {
			ret = PMP_FAIL;
		} else {
			ret = new->pr_retval;
		}
	}
	pm_dequeue_blocked(new);
	mutex_exit(&pm_rsvp_lock);
	cv_destroy(&new->pr_cv);
	kmem_free(new, sizeof (*new));
	return (ret);
}

/*
 * Returns true if the process is interested in power level changes (has issued
 * PM_GET_STATE_CHANGE ioctl).
 */
int
pm_interest_registered(int clone)
{
	ASSERT(clone >= 0 && clone < PM_MAX_CLONE - 1);
	return (pm_interest[clone]);
}

static void pm_enqueue_pscc(pscc_t *, pscc_t **);

/*
 * Process with clone has just done PM_DIRECT_PM on dip, or has asked to
 * watch all state transitions (dip == NULL).  Set up data
 * structs to communicate with process about state changes.
 */
void
pm_register_watcher(int clone, dev_info_t *dip)
{
	pscc_t	*p;
	psce_t	*psce;

	/*
	 * We definitely need a control struct; then we have to search to see
	 * whether there is already an entries struct (in the dip != NULL
	 * case).
5587 */ 5588 pscc_t *pscc = kmem_zalloc(sizeof (*pscc), KM_SLEEP); 5589 pscc->pscc_clone = clone; 5590 pscc->pscc_dip = dip; 5591 5592 if (dip) { 5593 int found = 0; 5594 rw_enter(&pm_pscc_direct_rwlock, RW_WRITER); 5595 for (p = pm_pscc_direct; p; p = p->pscc_next) { 5596 /* 5597 * Already an entry for this clone, so just use it 5598 * for the new one (for the case where a single 5599 * process is watching multiple devices) 5600 */ 5601 if (p->pscc_clone == clone) { 5602 pscc->pscc_entries = p->pscc_entries; 5603 pscc->pscc_entries->psce_references++; 5604 found++; 5605 break; 5606 } 5607 } 5608 if (!found) { /* create a new one */ 5609 psce = kmem_zalloc(sizeof (psce_t), KM_SLEEP); 5610 mutex_init(&psce->psce_lock, NULL, MUTEX_DEFAULT, NULL); 5611 psce->psce_first = 5612 kmem_zalloc(sizeof (pm_state_change_t) * PSCCOUNT, 5613 KM_SLEEP); 5614 psce->psce_in = psce->psce_out = psce->psce_first; 5615 psce->psce_last = &psce->psce_first[PSCCOUNT - 1]; 5616 psce->psce_references = 1; 5617 pscc->pscc_entries = psce; 5618 } 5619 pm_enqueue_pscc(pscc, &pm_pscc_direct); 5620 rw_exit(&pm_pscc_direct_rwlock); 5621 } else { 5622 ASSERT(!pm_interest_registered(clone)); 5623 rw_enter(&pm_pscc_interest_rwlock, RW_WRITER); 5624 #ifdef DEBUG 5625 for (p = pm_pscc_interest; p; p = p->pscc_next) { 5626 /* 5627 * Should not be an entry for this clone! 5628 */ 5629 ASSERT(p->pscc_clone != clone); 5630 } 5631 #endif 5632 psce = kmem_zalloc(sizeof (psce_t), KM_SLEEP); 5633 psce->psce_first = kmem_zalloc(sizeof (pm_state_change_t) * 5634 PSCCOUNT, KM_SLEEP); 5635 psce->psce_in = psce->psce_out = psce->psce_first; 5636 psce->psce_last = &psce->psce_first[PSCCOUNT - 1]; 5637 psce->psce_references = 1; 5638 pscc->pscc_entries = psce; 5639 pm_enqueue_pscc(pscc, &pm_pscc_interest); 5640 pm_interest[clone] = 1; 5641 rw_exit(&pm_pscc_interest_rwlock); 5642 } 5643 } 5644 5645 /* 5646 * Remove the given entry from the blocked list 5647 */ 5648 void 5649 pm_dequeue_blocked(pm_rsvp_t *p) 5650 { 5651 ASSERT(MUTEX_HELD(&pm_rsvp_lock)); 5652 if (pm_blocked_list == p) { 5653 ASSERT(p->pr_prev == NULL); 5654 if (p->pr_next != NULL) 5655 p->pr_next->pr_prev = NULL; 5656 pm_blocked_list = p->pr_next; 5657 } else { 5658 ASSERT(p->pr_prev != NULL); 5659 p->pr_prev->pr_next = p->pr_next; 5660 if (p->pr_next != NULL) 5661 p->pr_next->pr_prev = p->pr_prev; 5662 } 5663 } 5664 5665 /* 5666 * Remove the given control struct from the given list 5667 */ 5668 static void 5669 pm_dequeue_pscc(pscc_t *p, pscc_t **list) 5670 { 5671 if (*list == p) { 5672 ASSERT(p->pscc_prev == NULL); 5673 if (p->pscc_next != NULL) 5674 p->pscc_next->pscc_prev = NULL; 5675 *list = p->pscc_next; 5676 } else { 5677 ASSERT(p->pscc_prev != NULL); 5678 p->pscc_prev->pscc_next = p->pscc_next; 5679 if (p->pscc_next != NULL) 5680 p->pscc_next->pscc_prev = p->pscc_prev; 5681 } 5682 } 5683 5684 /* 5685 * Stick the control struct specified on the front of the list 5686 */ 5687 static void 5688 pm_enqueue_pscc(pscc_t *p, pscc_t **list) 5689 { 5690 pscc_t *h; /* entry at head of list */ 5691 if ((h = *list) == NULL) { 5692 *list = p; 5693 ASSERT(p->pscc_next == NULL); 5694 ASSERT(p->pscc_prev == NULL); 5695 } else { 5696 p->pscc_next = h; 5697 ASSERT(h->pscc_prev == NULL); 5698 h->pscc_prev = p; 5699 ASSERT(p->pscc_prev == NULL); 5700 *list = p; 5701 } 5702 } 5703 5704 /* 5705 * If dip is NULL, the process is closing "clone"; clean up all its 5706 * registrations. Otherwise only clean up those for dip, because the process 5707 * is just giving up control of a direct device.
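 *
 * The two expected call patterns, as a sketch (callers are illustrative):
 *
 *	pm_deregister_watcher(clone, dip);	(gave up one direct device)
 *	pm_deregister_watcher(clone, NULL);	(process is closing /dev/pm)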
5708 */ 5709 void 5710 pm_deregister_watcher(int clone, dev_info_t *dip) 5711 { 5712 pscc_t *p, *pn; 5713 psce_t *psce; 5714 int found = 0; 5715 5716 if (dip == NULL) { 5717 rw_enter(&pm_pscc_interest_rwlock, RW_WRITER); 5718 for (p = pm_pscc_interest; p; p = pn) { 5719 pn = p->pscc_next; 5720 if (p->pscc_clone == clone) { 5721 pm_dequeue_pscc(p, &pm_pscc_interest); 5722 psce = p->pscc_entries; 5723 ASSERT(psce->psce_references == 1); 5724 mutex_destroy(&psce->psce_lock); 5725 kmem_free(psce->psce_first, 5726 sizeof (pm_state_change_t) * PSCCOUNT); 5727 kmem_free(psce, sizeof (*psce)); 5728 kmem_free(p, sizeof (*p)); 5729 } 5730 } 5731 pm_interest[clone] = 0; 5732 rw_exit(&pm_pscc_interest_rwlock); 5733 } 5734 found = 0; 5735 rw_enter(&pm_pscc_direct_rwlock, RW_WRITER); 5736 for (p = pm_pscc_direct; p; p = pn) { 5737 pn = p->pscc_next; 5738 if ((dip && p->pscc_dip == dip) || 5739 (dip == NULL && clone == p->pscc_clone)) { 5740 ASSERT(clone == p->pscc_clone); 5741 found++; 5742 /* 5743 * Remove from control list 5744 */ 5745 pm_dequeue_pscc(p, &pm_pscc_direct); 5746 /* 5747 * If we're the last reference, free the 5748 * entries struct. 5749 */ 5750 psce = p->pscc_entries; 5751 ASSERT(psce); 5752 if (psce->psce_references == 1) { 5753 kmem_free(psce->psce_first, 5754 PSCCOUNT * sizeof (pm_state_change_t)); 5755 kmem_free(psce, sizeof (*psce)); 5756 } else { 5757 psce->psce_references--; 5758 } 5759 kmem_free(p, sizeof (*p)); 5760 } 5761 } 5762 ASSERT(dip == NULL || found); 5763 rw_exit(&pm_pscc_direct_rwlock); 5764 } 5765 5766 /* 5767 * Search the indicated list for an entry that matches clone, and return a 5768 * pointer to it. To be interesting, the entry must have something ready to 5769 * be passed up to the controlling process. 5770 * The returned entry will be locked upon return from this call. 5771 */ 5772 static psce_t * 5773 pm_psc_find_clone(int clone, pscc_t **list, krwlock_t *lock) 5774 { 5775 pscc_t *p; 5776 psce_t *psce; 5777 rw_enter(lock, RW_READER); 5778 for (p = *list; p; p = p->pscc_next) { 5779 if (clone == p->pscc_clone) { 5780 psce = p->pscc_entries; 5781 mutex_enter(&psce->psce_lock); 5782 if (psce->psce_out->size) { 5783 rw_exit(lock); 5784 return (psce); 5785 } else { 5786 mutex_exit(&psce->psce_lock); 5787 } 5788 } 5789 } 5790 rw_exit(lock); 5791 return (NULL); 5792 } 5793 5794 static psce_t *pm_psc_find_clone(int, pscc_t **, krwlock_t *); 5795 /* 5796 * Find an entry for a particular clone in the direct list. 5797 */ 5798 psce_t * 5799 pm_psc_clone_to_direct(int clone) 5800 { 5801 return (pm_psc_find_clone(clone, &pm_pscc_direct, 5802 &pm_pscc_direct_rwlock)); 5803 } 5804 5805 /* 5806 * Find an entry for a particular clone in the interest list. 
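 * Like pm_psc_clone_to_direct(), this returns NULL unless the clone has an
 * event queued; a non-NULL entry comes back locked by pm_psc_find_clone().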
5807 */ 5808 psce_t * 5809 pm_psc_clone_to_interest(int clone) 5810 { 5811 return (pm_psc_find_clone(clone, &pm_pscc_interest, 5812 &pm_pscc_interest_rwlock)); 5813 } 5814 5815 /* 5816 * Put the given entry at the head of the blocked list 5817 */ 5818 void 5819 pm_enqueue_blocked(pm_rsvp_t *p) 5820 { 5821 ASSERT(MUTEX_HELD(&pm_rsvp_lock)); 5822 ASSERT(p->pr_next == NULL); 5823 ASSERT(p->pr_prev == NULL); 5824 if (pm_blocked_list != NULL) { 5825 p->pr_next = pm_blocked_list; 5826 ASSERT(pm_blocked_list->pr_prev == NULL); 5827 pm_blocked_list->pr_prev = p; 5828 pm_blocked_list = p; 5829 } else { 5830 pm_blocked_list = p; 5831 } 5832 } 5833 5834 /* 5835 * Sets every power managed device back to its default threshold 5836 */ 5837 void 5838 pm_all_to_default_thresholds(void) 5839 { 5840 ddi_walk_devs(ddi_root_node(), pm_set_dev_thr_walk, 5841 (void *) &pm_system_idle_threshold); 5842 } 5843 5844 static int 5845 pm_set_dev_thr_walk(dev_info_t *dip, void *arg) 5846 { 5847 int thr = (int)(*(int *)arg); 5848 5849 if (!PM_GET_PM_INFO(dip)) 5850 return (DDI_WALK_CONTINUE); 5851 pm_set_device_threshold(dip, thr, PMC_DEF_THRESH); 5852 return (DDI_WALK_CONTINUE); 5853 } 5854 5855 /* 5856 * Returns the current threshold value (in seconds) for the indicated component 5857 */ 5858 int 5859 pm_current_threshold(dev_info_t *dip, int comp, int *threshp) 5860 { 5861 if (comp < 0 || comp >= PM_NUMCMPTS(dip)) { 5862 return (DDI_FAILURE); 5863 } else { 5864 *threshp = cur_threshold(dip, comp); 5865 return (DDI_SUCCESS); 5866 } 5867 } 5868 5869 /* 5870 * To be called when changing the power level of a component of a device. 5871 * On some platforms, changing power on one device may require that power 5872 * be changed on other, related devices in the same transaction. Thus, we 5873 * always pass this request to the platform power manager so that all the 5874 * affected devices will be locked. 5875 */ 5876 void 5877 pm_lock_power(dev_info_t *dip) 5878 { 5879 power_req_t power_req; 5880 int result; 5881 5882 power_req.request_type = PMR_PPM_LOCK_POWER; 5883 power_req.req.ppm_lock_power_req.who = dip; 5884 (void) pm_ctlops(PPM(dip), dip, DDI_CTLOPS_POWER, &power_req, &result); 5885 } 5886 5887 /* 5888 * Release the lock (or locks) acquired to change the power of a device. 5889 * See comments for pm_lock_power. 5890 */ 5891 void 5892 pm_unlock_power(dev_info_t *dip) 5893 { 5894 power_req_t power_req; 5895 int result; 5896 5897 power_req.request_type = PMR_PPM_UNLOCK_POWER; 5898 power_req.req.ppm_unlock_power_req.who = dip; 5899 (void) pm_ctlops(PPM(dip), dip, DDI_CTLOPS_POWER, &power_req, &result); 5900 } 5901 5902 5903 /* 5904 * Attempt (without blocking) to acquire the lock(s) needed to change the 5905 * power of a component of a device. See comments for pm_lock_power. 5906 * 5907 * Return: 1 if lock(s) acquired, 0 if not. 5908 */ 5909 int 5910 pm_try_locking_power(dev_info_t *dip) 5911 { 5912 power_req_t power_req; 5913 int result; 5914 5915 power_req.request_type = PMR_PPM_TRY_LOCK_POWER; 5916 power_req.req.ppm_lock_power_req.who = dip; 5917 (void) pm_ctlops(PPM(dip), dip, DDI_CTLOPS_POWER, &power_req, &result); 5918 return (result); 5919 } 5920 5921 5922 /* 5923 * Lock power state of a device. 5924 * 5925 * The implementation handles a special case where another thread may have 5926 * acquired the lock and created/launched this thread to do the work. If 5927 * the lock cannot be acquired immediately, we check to see if this thread 5928 * is registered as a borrower of the lock. 
If so, we may proceed without 5929 * the lock. This assumes that the lending thread blocks on the completion 5930 * of this thread. 5931 * 5932 * Note 1: for use by ppm only. 5933 * 5934 * Note 2: On failing to get the lock immediately, we search lock_loan list 5935 * for curthread (as borrower of the lock). On a hit, we check that the 5936 * lending thread already owns the lock we want. It is safe to compare 5937 * devi_busy_thread and thread id of the lender because in the == case (the 5938 * only one we care about) we know that the owner is blocked. Similarly, 5939 * if we find that curthread isn't registered as a lock borrower, it is safe 5940 * to use the blocking call (ndi_devi_enter) because we know that if we 5941 * weren't already listed as a borrower (upstream on the call stack) we won't 5942 * become one. 5943 */ 5944 void 5945 pm_lock_power_single(dev_info_t *dip) 5946 { 5947 lock_loan_t *cur; 5948 5949 /* if the lock is available, we are done. */ 5950 if (ndi_devi_tryenter(dip)) 5951 return; 5952 5953 mutex_enter(&pm_loan_lock); 5954 /* see if our thread is registered as a lock borrower. */ 5955 for (cur = lock_loan_head.pmlk_next; cur; cur = cur->pmlk_next) 5956 if (cur->pmlk_borrower == curthread) 5957 break; 5958 mutex_exit(&pm_loan_lock); 5959 5960 /* if this thread is not already registered, it is safe to block */ 5961 if (cur == NULL) 5962 ndi_devi_enter(dip); 5963 else { 5964 /* registered: does lender own the lock we want? */ 5965 if (cur->pmlk_lender == DEVI(dip)->devi_busy_thread) { 5966 ASSERT(cur->pmlk_dip == NULL || cur->pmlk_dip == dip); 5967 cur->pmlk_dip = dip; 5968 } else /* no: just block for it */ 5969 ndi_devi_enter(dip); 5970 5971 } 5972 } 5973 5974 /* 5975 * Drop the lock on the device's power state. See comment for 5976 * pm_lock_power_single() for special implementation considerations. 5977 * 5978 * Note: for use by ppm only. 5979 */ 5980 void 5981 pm_unlock_power_single(dev_info_t *dip) 5982 { 5983 lock_loan_t *cur; 5984 5985 /* optimization: mutex not needed to check empty list */ 5986 if (lock_loan_head.pmlk_next == NULL) { 5987 ndi_devi_exit(dip); 5988 return; 5989 } 5990 5991 mutex_enter(&pm_loan_lock); 5992 /* see if our thread is registered as a lock borrower. */ 5993 for (cur = lock_loan_head.pmlk_next; cur; cur = cur->pmlk_next) 5994 if (cur->pmlk_borrower == curthread) 5995 break; 5996 mutex_exit(&pm_loan_lock); 5997 5998 if (cur == NULL || cur->pmlk_dip != dip) 5999 /* we acquired the lock directly, so return it */ 6000 ndi_devi_exit(dip); 6001 } 6002 6003 /* 6004 * Try to take the lock for changing the power level of a component. 6005 * 6006 * Note: for use by ppm only. 6007 */ 6008 int 6009 pm_try_locking_power_single(dev_info_t *dip) 6010 { 6011 return (ndi_devi_tryenter(dip)); 6012 } 6013 6014 #ifdef DEBUG 6015 /* 6016 * The following are used only to print out data structures for debugging 6017 */ 6018 void 6019 prdeps(char *msg) 6020 { 6021 6022 pm_pdr_t *rp; 6023 int i; 6024 6025 pm_log("pm_dep_head %s %p\n", msg, (void *)pm_dep_head); 6026 for (rp = pm_dep_head; rp; rp = rp->pdr_next) { 6027 pm_log("%p: %s keeper %s, kept %s, kept count %d, next %p\n", 6028 (void *)rp, (rp->pdr_isprop ?
"property" : "device"), 6029 rp->pdr_keeper, rp->pdr_kept, rp->pdr_kept_count, 6030 (void *)rp->pdr_next); 6031 if (rp->pdr_kept_count != 0) { 6032 pm_log("kept list = "); 6033 i = 0; 6034 while (i < rp->pdr_kept_count) { 6035 pm_log("%s ", rp->pdr_kept_paths[i]); 6036 i++; 6037 } 6038 pm_log("\n"); 6039 } 6040 } 6041 } 6042 6043 void 6044 pr_noinvol(char *hdr) 6045 { 6046 pm_noinvol_t *ip; 6047 6048 pm_log("%s\n", hdr); 6049 rw_enter(&pm_noinvol_rwlock, RW_READER); 6050 for (ip = pm_noinvol_head; ip; ip = ip->ni_next) 6051 pm_log("\tmaj %d, flags %x, noinvolpm %d %s\n", 6052 ip->ni_major, ip->ni_flags, ip->ni_noinvolpm, ip->ni_path); 6053 rw_exit(&pm_noinvol_rwlock); 6054 } 6055 #endif 6056 6057 /* 6058 * Attempt to apply the thresholds indicated by rp to the node specified by 6059 * dip. 6060 */ 6061 void 6062 pm_apply_recorded_thresh(dev_info_t *dip, pm_thresh_rec_t *rp) 6063 { 6064 PMD_FUNC(pmf, "apply_recorded_thresh") 6065 int i, j; 6066 int comps = PM_NUMCMPTS(dip); 6067 struct pm_component *cp; 6068 pm_pte_t *ep; 6069 int pm_valid_thresh(dev_info_t *, pm_thresh_rec_t *); 6070 6071 PMD(PMD_THRESH, ("%s: part: %s@%s(%s#%d), rp %p, %s\n", pmf, 6072 PM_DEVICE(dip), (void *)rp, rp->ptr_physpath)) 6073 PM_LOCK_DIP(dip); 6074 if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip) || !pm_valid_thresh(dip, rp)) { 6075 PMD(PMD_FAIL, ("%s: part: %s@%s(%s#%d) PM_GET_PM_INFO %p\n", 6076 pmf, PM_DEVICE(dip), (void*)PM_GET_PM_INFO(dip))) 6077 PMD(PMD_FAIL, ("%s: part: %s@%s(%s#%d) PM_ISBC %d\n", 6078 pmf, PM_DEVICE(dip), PM_ISBC(dip))) 6079 PMD(PMD_FAIL, ("%s: part: %s@%s(%s#%d) pm_valid_thresh %d\n", 6080 pmf, PM_DEVICE(dip), pm_valid_thresh(dip, rp))) 6081 PM_UNLOCK_DIP(dip); 6082 return; 6083 } 6084 6085 ep = rp->ptr_entries; 6086 /* 6087 * Here we do the special case of a device threshold 6088 */ 6089 if (rp->ptr_numcomps == 0) { /* PM_SET_DEVICE_THRESHOLD product */ 6090 ASSERT(ep && ep->pte_numthresh == 1); 6091 PMD(PMD_THRESH, ("%s: set dev thr %s@%s(%s#%d) to 0x%x\n", 6092 pmf, PM_DEVICE(dip), ep->pte_thresh[0])) 6093 PM_UNLOCK_DIP(dip); 6094 pm_set_device_threshold(dip, ep->pte_thresh[0], PMC_DEV_THRESH); 6095 if (PM_SCANABLE(dip)) 6096 pm_rescan(dip); 6097 return; 6098 } 6099 for (i = 0; i < comps; i++) { 6100 cp = PM_CP(dip, i); 6101 for (j = 0; j < ep->pte_numthresh; j++) { 6102 PMD(PMD_THRESH, ("%s: set thr %d for %s@%s(%s#%d)[%d] " 6103 "to %x\n", pmf, j, PM_DEVICE(dip), 6104 i, ep->pte_thresh[j])) 6105 cp->pmc_comp.pmc_thresh[j + 1] = ep->pte_thresh[j]; 6106 } 6107 ep++; 6108 } 6109 DEVI(dip)->devi_pm_flags &= PMC_THRESH_NONE; 6110 DEVI(dip)->devi_pm_flags |= PMC_COMP_THRESH; 6111 PM_UNLOCK_DIP(dip); 6112 6113 if (PM_SCANABLE(dip)) 6114 pm_rescan(dip); 6115 } 6116 6117 /* 6118 * Returns true if the threshold specified by rp could be applied to dip 6119 * (that is, the number of components and transitions are the same) 6120 */ 6121 int 6122 pm_valid_thresh(dev_info_t *dip, pm_thresh_rec_t *rp) 6123 { 6124 PMD_FUNC(pmf, "valid_thresh") 6125 int comps, i; 6126 pm_component_t *cp; 6127 pm_pte_t *ep; 6128 6129 if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip)) { 6130 PMD(PMD_ERROR, ("%s: %s: no pm_info or BC\n", pmf, 6131 rp->ptr_physpath)) 6132 return (0); 6133 } 6134 /* 6135 * Special case: we represent the PM_SET_DEVICE_THRESHOLD case by 6136 * an entry with numcomps == 0, (since we don't know how many 6137 * components there are in advance). This is always a valid 6138 * spec. 
6139 */ 6140 if (rp->ptr_numcomps == 0) { 6141 ASSERT(rp->ptr_entries && rp->ptr_entries->pte_numthresh == 1); 6142 return (1); 6143 } 6144 if (rp->ptr_numcomps != (comps = PM_NUMCMPTS(dip))) { 6145 PMD(PMD_ERROR, ("%s: comp # mm (dip %d cmd %d) for %s\n", 6146 pmf, PM_NUMCMPTS(dip), rp->ptr_numcomps, rp->ptr_physpath)) 6147 return (0); 6148 } 6149 ep = rp->ptr_entries; 6150 for (i = 0; i < comps; i++) { 6151 cp = PM_CP(dip, i); 6152 if ((ep + i)->pte_numthresh != 6153 cp->pmc_comp.pmc_numlevels - 1) { 6154 PMD(PMD_ERROR, ("%s: %s[%d]: thresh=%d, record=%d\n", 6155 pmf, rp->ptr_physpath, i, 6156 cp->pmc_comp.pmc_numlevels - 1, 6157 (ep + i)->pte_numthresh)) 6158 return (0); 6159 } 6160 } 6161 return (1); 6162 } 6163 6164 /* 6165 * Remove any recorded threshold for device physpath 6166 * We know there will be at most one. 6167 */ 6168 void 6169 pm_unrecord_threshold(char *physpath) 6170 { 6171 pm_thresh_rec_t *pptr, *ptr; 6172 6173 rw_enter(&pm_thresh_rwlock, RW_WRITER); 6174 for (pptr = NULL, ptr = pm_thresh_head; ptr; ptr = ptr->ptr_next) { 6175 if (strcmp(physpath, ptr->ptr_physpath) == 0) { 6176 if (pptr) { 6177 pptr->ptr_next = ptr->ptr_next; 6178 } else { 6179 ASSERT(pm_thresh_head == ptr); 6180 pm_thresh_head = ptr->ptr_next; 6181 } 6182 kmem_free(ptr, ptr->ptr_size); 6183 break; 6184 } 6185 pptr = ptr; 6186 } 6187 rw_exit(&pm_thresh_rwlock); 6188 } 6189 6190 /* 6191 * Discard all recorded thresholds. We are returning to the default pm state. 6192 */ 6193 void 6194 pm_discard_thresholds(void) 6195 { 6196 pm_thresh_rec_t *rp; 6197 rw_enter(&pm_thresh_rwlock, RW_WRITER); 6198 while (pm_thresh_head) { 6199 rp = pm_thresh_head; 6200 pm_thresh_head = rp->ptr_next; 6201 kmem_free(rp, rp->ptr_size); 6202 } 6203 rw_exit(&pm_thresh_rwlock); 6204 } 6205 6206 /* 6207 * Discard all recorded dependencies. We are returning to the default pm state. 
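 * (This is reached via the PM_DEP_WK_REMOVE_DEP and PM_DEP_WK_CPR_SUSPEND
 * requests handled by pm_process_dep_request() below.)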
6208 */ 6209 void 6210 pm_discard_dependencies(void) 6211 { 6212 pm_pdr_t *rp; 6213 int i; 6214 size_t length; 6215 6216 #ifdef DEBUG 6217 if (pm_debug & PMD_DEP) 6218 prdeps("Before discard\n"); 6219 #endif 6220 ddi_walk_devs(ddi_root_node(), pm_discard_dep_walk, NULL); 6221 6222 #ifdef DEBUG 6223 if (pm_debug & PMD_DEP) 6224 prdeps("After discard\n"); 6225 #endif 6226 while (pm_dep_head) { 6227 rp = pm_dep_head; 6228 if (!rp->pdr_isprop) { 6229 ASSERT(rp->pdr_satisfied == 0); 6230 ASSERT(pm_unresolved_deps); 6231 pm_unresolved_deps--; 6232 } else { 6233 ASSERT(pm_prop_deps); 6234 pm_prop_deps--; 6235 } 6236 pm_dep_head = rp->pdr_next; 6237 if (rp->pdr_kept_count) { 6238 for (i = 0; i < rp->pdr_kept_count; i++) { 6239 length = strlen(rp->pdr_kept_paths[i]) + 1; 6240 kmem_free(rp->pdr_kept_paths[i], length); 6241 } 6242 kmem_free(rp->pdr_kept_paths, 6243 rp->pdr_kept_count * sizeof (char **)); 6244 } 6245 kmem_free(rp, rp->pdr_size); 6246 } 6247 } 6248 6249 6250 static int 6251 pm_discard_dep_walk(dev_info_t *dip, void *arg) 6252 { 6253 _NOTE(ARGUNUSED(arg)) 6254 char *pathbuf; 6255 6256 if (PM_GET_PM_INFO(dip) == NULL) 6257 return (DDI_WALK_CONTINUE); 6258 pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6259 (void) ddi_pathname(dip, pathbuf); 6260 pm_free_keeper(pathbuf, 0); 6261 kmem_free(pathbuf, MAXPATHLEN); 6262 return (DDI_WALK_CONTINUE); 6263 } 6264 6265 static int 6266 pm_kept_walk(dev_info_t *dip, void *arg) 6267 { 6268 _NOTE(ARGUNUSED(arg)) 6269 char *pathbuf; 6270 6271 pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6272 (void) ddi_pathname(dip, pathbuf); 6273 (void) pm_kept(pathbuf); 6274 kmem_free(pathbuf, MAXPATHLEN); 6275 6276 return (DDI_WALK_CONTINUE); 6277 } 6278 6279 static int 6280 pm_keeper_walk(dev_info_t *dip, void *arg) 6281 { 6282 _NOTE(ARGUNUSED(arg)) 6283 char *pathbuf; 6284 6285 pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6286 (void) ddi_pathname(dip, pathbuf); 6287 (void) pm_keeper(pathbuf); 6288 kmem_free(pathbuf, MAXPATHLEN); 6289 6290 return (DDI_WALK_CONTINUE); 6291 } 6292 6293 static char * 6294 pdw_type_decode(int type) 6295 { 6296 switch (type) { 6297 case PM_DEP_WK_POWER_ON: 6298 return ("power on"); 6299 case PM_DEP_WK_POWER_OFF: 6300 return ("power off"); 6301 case PM_DEP_WK_DETACH: 6302 return ("detach"); 6303 case PM_DEP_WK_REMOVE_DEP: 6304 return ("remove dep"); 6305 case PM_DEP_WK_BRINGUP_SELF: 6306 return ("bringup self"); 6307 case PM_DEP_WK_RECORD_KEEPER: 6308 return ("add dependent"); 6309 case PM_DEP_WK_RECORD_KEEPER_PROP: 6310 return ("add dependent property"); 6311 case PM_DEP_WK_KEPT: 6312 return ("kept"); 6313 case PM_DEP_WK_KEEPER: 6314 return ("keeper"); 6315 case PM_DEP_WK_ATTACH: 6316 return ("attach"); 6317 case PM_DEP_WK_CHECK_KEPT: 6318 return ("check kept"); 6319 case PM_DEP_WK_CPR_SUSPEND: 6320 return ("suspend"); 6321 case PM_DEP_WK_CPR_RESUME: 6322 return ("resume"); 6323 default: 6324 return ("unknown"); 6325 } 6326 6327 } 6328 6329 static void 6330 pm_rele_dep(char *keeper) 6331 { 6332 PMD_FUNC(pmf, "rele_dep") 6333 pm_pdr_t *dp; 6334 char *kept_path = NULL; 6335 dev_info_t *kept = NULL; 6336 int count = 0; 6337 6338 for (dp = pm_dep_head; dp; dp = dp->pdr_next) { 6339 if (strcmp(dp->pdr_keeper, keeper) != 0) 6340 continue; 6341 for (count = 0; count < dp->pdr_kept_count; count++) { 6342 kept_path = dp->pdr_kept_paths[count]; 6343 if (kept_path == NULL) 6344 continue; 6345 kept = pm_name_to_dip(kept_path, 1); 6346 if (kept) { 6347 PMD(PMD_KEEPS, ("%s: release kept=%s@%s(%s#%d) " 6348 "of keeper=%s\n", pmf, PM_DEVICE(kept), 6349 
keeper)) 6350 ASSERT(DEVI(kept)->devi_pm_kidsupcnt > 0); 6351 pm_rele_power(kept); 6352 ddi_release_devi(kept); 6353 } 6354 } 6355 } 6356 } 6357 6358 /* 6359 * Called when we are just released from direct PM. Bring ourselves up 6360 * if our keeper is up since dependency is not honored while a kept 6361 * device is under direct PM. 6362 */ 6363 static void 6364 pm_bring_self_up(char *keptpath) 6365 { 6366 PMD_FUNC(pmf, "bring_self_up") 6367 dev_info_t *kept; 6368 dev_info_t *keeper; 6369 pm_pdr_t *dp; 6370 int i, j; 6371 int up = 0; 6372 6373 kept = pm_name_to_dip(keptpath, 1); 6374 if (kept == NULL) 6375 return; 6376 PMD(PMD_KEEPS, ("%s: kept=%s@%s(%s#%d)\n", pmf, PM_DEVICE(kept))) 6377 for (dp = pm_dep_head; dp; dp = dp->pdr_next) { 6378 if (dp->pdr_kept_count == 0) 6379 continue; 6380 for (i = 0; i < dp->pdr_kept_count; i++) { 6381 if (strcmp(dp->pdr_kept_paths[i], keptpath) != 0) 6382 continue; 6383 keeper = pm_name_to_dip(dp->pdr_keeper, 1); 6384 if (keeper) { 6385 PMD(PMD_KEEPS, ("%s: keeper=%s@%s(%s#%d)\n", 6386 pmf, PM_DEVICE(keeper))) 6387 PM_LOCK_POWER(keeper); 6388 for (j = 0; j < PM_NUMCMPTS(keeper); 6389 j++) { 6390 if (PM_CURPOWER(keeper, j)) { 6391 PMD(PMD_KEEPS, ("%s: comp=" 6392 "%d is up\n", pmf, j)) 6393 up++; 6394 } 6395 } 6396 if (up) { 6397 if (PM_SKBU(kept)) 6398 DEVI(kept)->devi_pm_flags &= 6399 ~PMC_SKIP_BRINGUP; 6400 bring_pmdep_up(kept, 1); 6401 } 6402 PM_UNLOCK_POWER(keeper); 6403 ddi_release_devi(keeper); 6404 } 6405 } 6406 } 6407 ddi_release_devi(kept); 6408 } 6409 6410 static void 6411 pm_process_dep_request(pm_dep_wk_t *work) 6412 { 6413 PMD_FUNC(pmf, "dep_req") 6414 int ret; 6415 6416 PMD(PMD_DEP, ("%s: work=%s\n", pmf, 6417 pdw_type_decode(work->pdw_type))) 6418 PMD(PMD_DEP, ("%s: keeper=%s, kept=%s\n", pmf, 6419 (work->pdw_keeper ? work->pdw_keeper : "NULL"), 6420 (work->pdw_kept ? work->pdw_kept : "NULL"))) 6421 6422 ret = 0; 6423 switch (work->pdw_type) { 6424 case PM_DEP_WK_POWER_ON: 6425 /* Bring up the kept devices and put a hold on them */ 6426 bring_wekeeps_up(work->pdw_keeper); 6427 break; 6428 case PM_DEP_WK_POWER_OFF: 6429 /* Release the kept devices */ 6430 pm_rele_dep(work->pdw_keeper); 6431 break; 6432 case PM_DEP_WK_DETACH: 6433 pm_free_keeps(work->pdw_keeper, work->pdw_pwr); 6434 break; 6435 case PM_DEP_WK_REMOVE_DEP: 6436 pm_discard_dependencies(); 6437 break; 6438 case PM_DEP_WK_BRINGUP_SELF: 6439 /* 6440 * We deferred satisfying our dependency till now, so satisfy 6441 * it now and bring ourselves up.
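 * pm_bring_self_up() rechecks whether any component of our keeper is
 * powered and, if one is, brings us up via bring_pmdep_up().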
6442 */ 6443 pm_bring_self_up(work->pdw_kept); 6444 break; 6445 case PM_DEP_WK_RECORD_KEEPER: 6446 (void) pm_record_keeper(work->pdw_kept, work->pdw_keeper, 0); 6447 ddi_walk_devs(ddi_root_node(), pm_kept_walk, NULL); 6448 ddi_walk_devs(ddi_root_node(), pm_keeper_walk, NULL); 6449 break; 6450 case PM_DEP_WK_RECORD_KEEPER_PROP: 6451 (void) pm_record_keeper(work->pdw_kept, work->pdw_keeper, 1); 6452 ddi_walk_devs(ddi_root_node(), pm_keeper_walk, NULL); 6453 ddi_walk_devs(ddi_root_node(), pm_kept_walk, NULL); 6454 break; 6455 case PM_DEP_WK_KEPT: 6456 ret = pm_kept(work->pdw_kept); 6457 PMD(PMD_DEP, ("%s: PM_DEP_WK_KEPT: pm_kept returns %d\n", pmf, 6458 ret)) 6459 break; 6460 case PM_DEP_WK_KEEPER: 6461 ret = pm_keeper(work->pdw_keeper); 6462 PMD(PMD_DEP, ("%s: PM_DEP_WK_KEEPER: pm_keeper returns %d\n", 6463 pmf, ret)) 6464 break; 6465 case PM_DEP_WK_ATTACH: 6466 ret = pm_keeper(work->pdw_keeper); 6467 PMD(PMD_DEP, ("%s: PM_DEP_WK_ATTACH: pm_keeper returns %d\n", 6468 pmf, ret)) 6469 ret = pm_kept(work->pdw_kept); 6470 PMD(PMD_DEP, ("%s: PM_DEP_WK_ATTACH: pm_kept returns %d\n", 6471 pmf, ret)) 6472 break; 6473 case PM_DEP_WK_CHECK_KEPT: 6474 ret = pm_is_kept(work->pdw_kept); 6475 PMD(PMD_DEP, ("%s: PM_DEP_WK_CHECK_KEPT: kept=%s, ret=%d\n", 6476 pmf, work->pdw_kept, ret)) 6477 break; 6478 case PM_DEP_WK_CPR_SUSPEND: 6479 pm_discard_dependencies(); 6480 break; 6481 case PM_DEP_WK_CPR_RESUME: 6482 ddi_walk_devs(ddi_root_node(), pm_kept_walk, NULL); 6483 ddi_walk_devs(ddi_root_node(), pm_keeper_walk, NULL); 6484 break; 6485 default: 6486 ASSERT(0); 6487 break; 6488 } 6489 /* 6490 * Free the work structure if the requester is not waiting. 6491 * Otherwise it is the requester's responsibility to free it. 6492 */ 6493 if (!work->pdw_wait) { 6494 if (work->pdw_keeper) 6495 kmem_free(work->pdw_keeper, 6496 strlen(work->pdw_keeper) + 1); 6497 if (work->pdw_kept) 6498 kmem_free(work->pdw_kept, strlen(work->pdw_kept) + 1); 6499 kmem_free(work, sizeof (pm_dep_wk_t)); 6500 } else { 6501 /* 6502 * Notify the requester if it is waiting. 6503 */ 6504 work->pdw_ret = ret; 6505 work->pdw_done = 1; 6506 cv_signal(&work->pdw_cv); 6507 } 6508 } 6509 6510 /* 6511 * Process PM dependency requests. 6512 */ 6513 static void 6514 pm_dep_thread(void) 6515 { 6516 pm_dep_wk_t *work; 6517 callb_cpr_t cprinfo; 6518 6519 CALLB_CPR_INIT(&cprinfo, &pm_dep_thread_lock, callb_generic_cpr, 6520 "pm_dep_thread"); 6521 for (;;) { 6522 mutex_enter(&pm_dep_thread_lock); 6523 if (pm_dep_thread_workq == NULL) { 6524 CALLB_CPR_SAFE_BEGIN(&cprinfo); 6525 cv_wait(&pm_dep_thread_cv, &pm_dep_thread_lock); 6526 CALLB_CPR_SAFE_END(&cprinfo, &pm_dep_thread_lock); 6527 } 6528 work = pm_dep_thread_workq; 6529 pm_dep_thread_workq = work->pdw_next; 6530 if (pm_dep_thread_tail == work) 6531 pm_dep_thread_tail = work->pdw_next; 6532 mutex_exit(&pm_dep_thread_lock); 6533 pm_process_dep_request(work); 6534 6535 } 6536 /*NOTREACHED*/ 6537 } 6538 6539 /* 6540 * Set the power level of the indicated device to unknown (if it is not a 6541 * backwards compatible device), as it has just been resumed, and it won't 6542 * know if the power was removed or not. Adjust parent's kidsupcnt if necessary.
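 * (Components found at level 0 place a matching hold on the parent via
 * e_pm_hold_rele_power(), presumably because once their levels read as
 * unknown the framework can no longer tell that they were off.)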
6543 */ 6544 void 6545 pm_forget_power_level(dev_info_t *dip) 6546 { 6547 dev_info_t *pdip = ddi_get_parent(dip); 6548 int i, count = 0; 6549 6550 if (!PM_ISBC(dip)) { 6551 for (i = 0; i < PM_NUMCMPTS(dip); i++) 6552 count += (PM_CURPOWER(dip, i) == 0); 6553 6554 if (count && pdip && !PM_WANTS_NOTIFICATION(pdip)) 6555 e_pm_hold_rele_power(pdip, count); 6556 6557 /* 6558 * Count this as a power cycle if we care 6559 */ 6560 if (DEVI(dip)->devi_pm_volpmd && 6561 PM_CP(dip, 0)->pmc_cur_pwr == 0) 6562 DEVI(dip)->devi_pm_volpmd = 0; 6563 for (i = 0; i < PM_NUMCMPTS(dip); i++) 6564 e_pm_set_cur_pwr(dip, PM_CP(dip, i), PM_LEVEL_UNKNOWN); 6565 } 6566 } 6567 6568 /* 6569 * This function advises the caller whether it should make a power-off 6570 * transition at this time or not. If the transition is not advised 6571 * at this time, the time that the next power-off transition can 6572 * be made from now is returned through the "intervalp" pointer. 6573 * This function returns: 6574 * 6575 * 1 power-off advised 6576 * 0 power-off not advised, intervalp will point to seconds from 6577 * now that a power-off is advised. If it is past the number 6578 * of years that policy specifies the device should last, 6579 * a large number is returned as the time interval. 6580 * -1 error 6581 */ 6582 int 6583 pm_trans_check(struct pm_trans_data *datap, time_t *intervalp) 6584 { 6585 PMD_FUNC(pmf, "pm_trans_check") 6586 char dbuf[DC_SCSI_MFR_LEN]; 6587 struct pm_scsi_cycles *scp; 6588 int service_years, service_weeks, full_years; 6589 time_t now, service_seconds, tdiff; 6590 time_t within_year, when_allowed; 6591 char *ptr; 6592 int lower_bound_cycles, upper_bound_cycles, cycles_allowed; 6593 int cycles_diff, cycles_over; 6594 struct pm_smart_count *smart_p; 6595 6596 if (datap == NULL) { 6597 PMD(PMD_TCHECK, ("%s: NULL data pointer!\n", pmf)) 6598 return (-1); 6599 } 6600 6601 if (datap->format == DC_SCSI_FORMAT) { 6602 /* 6603 * Power cycles of the scsi drives are distributed 6604 * over 5 years with the following percentage ratio: 6605 * 6606 * 30%, 25%, 20%, 15%, and 10% 6607 * 6608 * The power cycle quota for each year is distributed 6609 * linearly throughout the year. The equation for 6610 * determining the expected cycles is: 6611 * 6612 * e = a * (n / y) 6613 * 6614 * e = expected cycles 6615 * a = allocated cycles for this year 6616 * n = number of seconds since beginning of this year 6617 * y = number of seconds in a year 6618 * 6619 * Note that the beginning of the year starts on the day that 6620 * the drive has been put into service. 6621 * 6622 * If the drive has passed its expected cycles, we 6623 * can determine when it can start to power cycle 6624 * again to keep it on track to meet the 5-year 6625 * life expectancy.
The equation for determining 6626 * when to power cycle is: 6627 * 6628 * w = y * (c / a) 6629 * 6630 * w = when it can power cycle again 6631 * y = number of seconds in a year 6632 * c = current number of cycles 6633 * a = allocated cycles for the year 6634 * 6635 */ 6636 char pcnt[DC_SCSI_NPY] = { 30, 55, 75, 90, 100 }; 6637 6638 scp = &datap->un.scsi_cycles; 6639 PMD(PMD_TCHECK, ("%s: format=%d, lifemax=%d, ncycles=%d, " 6640 "svc_date=%s, svc_flag=%d\n", pmf, datap->format, 6641 scp->lifemax, scp->ncycles, scp->svc_date, scp->flag)) 6642 if (scp->ncycles < 0 || scp->flag != 0) { 6643 PMD(PMD_TCHECK, ("%s: ncycles < 0 || flag != 0\n", pmf)) 6644 return (-1); 6645 } 6646 6647 if (scp->ncycles > scp->lifemax) { 6648 *intervalp = (LONG_MAX / hz); 6649 return (0); 6650 } 6651 6652 /* 6653 * convert service date to time_t 6654 */ 6655 bcopy(scp->svc_date, dbuf, DC_SCSI_YEAR_LEN); 6656 dbuf[DC_SCSI_YEAR_LEN] = '\0'; 6657 ptr = dbuf; 6658 service_years = stoi(&ptr) - EPOCH_YEAR; 6659 bcopy(&scp->svc_date[DC_SCSI_YEAR_LEN], dbuf, 6660 DC_SCSI_WEEK_LEN); 6661 dbuf[DC_SCSI_WEEK_LEN] = '\0'; 6662 6663 /* 6664 * scsi standard does not specify WW data, 6665 * could be (00-51) or (01-52) 6666 */ 6667 ptr = dbuf; 6668 service_weeks = stoi(&ptr); 6669 if (service_years < 0 || 6670 service_weeks < 0 || service_weeks > 52) { 6671 PMD(PMD_TCHECK, ("%s: service year %d and week %d\n", 6672 pmf, service_years, service_weeks)) 6673 return (-1); 6674 } 6675 6676 /* 6677 * calculate service date in seconds-since-epoch, 6678 * adding one day for each leap-year. 6679 * 6680 * (years-since-epoch + 2) fixes integer truncation, 6681 * example: (8) leap-years during [1972, 2000] 6682 * (2000 - 1970) = 30; and (30 + 2) / 4 = 8; 6683 */ 6684 service_seconds = (service_years * DC_SPY) + 6685 (service_weeks * DC_SPW) + 6686 (((service_years + 2) / 4) * DC_SPD); 6687 6688 now = gethrestime_sec(); 6689 /* 6690 * since the granularity of 'svc_date' is day not second, 6691 * 'now' should be rounded up to full day. 6692 */ 6693 now = ((now + DC_SPD -1) / DC_SPD) * DC_SPD; 6694 if (service_seconds > now) { 6695 PMD(PMD_TCHECK, ("%s: service date (%ld) later " 6696 "than now (%ld)!\n", pmf, service_seconds, now)) 6697 return (-1); 6698 } 6699 6700 tdiff = now - service_seconds; 6701 PMD(PMD_TCHECK, ("%s: age is %ld sec\n", pmf, tdiff)) 6702 6703 /* 6704 * NOTE - Leap years are not considered in the calculations 6705 * below. 6706 */ 6707 full_years = (tdiff / DC_SPY); 6708 if ((full_years >= DC_SCSI_NPY) && 6709 (scp->ncycles <= scp->lifemax)) 6710 return (1); 6711 6712 /* 6713 * Determine what is the normal cycle usage for the 6714 * device at the beginning and the end of this year. 6715 */ 6716 lower_bound_cycles = (!full_years) ? 0 : 6717 ((scp->lifemax * pcnt[full_years - 1]) / 100); 6718 upper_bound_cycles = (scp->lifemax * pcnt[full_years]) / 100; 6719 6720 if (scp->ncycles <= lower_bound_cycles) 6721 return (1); 6722 6723 /* 6724 * The linear slope that determines how many cycles 6725 * are allowed this year is number of seconds 6726 * passed this year over total number of seconds in a year. 
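 *
 * A worked example with hypothetical numbers: for lifemax = 1000 and a
 * first-year quota of 30%, the bounds are 0 and 300 cycles.  Halfway
 * through that year (within_year = DC_SPY / 2) the allowance is
 * 0 + (300 * (DC_SPY / 2)) / DC_SPY = 150 cycles, so a drive that has
 * used ncycles <= 150 is advised that it may power off.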
6727 */ 6728 cycles_diff = (upper_bound_cycles - lower_bound_cycles); 6729 within_year = (tdiff % DC_SPY); 6730 cycles_allowed = lower_bound_cycles + 6731 (((uint64_t)cycles_diff * (uint64_t)within_year) / DC_SPY); 6732 PMD(PMD_TCHECK, ("%s: lived %d yrs and %ld secs\n", pmf, 6733 full_years, within_year)) 6734 PMD(PMD_TCHECK, ("%s: # of cycles allowed %d\n", pmf, 6735 cycles_allowed)) 6736 6737 if (scp->ncycles <= cycles_allowed) 6738 return (1); 6739 6740 /* 6741 * The transition is not advised now but we can 6742 * determine when the next transition can be made. 6743 * 6744 * Depending on how many cycles the device has been 6745 * over-used, we may need to skip years with 6746 * different percentage quota in order to determine 6747 * when the next transition can be made. 6748 */ 6749 cycles_over = (scp->ncycles - lower_bound_cycles); 6750 while (cycles_over > cycles_diff) { 6751 full_years++; 6752 if (full_years >= DC_SCSI_NPY) { 6753 *intervalp = (LONG_MAX / hz); 6754 return (0); 6755 } 6756 cycles_over -= cycles_diff; 6757 lower_bound_cycles = upper_bound_cycles; 6758 upper_bound_cycles = 6759 (scp->lifemax * pcnt[full_years]) / 100; 6760 cycles_diff = (upper_bound_cycles - lower_bound_cycles); 6761 } 6762 6763 /* 6764 * The linear slope that determines when the next transition 6765 * can be made is the relative position of used cycles within a 6766 * year over total number of cycles within that year. 6767 */ 6768 when_allowed = service_seconds + (full_years * DC_SPY) + 6769 (((uint64_t)DC_SPY * (uint64_t)cycles_over) / cycles_diff); 6770 *intervalp = (when_allowed - now); 6771 if (*intervalp > (LONG_MAX / hz)) 6772 *intervalp = (LONG_MAX / hz); 6773 PMD(PMD_TCHECK, ("%s: no cycle is allowed in %ld secs\n", pmf, 6774 *intervalp)) 6775 return (0); 6776 } else if (datap->format == DC_SMART_FORMAT) { 6777 /* 6778 * power cycles of SATA disks are reported from SMART 6779 * attributes. 6780 */ 6781 smart_p = &datap->un.smart_count; 6782 if (smart_p->consumed >= smart_p->allowed) { 6783 *intervalp = (LONG_MAX / hz); 6784 PMD(PMD_TCHECK, ("%s: exceeded lifemax cycles.\n", pmf)) 6785 return (0); 6786 } else 6787 return (1); 6788 } 6789 6790 PMD(PMD_TCHECK, ("%s: unknown format!\n", pmf)) 6791 return (-1); 6792 } 6793 6794 /* 6795 * Nexus drivers call into pm framework to indicate which child driver is about 6796 * to be installed. In some platforms, ppm may need to configure the hardware 6797 * for successful installation of a driver. 
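 *
 * A nexus driver would typically make this call while setting up a child;
 * as a sketch (the surrounding initchild handling is hypothetical):
 *
 *	if (pm_init_child(child) != DDI_SUCCESS)
 *		return (DDI_FAILURE);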
6798 */ 6799 int 6800 pm_init_child(dev_info_t *dip) 6801 { 6802 power_req_t power_req; 6803 6804 ASSERT(ddi_binding_name(dip)); 6805 ASSERT(ddi_get_name_addr(dip)); 6806 pm_ppm_claim(dip); 6807 if (pm_ppm_claimed(dip)) { /* if ppm driver claims the node */ 6808 power_req.request_type = PMR_PPM_INIT_CHILD; 6809 power_req.req.ppm_config_req.who = dip; 6810 ASSERT(PPM(dip) != NULL); 6811 return (pm_ctlops(PPM(dip), dip, DDI_CTLOPS_POWER, &power_req, 6812 NULL)); 6813 } else { 6814 #ifdef DEBUG 6815 /* pass it to the default handler so we can debug things */ 6816 power_req.request_type = PMR_PPM_INIT_CHILD; 6817 power_req.req.ppm_config_req.who = dip; 6818 (void) pm_ctlops(NULL, dip, 6819 DDI_CTLOPS_POWER, &power_req, NULL); 6820 #endif 6821 } 6822 return (DDI_SUCCESS); 6823 } 6824 6825 /* 6826 * Bring parent of a node that is about to be probed up to full power, and 6827 * arrange for it to stay up until pm_post_probe() or pm_post_attach() decide 6828 * it is time to let it go down again. 6829 */ 6830 void 6831 pm_pre_probe(dev_info_t *dip, pm_ppm_cookie_t *cp) 6832 { 6833 int result; 6834 power_req_t power_req; 6835 6836 bzero(cp, sizeof (*cp)); 6837 cp->ppc_dip = dip; 6838 6839 pm_ppm_claim(dip); 6840 if (pm_ppm_claimed(dip)) { /* if ppm driver claims the node */ 6841 power_req.request_type = PMR_PPM_PRE_PROBE; 6842 power_req.req.ppm_config_req.who = dip; 6843 ASSERT(PPM(dip) != NULL); 6844 (void) pm_ctlops(PPM(dip), dip, 6845 DDI_CTLOPS_POWER, &power_req, &result); 6846 cp->ppc_ppm = PPM(dip); 6847 } else { 6848 #ifdef DEBUG 6849 /* pass it to the default handler so we can debug things */ 6850 power_req.request_type = PMR_PPM_PRE_PROBE; 6851 power_req.req.ppm_config_req.who = dip; 6852 (void) pm_ctlops(NULL, dip, 6853 DDI_CTLOPS_POWER, &power_req, &result); 6854 #endif 6855 cp->ppc_ppm = NULL; 6856 } 6857 } 6858 6859 int 6860 pm_pre_config(dev_info_t *dip, char *devnm) 6861 { 6862 PMD_FUNC(pmf, "pre_config") 6863 int ret; 6864 6865 if (MDI_VHCI(dip)) { 6866 PMD(PMD_SET, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip))) 6867 ret = mdi_power(dip, MDI_PM_PRE_CONFIG, NULL, devnm, 0); 6868 return (ret == MDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE); 6869 } else if (!PM_GET_PM_INFO(dip)) 6870 return (DDI_SUCCESS); 6871 6872 PMD(PMD_SET, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip))) 6873 pm_hold_power(dip); 6874 ret = pm_all_to_normal(dip, PM_CANBLOCK_BLOCK); 6875 if (ret != DDI_SUCCESS) 6876 pm_rele_power(dip); 6877 return (ret); 6878 } 6879 6880 /* 6881 * This routine is called by devfs during its walk to unconfigure a node. 6882 * If the call is due to auto mod_unloads and the dip is not at its 6883 * full power, we return DDI_FAILURE to terminate the walk, otherwise 6884 * we return DDI_SUCCESS. 6885 */ 6886 int 6887 pm_pre_unconfig(dev_info_t *dip, int flags, int *held, char *devnm) 6888 { 6889 PMD_FUNC(pmf, "pre_unconfig") 6890 int ret; 6891 6892 if (MDI_VHCI(dip)) { 6893 PMD(PMD_SET, ("%s: %s@%s(%s#%d), flags=%x\n", pmf, 6894 PM_DEVICE(dip), flags)) 6895 ret = mdi_power(dip, MDI_PM_PRE_UNCONFIG, held, devnm, flags); 6896 return (ret == MDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE); 6897 } else if (!PM_GET_PM_INFO(dip)) 6898 return (DDI_SUCCESS); 6899 6900 PMD(PMD_SET, ("%s: %s@%s(%s#%d), flags=%x\n", pmf, PM_DEVICE(dip), 6901 flags)) 6902 *held = 0; 6903 6904 /* 6905 * If the dip is a leaf node, don't power it up. 6906 */ 6907 if (!ddi_get_child(dip)) 6908 return (DDI_SUCCESS); 6909 6910 /* 6911 * Do not power up the node if it is called due to auto-modunload.
6912 */ 6913 if ((flags & NDI_AUTODETACH) && !pm_all_at_normal(dip)) 6914 return (DDI_FAILURE); 6915 6916 pm_hold_power(dip); 6917 *held = 1; 6918 ret = pm_all_to_normal(dip, PM_CANBLOCK_BLOCK); 6919 if (ret != DDI_SUCCESS) { 6920 pm_rele_power(dip); 6921 *held = 0; 6922 } 6923 return (ret); 6924 } 6925 6926 /* 6927 * Notify ppm of attach action. Parent is already held at full power by 6928 * probe action. 6929 */ 6930 void 6931 pm_pre_attach(dev_info_t *dip, pm_ppm_cookie_t *cp, ddi_attach_cmd_t cmd) 6932 { 6933 static char *me = "pm_pre_attach"; 6934 power_req_t power_req; 6935 int result; 6936 6937 /* 6938 * Initialize and fill in the PPM cookie 6939 */ 6940 bzero(cp, sizeof (*cp)); 6941 cp->ppc_cmd = (int)cmd; 6942 cp->ppc_ppm = PPM(dip); 6943 cp->ppc_dip = dip; 6944 6945 /* 6946 * DDI_ATTACH and DDI_RESUME cmds need to call platform specific 6947 * Power Management stuff. DDI_RESUME also has to purge its 6948 * powerlevel information. 6949 */ 6950 switch (cmd) { 6951 case DDI_ATTACH: 6952 if (cp->ppc_ppm) { /* if ppm driver claims the node */ 6953 power_req.request_type = PMR_PPM_PRE_ATTACH; 6954 power_req.req.ppm_config_req.who = dip; 6955 ASSERT(PPM(dip)); 6956 (void) pm_ctlops(cp->ppc_ppm, dip, DDI_CTLOPS_POWER, 6957 &power_req, &result); 6958 } 6959 #ifdef DEBUG 6960 else { 6961 power_req.request_type = PMR_PPM_PRE_ATTACH; 6962 power_req.req.ppm_config_req.who = dip; 6963 (void) pm_ctlops(NULL, dip, 6964 DDI_CTLOPS_POWER, &power_req, &result); 6965 } 6966 #endif 6967 break; 6968 case DDI_RESUME: 6969 pm_forget_power_level(dip); 6970 6971 if (cp->ppc_ppm) { /* if ppm driver claims the node */ 6972 power_req.request_type = PMR_PPM_PRE_RESUME; 6973 power_req.req.resume_req.who = cp->ppc_dip; 6974 power_req.req.resume_req.cmd = 6975 (ddi_attach_cmd_t)cp->ppc_cmd; 6976 ASSERT(PPM(cp->ppc_dip) == cp->ppc_ppm); 6977 (void) pm_ctlops(cp->ppc_ppm, cp->ppc_dip, 6978 DDI_CTLOPS_POWER, &power_req, &result); 6979 } 6980 #ifdef DEBUG 6981 else { 6982 power_req.request_type = PMR_PPM_PRE_RESUME; 6983 power_req.req.resume_req.who = cp->ppc_dip; 6984 power_req.req.resume_req.cmd = 6985 (ddi_attach_cmd_t)cp->ppc_cmd; 6986 (void) pm_ctlops(NULL, cp->ppc_dip, 6987 DDI_CTLOPS_POWER, &power_req, &result); 6988 } 6989 #endif 6990 break; 6991 6992 case DDI_PM_RESUME: 6993 break; 6994 6995 default: 6996 panic(me); 6997 } 6998 } 6999 7000 /* 7001 * Nexus drivers call into pm framework to indicate which child driver is 7002 * being uninstalled. In some platforms, ppm may need to reconfigure the 7003 * hardware since the device driver is no longer installed.
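 *
 * This is the teardown counterpart of pm_init_child(); a sketch of the
 * expected (hypothetical) caller:
 *
 *	(void) pm_uninit_child(child);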
7004 */ 7005 int 7006 pm_uninit_child(dev_info_t *dip) 7007 { 7008 power_req_t power_req; 7009 7010 ASSERT(ddi_binding_name(dip)); 7011 ASSERT(ddi_get_name_addr(dip)); 7012 pm_ppm_claim(dip); 7013 if (pm_ppm_claimed(dip)) { /* if ppm driver claims the node */ 7014 power_req.request_type = PMR_PPM_UNINIT_CHILD; 7015 power_req.req.ppm_config_req.who = dip; 7016 ASSERT(PPM(dip)); 7017 return (pm_ctlops(PPM(dip), dip, DDI_CTLOPS_POWER, &power_req, 7018 NULL)); 7019 } else { 7020 #ifdef DEBUG 7021 /* pass it to the default handler so we can debug things */ 7022 power_req.request_type = PMR_PPM_UNINIT_CHILD; 7023 power_req.req.ppm_config_req.who = dip; 7024 (void) pm_ctlops(NULL, dip, DDI_CTLOPS_POWER, &power_req, NULL); 7025 #endif 7026 } 7027 return (DDI_SUCCESS); 7028 } 7029 /* 7030 * Decrement kidsupcnt so scan can turn the parent back off if it is idle 7031 * Also notify ppm of result of probe if there is a ppm that cares 7032 */ 7033 void 7034 pm_post_probe(pm_ppm_cookie_t *cp, int ret, int probe_failed) 7035 { 7036 _NOTE(ARGUNUSED(probe_failed)) 7037 int result; 7038 power_req_t power_req; 7039 7040 if (cp->ppc_ppm) { /* if ppm driver claims the node */ 7041 power_req.request_type = PMR_PPM_POST_PROBE; 7042 power_req.req.ppm_config_req.who = cp->ppc_dip; 7043 power_req.req.ppm_config_req.result = ret; 7044 ASSERT(PPM(cp->ppc_dip) == cp->ppc_ppm); 7045 (void) pm_ctlops(cp->ppc_ppm, cp->ppc_dip, DDI_CTLOPS_POWER, 7046 &power_req, &result); 7047 } 7048 #ifdef DEBUG 7049 else { 7050 power_req.request_type = PMR_PPM_POST_PROBE; 7051 power_req.req.ppm_config_req.who = cp->ppc_dip; 7052 power_req.req.ppm_config_req.result = ret; 7053 (void) pm_ctlops(NULL, cp->ppc_dip, DDI_CTLOPS_POWER, 7054 &power_req, &result); 7055 } 7056 #endif 7057 } 7058 7059 void 7060 pm_post_config(dev_info_t *dip, char *devnm) 7061 { 7062 PMD_FUNC(pmf, "post_config") 7063 7064 if (MDI_VHCI(dip)) { 7065 PMD(PMD_SET, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip))) 7066 (void) mdi_power(dip, MDI_PM_POST_CONFIG, NULL, devnm, 0); 7067 return; 7068 } else if (!PM_GET_PM_INFO(dip)) 7069 return; 7070 7071 PMD(PMD_SET, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip))) 7072 pm_rele_power(dip); 7073 } 7074 7075 void 7076 pm_post_unconfig(dev_info_t *dip, int held, char *devnm) 7077 { 7078 PMD_FUNC(pmf, "post_unconfig") 7079 7080 if (MDI_VHCI(dip)) { 7081 PMD(PMD_SET, ("%s: %s@%s(%s#%d), held = %d\n", pmf, 7082 PM_DEVICE(dip), held)) 7083 (void) mdi_power(dip, MDI_PM_POST_UNCONFIG, &held, devnm, 0); 7084 return; 7085 } else if (!PM_GET_PM_INFO(dip)) 7086 return; 7087 7088 PMD(PMD_SET, ("%s: %s@%s(%s#%d), held = %d\n", pmf, PM_DEVICE(dip), 7089 held)) 7090 if (!held) 7091 return; 7092 /* 7093 * We have held power in pre_unconfig, release it here. 
7094 */ 7095 pm_rele_power(dip); 7096 } 7097 7098 /* 7099 * Notify ppm of result of attach if there is a ppm that cares 7100 */ 7101 void 7102 pm_post_attach(pm_ppm_cookie_t *cp, int ret) 7103 { 7104 int result; 7105 power_req_t power_req; 7106 dev_info_t *dip; 7107 7108 if (cp->ppc_cmd != DDI_ATTACH) 7109 return; 7110 7111 dip = cp->ppc_dip; 7112 7113 if (ret == DDI_SUCCESS) { 7114 /* 7115 * Attach succeeded, so proceed to doing post-attach pm tasks 7116 */ 7117 if (PM_GET_PM_INFO(dip) == NULL) 7118 (void) pm_start(dip); 7119 } else { 7120 /* 7121 * Attach may have got pm started before failing 7122 */ 7123 pm_stop(dip); 7124 } 7125 7126 if (cp->ppc_ppm) { /* if ppm driver claims the node */ 7127 power_req.request_type = PMR_PPM_POST_ATTACH; 7128 power_req.req.ppm_config_req.who = cp->ppc_dip; 7129 power_req.req.ppm_config_req.result = ret; 7130 ASSERT(PPM(cp->ppc_dip) == cp->ppc_ppm); 7131 (void) pm_ctlops(cp->ppc_ppm, cp->ppc_dip, 7132 DDI_CTLOPS_POWER, &power_req, &result); 7133 } 7134 #ifdef DEBUG 7135 else { 7136 power_req.request_type = PMR_PPM_POST_ATTACH; 7137 power_req.req.ppm_config_req.who = cp->ppc_dip; 7138 power_req.req.ppm_config_req.result = ret; 7139 (void) pm_ctlops(NULL, cp->ppc_dip, 7140 DDI_CTLOPS_POWER, &power_req, &result); 7141 } 7142 #endif 7143 } 7144 7145 /* 7146 * Notify ppm of detach action, and suspend power management on the node 7147 * while the detach is in progress. 7148 */ 7149 void 7150 pm_pre_detach(dev_info_t *dip, ddi_detach_cmd_t cmd, pm_ppm_cookie_t *cp) 7151 { 7152 int result; 7153 power_req_t power_req; 7154 7155 bzero(cp, sizeof (*cp)); 7156 cp->ppc_dip = dip; 7157 cp->ppc_cmd = (int)cmd; 7158 7159 switch (cmd) { 7160 case DDI_DETACH: 7161 pm_detaching(dip); /* suspend pm while detaching */ 7162 if (pm_ppm_claimed(dip)) { /* if ppm driver claims node */ 7163 power_req.request_type = PMR_PPM_PRE_DETACH; 7164 power_req.req.ppm_config_req.who = dip; 7165 ASSERT(PPM(dip)); 7166 (void) pm_ctlops(PPM(dip), dip, DDI_CTLOPS_POWER, 7167 &power_req, &result); 7168 cp->ppc_ppm = PPM(dip); 7169 } else { 7170 #ifdef DEBUG 7171 /* pass to the default handler so we can debug things */ 7172 power_req.request_type = PMR_PPM_PRE_DETACH; 7173 power_req.req.ppm_config_req.who = dip; 7174 (void) pm_ctlops(NULL, dip, 7175 DDI_CTLOPS_POWER, &power_req, &result); 7176 #endif 7177 cp->ppc_ppm = NULL; 7178 } 7179 break; 7180 7181 default: 7182 break; 7183 } 7184 } 7185 7186 /* 7187 * Dip is either a leaf node that exported the "no-involuntary-power-cycles" 7188 * prop. (if devi_pm_noinvol count is 0) or an ancestor of such a node. We need 7189 * to make an entry to record the details, which includes certain flag settings. 7190 */ 7191 static void 7192 pm_record_invol_path(char *path, int flags, int noinvolpm, int volpmd, 7193 int wasvolpmd, major_t major) 7194 { 7195 PMD_FUNC(pmf, "record_invol_path") 7196 major_t pm_path_to_major(char *); 7197 size_t plen; 7198 pm_noinvol_t *ip, *np, *pp; 7199 pp = NULL; 7200 7201 plen = strlen(path) + 1; 7202 np = kmem_zalloc(sizeof (*np), KM_SLEEP); 7203 np->ni_size = plen; 7204 np->ni_path = kmem_alloc(plen, KM_SLEEP); 7205 np->ni_noinvolpm = noinvolpm; 7206 np->ni_volpmd = volpmd; 7207 np->ni_wasvolpmd = wasvolpmd; 7208 np->ni_flags = flags; 7209 (void) strcpy(np->ni_path, path); 7210 /* 7211 * If we haven't actually seen the node attached, it is hard to figure 7212 * out its major. If we could hold the node by path, we would be much 7213 * happier here.
7214 */ 7215 if (major == DDI_MAJOR_T_NONE) { 7216 np->ni_major = pm_path_to_major(path); 7217 } else { 7218 np->ni_major = major; 7219 } 7220 rw_enter(&pm_noinvol_rwlock, RW_WRITER); 7221 for (ip = pm_noinvol_head; ip; pp = ip, ip = ip->ni_next) { 7222 int comp = strcmp(path, ip->ni_path); 7223 if (comp < 0) { 7224 PMD(PMD_NOINVOL, ("%s: %s insert before %s\n", 7225 pmf, path, ip->ni_path)) 7226 /* insert before current entry */ 7227 np->ni_next = ip; 7228 if (pp) { 7229 pp->ni_next = np; 7230 } else { 7231 pm_noinvol_head = np; 7232 } 7233 rw_exit(&pm_noinvol_rwlock); 7234 #ifdef DEBUG 7235 if (pm_debug & PMD_NOINVOL) 7236 pr_noinvol("record_invol_path exit0"); 7237 #endif 7238 return; 7239 } else if (comp == 0) { 7240 panic("%s already in pm_noinvol list", path); 7241 } 7242 } 7243 /* 7244 * If we did not find an entry in the list that this should go before, 7245 * then it must go at the end 7246 */ 7247 if (pp) { 7248 PMD(PMD_NOINVOL, ("%s: %s append after %s\n", pmf, path, 7249 pp->ni_path)) 7250 ASSERT(pp->ni_next == 0); 7251 pp->ni_next = np; 7252 } else { 7253 PMD(PMD_NOINVOL, ("%s: %s added to end-of-list\n", pmf, path)) 7254 ASSERT(!pm_noinvol_head); 7255 pm_noinvol_head = np; 7256 } 7257 rw_exit(&pm_noinvol_rwlock); 7258 #ifdef DEBUG 7259 if (pm_debug & PMD_NOINVOL) 7260 pr_noinvol("record_invol_path exit"); 7261 #endif 7262 } 7263 7264 void 7265 pm_record_invol(dev_info_t *dip) 7266 { 7267 char *pathbuf; 7268 int pm_all_components_off(dev_info_t *); 7269 int volpmd = (PM_NUMCMPTS(dip) > 0) && pm_all_components_off(dip); 7270 7271 pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 7272 (void) ddi_pathname(dip, pathbuf); 7273 7274 pm_record_invol_path(pathbuf, (DEVI(dip)->devi_pm_flags & 7275 (PMC_NO_INVOL | PMC_CONSOLE_FB)), DEVI(dip)->devi_pm_noinvolpm, 7276 DEVI(dip)->devi_pm_volpmd, volpmd, PM_MAJOR(dip)); 7277 7278 /* 7279 * If this child's detach will be holding up its ancestors, then we 7280 * allow for an exception to that if all children of this type have 7281 * gone down voluntarily. 
7282 * Now walk down the tree incrementing devi_pm_noinvolpm 7283 */ 7284 (void) pm_noinvol_update(PM_BP_NOINVOL_DETACH, 0, volpmd, pathbuf, 7285 dip); 7286 kmem_free(pathbuf, MAXPATHLEN); 7287 } 7288 7289 void 7290 pm_post_detach(pm_ppm_cookie_t *cp, int ret) 7291 { 7292 dev_info_t *dip = cp->ppc_dip; 7293 int result; 7294 power_req_t power_req; 7295 7296 switch (cp->ppc_cmd) { 7297 case DDI_DETACH: 7298 if (cp->ppc_ppm) { /* if ppm driver claims the node */ 7299 power_req.request_type = PMR_PPM_POST_DETACH; 7300 power_req.req.ppm_config_req.who = cp->ppc_dip; 7301 power_req.req.ppm_config_req.result = ret; 7302 ASSERT(PPM(cp->ppc_dip) == cp->ppc_ppm); 7303 (void) pm_ctlops(cp->ppc_ppm, cp->ppc_dip, 7304 DDI_CTLOPS_POWER, &power_req, &result); 7305 } 7306 #ifdef DEBUG 7307 else { 7308 power_req.request_type = PMR_PPM_POST_DETACH; 7309 power_req.req.ppm_config_req.who = cp->ppc_dip; 7310 power_req.req.ppm_config_req.result = ret; 7311 (void) pm_ctlops(NULL, cp->ppc_dip, 7312 DDI_CTLOPS_POWER, &power_req, &result); 7313 } 7314 #endif 7315 if (ret == DDI_SUCCESS) { 7316 /* 7317 * For hotplug detach we assume it is *really* gone 7318 */ 7319 if (cp->ppc_cmd == DDI_DETACH && 7320 ((DEVI(dip)->devi_pm_flags & 7321 (PMC_NO_INVOL | PMC_CONSOLE_FB)) || 7322 DEVI(dip)->devi_pm_noinvolpm)) 7323 pm_record_invol(dip); 7324 DEVI(dip)->devi_pm_flags &= 7325 ~(PMC_NO_INVOL | PMC_NOINVOL_DONE); 7326 7327 /* 7328 * If console fb is detaching, then we don't need to 7329 * worry any more about it going off (pm_detaching has 7330 * brought up all components) 7331 */ 7332 if (PM_IS_CFB(dip)) { 7333 mutex_enter(&pm_cfb_lock); 7334 ASSERT(cfb_dip_detaching); 7335 ASSERT(cfb_dip == NULL); 7336 ASSERT(pm_cfb_comps_off == 0); 7337 cfb_dip_detaching = NULL; 7338 mutex_exit(&pm_cfb_lock); 7339 } 7340 pm_stop(dip); /* make it permanent */ 7341 } else { 7342 if (PM_IS_CFB(dip)) { 7343 mutex_enter(&pm_cfb_lock); 7344 ASSERT(cfb_dip_detaching); 7345 ASSERT(cfb_dip == NULL); 7346 ASSERT(pm_cfb_comps_off == 0); 7347 cfb_dip = cfb_dip_detaching; 7348 cfb_dip_detaching = NULL; 7349 mutex_exit(&pm_cfb_lock); 7350 } 7351 pm_detach_failed(dip); /* resume power management */ 7352 } 7353 break; 7354 case DDI_PM_SUSPEND: 7355 break; 7356 case DDI_SUSPEND: 7357 break; /* legal, but nothing to do */ 7358 default: 7359 #ifdef DEBUG 7360 panic("pm_post_detach: unrecognized cmd %d for detach", 7361 cp->ppc_cmd); 7362 /*NOTREACHED*/ 7363 #else 7364 break; 7365 #endif 7366 } 7367 } 7368 7369 /* 7370 * Called after vfs_mountroot has got the clock started to fix up timestamps 7371 * that were set when root bus drivers attached. hrestime was 0 then, so the 7372 * devices look busy but have a busycnt of 0. 7373 */ 7374 int 7375 pm_adjust_timestamps(dev_info_t *dip, void *arg) 7376 { 7377 _NOTE(ARGUNUSED(arg)) 7378 7379 pm_info_t *info = PM_GET_PM_INFO(dip); 7380 struct pm_component *cp; 7381 int i; 7382 7383 if (!info) 7384 return (DDI_WALK_CONTINUE); 7385 PM_LOCK_BUSY(dip); 7386 for (i = 0; i < PM_NUMCMPTS(dip); i++) { 7387 cp = PM_CP(dip, i); 7388 if (cp->pmc_timestamp == 0 && cp->pmc_busycount == 0) 7389 cp->pmc_timestamp = gethrestime_sec(); 7390 } 7391 PM_UNLOCK_BUSY(dip); 7392 return (DDI_WALK_CONTINUE); 7393 } 7394 7395 /* 7396 * Called at attach time to see if the device being attached has a record in 7397 * the no involuntary power cycles list.
If so, we do some bookkeeping on the 7398 * parents and set a flag in the dip 7399 */ 7400 void 7401 pm_noinvol_specd(dev_info_t *dip) 7402 { 7403 PMD_FUNC(pmf, "noinvol_specd") 7404 char *pathbuf; 7405 pm_noinvol_t *ip, *pp = NULL; 7406 int wasvolpmd; 7407 int found = 0; 7408 7409 if (DEVI(dip)->devi_pm_flags & PMC_NOINVOL_DONE) 7410 return; 7411 DEVI(dip)->devi_pm_flags |= PMC_NOINVOL_DONE; 7412 pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP); 7413 (void) ddi_pathname(dip, pathbuf); 7414 7415 PM_LOCK_DIP(dip); 7416 DEVI(dip)->devi_pm_volpmd = 0; 7417 DEVI(dip)->devi_pm_noinvolpm = 0; 7418 rw_enter(&pm_noinvol_rwlock, RW_READER); 7419 for (ip = pm_noinvol_head; ip; pp = ip, ip = ip->ni_next) { 7420 PMD(PMD_NOINVOL, ("%s: comparing '%s' to '%s'\n", 7421 pmf, pathbuf, ip->ni_path)) 7422 if (strcmp(pathbuf, ip->ni_path) == 0) { 7423 found++; 7424 break; 7425 } 7426 } 7427 rw_exit(&pm_noinvol_rwlock); 7428 if (!found) { 7429 PM_UNLOCK_DIP(dip); 7430 kmem_free(pathbuf, MAXPATHLEN); 7431 return; 7432 } 7433 rw_enter(&pm_noinvol_rwlock, RW_WRITER); 7434 pp = NULL; 7435 for (ip = pm_noinvol_head; ip; pp = ip, ip = ip->ni_next) { 7436 PMD(PMD_NOINVOL, ("%s: comparing '%s' to '%s'\n", 7437 pmf, pathbuf, ip->ni_path)) 7438 if (strcmp(pathbuf, ip->ni_path) == 0) { 7439 ip->ni_flags &= ~PMC_DRIVER_REMOVED; 7440 DEVI(dip)->devi_pm_flags |= ip->ni_flags; 7441 /* 7442 * Handle special case of console fb 7443 */ 7444 if (PM_IS_CFB(dip)) { 7445 mutex_enter(&pm_cfb_lock); 7446 cfb_dip = dip; 7447 PMD(PMD_CFB, ("%s: %s@%s(%s#%d) setting " 7448 "cfb_dip\n", pmf, PM_DEVICE(dip))) 7449 mutex_exit(&pm_cfb_lock); 7450 } 7451 DEVI(dip)->devi_pm_noinvolpm = ip->ni_noinvolpm; 7452 ASSERT((DEVI(dip)->devi_pm_flags & 7453 (PMC_NO_INVOL | PMC_CONSOLE_FB)) || 7454 DEVI(dip)->devi_pm_noinvolpm); 7455 DEVI(dip)->devi_pm_volpmd = ip->ni_volpmd; 7456 PMD(PMD_NOINVOL, ("%s: noinvol=%d, volpmd=%d, " 7457 "wasvolpmd=%d, flags=%x, path=%s\n", pmf, 7458 ip->ni_noinvolpm, ip->ni_volpmd, 7459 ip->ni_wasvolpmd, ip->ni_flags, ip->ni_path)) 7460 /* 7461 * free the entry in hopes the list will now be empty 7462 * and we won't have to search it any more until the 7463 * device detaches 7464 */ 7465 if (pp) { 7466 PMD(PMD_NOINVOL, ("%s: free %s, prev %s\n", 7467 pmf, ip->ni_path, pp->ni_path)) 7468 pp->ni_next = ip->ni_next; 7469 } else { 7470 PMD(PMD_NOINVOL, ("%s: free %s head\n", 7471 pmf, ip->ni_path)) 7472 ASSERT(pm_noinvol_head == ip); 7473 pm_noinvol_head = ip->ni_next; 7474 } 7475 PM_UNLOCK_DIP(dip); 7476 wasvolpmd = ip->ni_wasvolpmd; 7477 rw_exit(&pm_noinvol_rwlock); 7478 kmem_free(ip->ni_path, ip->ni_size); 7479 kmem_free(ip, sizeof (*ip)); 7480 /* 7481 * Now walk up the tree decrementing devi_pm_noinvolpm 7482 * (and volpmd if appropriate) 7483 */ 7484 (void) pm_noinvol_update(PM_BP_NOINVOL_ATTACH, 0, 7485 wasvolpmd, pathbuf, dip); 7486 #ifdef DEBUG 7487 if (pm_debug & PMD_NOINVOL) 7488 pr_noinvol("noinvol_specd exit"); 7489 #endif 7490 kmem_free(pathbuf, MAXPATHLEN); 7491 return; 7492 } 7493 } 7494 kmem_free(pathbuf, MAXPATHLEN); 7495 rw_exit(&pm_noinvol_rwlock); 7496 PM_UNLOCK_DIP(dip); 7497 } 7498 7499 int 7500 pm_all_components_off(dev_info_t *dip) 7501 { 7502 int i; 7503 pm_component_t *cp; 7504 7505 for (i = 0; i < PM_NUMCMPTS(dip); i++) { 7506 cp = PM_CP(dip, i); 7507 if (cp->pmc_cur_pwr == PM_LEVEL_UNKNOWN || 7508 cp->pmc_comp.pmc_lvals[cp->pmc_cur_pwr]) 7509 return (0); 7510 } 7511 return (1); /* all off */ 7512 } 7513 7514 /* 7515 * Make sure that all "no involuntary power cycles" devices are attached. 
7516  * Called before doing a cpr suspend to make sure the driver has a say about
7517  * the power cycle
7518  */
7519 int
7520 pm_reattach_noinvol(void)
7521 {
7522 	PMD_FUNC(pmf, "reattach_noinvol")
7523 	pm_noinvol_t *ip;
7524 	char *path;
7525 	dev_info_t *dip;
7526
7527 	/*
7528 	 * Prevent the modunload thread from unloading any modules until we
7529 	 * have completely stopped all kernel threads.
7530 	 */
7531 	modunload_disable();
7532 	for (ip = pm_noinvol_head; ip; ip = ip->ni_next) {
7533 		/*
7534 		 * Forget we've ever seen any entry
7535 		 */
7536 		ip->ni_persistent = 0;
7537 	}
7538 restart:
7539 	rw_enter(&pm_noinvol_rwlock, RW_READER);
7540 	for (ip = pm_noinvol_head; ip; ip = ip->ni_next) {
7541 #ifdef PMDDEBUG
7542 		major_t maj;
7543 		maj = ip->ni_major;
7544 #endif
7545 		path = ip->ni_path;
7546 		if (path != NULL && !(ip->ni_flags & PMC_DRIVER_REMOVED)) {
7547 			if (ip->ni_persistent) {
7548 				/*
7549 				 * If we weren't able to make this entry
7550 				 * go away, then we give up, as
7551 				 * holding/attaching the driver ought to have
7552 				 * resulted in this entry being deleted
7553 				 */
7554 				PMD(PMD_NOINVOL, ("%s: can't reattach %s "
7555 				    "(%s|%d)\n", pmf, ip->ni_path,
7556 				    ddi_major_to_name(maj), (int)maj))
7557 				cmn_err(CE_WARN, "cpr: unable to reattach %s ",
7558 				    ip->ni_path);
7559 				modunload_enable();
7560 				rw_exit(&pm_noinvol_rwlock);
7561 				return (0);
7562 			}
7563 			ip->ni_persistent++;
7564 			rw_exit(&pm_noinvol_rwlock);
7565 			PMD(PMD_NOINVOL, ("%s: holding %s\n", pmf, path))
7566 			dip = e_ddi_hold_devi_by_path(path, 0);
7567 			if (dip == NULL) {
7568 				PMD(PMD_NOINVOL, ("%s: can't hold (%s|%d)\n",
7569 				    pmf, path, (int)maj))
7570 				cmn_err(CE_WARN, "cpr: unable to hold %s "
7571 				    "driver", path);
7572 				modunload_enable();
7573 				return (0);
7574 			} else {
7575 				PMD(PMD_DHR, ("%s: release %s\n", pmf, path))
7576 				/*
7577 				 * Since the modunload thread is stopped, we
7578 				 * don't have to keep the driver held, which
7579 				 * saves a ton of bookkeeping
7580 				 */
7581 				ddi_release_devi(dip);
7582 				goto restart;
7583 			}
7584 		} else {
7585 			PMD(PMD_NOINVOL, ("%s: skip %s; unknown major\n",
7586 			    pmf, ip->ni_path))
7587 			continue;
7588 		}
7589 	}
7590 	rw_exit(&pm_noinvol_rwlock);
7591 	return (1);
7592 }
7593
7594 void
7595 pm_reattach_noinvol_fini(void)
7596 {
7597 	modunload_enable();
7598 }
7599
7600 /*
7601  * Display pm support code
7602  */
7603
7604
7605 /*
7606  * console frame-buffer power-mgmt gets enabled when debugging
7607  * services are not present or console fbpm override is set
7608  */
7609 void
7610 pm_cfb_setup(const char *stdout_path)
7611 {
7612 	PMD_FUNC(pmf, "cfb_setup")
7613 	extern int obpdebug;
7614 	char *devname;
7615 	dev_info_t *dip;
7616 	int devname_len;
7617 	extern dev_info_t *fbdip;
7618
7619 	/*
7620 	 * By virtue of this function being called (from consconfig),
7621 	 * we know stdout is a framebuffer.
7622 	 */
7623 	stdout_is_framebuffer = 1;
7624
7625 	if (obpdebug || (boothowto & RB_DEBUG)) {
7626 		if (pm_cfb_override == 0) {
7627 			/*
7628 			 * Console is frame buffer, but we want to suppress
7629 			 * pm on it because of debugging setup
7630 			 */
7631 			pm_cfb_enabled = 0;
7632 			cmn_err(CE_NOTE, "Kernel debugger present: disabling "
7633 			    "console power management.");
7634 			/*
7635 			 * however, we still need to know which is the console
7636 			 * fb in order to suppress pm on it
7637 			 */
7638 		} else {
7639 			cmn_err(CE_WARN, "Kernel debugger present: see "
7640 			    "kmdb(1) for interaction with power management.");
7641 		}
7642 	}
7643 #ifdef DEBUG
7644 	/*
7645 	 * If console is fb and is power managed, don't do prom_printfs from
7646 	 * pm debug macro
7647 	 */
7648 	if (pm_cfb_enabled && !pm_debug_to_console) {
7649 		if (pm_debug)
7650 			prom_printf("pm debug output will be to log only\n");
7651 		pm_divertdebug++;
7652 	}
7653 #endif
7654 	devname = i_ddi_strdup((char *)stdout_path, KM_SLEEP);
7655 	devname_len = strlen(devname) + 1;
7656 	PMD(PMD_CFB, ("%s: stripped %s\n", pmf, devname))
7657 	/* if the driver is attached */
7658 	if ((dip = fbdip) != NULL) {
7659 		PMD(PMD_CFB, ("%s: attached: %s@%s(%s#%d)\n", pmf,
7660 		    PM_DEVICE(dip)))
7661 		/*
7662 		 * We set up here as if the driver were power manageable in case
7663 		 * we get a later attach of a pm'able driver (which would result
7664 		 * in a panic later)
7665 		 */
7666 		cfb_dip = dip;
7667 		DEVI(dip)->devi_pm_flags |= (PMC_CONSOLE_FB | PMC_NO_INVOL);
7668 		PMD(PMD_CFB, ("%s: cfb_dip -> %s@%s(%s#%d)\n", pmf,
7669 		    PM_DEVICE(dip)))
7670 #ifdef DEBUG
7671 		if (!(PM_GET_PM_INFO(dip) != NULL && PM_NUMCMPTS(dip))) {
7672 			PMD(PMD_CFB, ("%s: %s@%s(%s#%d) not power-managed\n",
7673 			    pmf, PM_DEVICE(dip)))
7674 		}
7675 #endif
7676 	} else {
7677 		char *ep;
7678 		PMD(PMD_CFB, ("%s: pntd %s failed\n", pmf, devname))
7679 		pm_record_invol_path(devname,
7680 		    (PMC_CONSOLE_FB | PMC_NO_INVOL), 1, 0, 0,
7681 		    DDI_MAJOR_T_NONE);
7682 		for (ep = strrchr(devname, '/'); ep != devname;
7683 		    ep = strrchr(devname, '/')) {
7684 			PMD(PMD_CFB, ("%s: devname %s\n", pmf, devname))
7685 			*ep = '\0';
7686 			dip = pm_name_to_dip(devname, 0);
7687 			if (dip != NULL) {
7688 				/*
7689 				 * Walk up the tree incrementing
7690 				 * devi_pm_noinvolpm
7691 				 */
7692 				(void) pm_noinvol_update(PM_BP_NOINVOL_CFB,
7693 				    0, 0, devname, dip);
7694 				break;
7695 			} else {
7696 				pm_record_invol_path(devname,
7697 				    PMC_NO_INVOL, 1, 0, 0, DDI_MAJOR_T_NONE);
7698 			}
7699 		}
7700 	}
7701 	kmem_free(devname, devname_len);
7702 }
7703
7704 void
7705 pm_cfb_rele(void)
7706 {
7707 	mutex_enter(&pm_cfb_lock);
7708 	/*
7709 	 * this call isn't using the console any more, it is ok to take it
7710 	 * down if the count goes to 0
7711 	 */
7712 	cfb_inuse--;
7713 	mutex_exit(&pm_cfb_lock);
7714 }
7715
7716 /*
7717  * software interrupt handler for fbpm; this function exists because we can't
7718  * bring up the frame buffer power from above lock level.  So if we need to,
7719  * we instead schedule a softint that runs this routine and takes us into
7720  * debug_enter (a bit delayed from the original request, but avoiding a panic).
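 *
 * A rough sketch of the handshake (an editor's illustration; the actual
 * logic is in pm_cfb_trigger() and pm_cfb_softint() below):
 *
 *	pm_cfb_trigger():
 *		pm_soft_pending = B_TRUE;
 *		ddi_trigger_softintr(pm_soft_id);
 *	pm_cfb_softint():
 *		if (pm_soft_pending) {
 *			debug_enter(NULL);	(brings the fb back up)
 *			pm_cfb_rele();
 *			pm_soft_pending = B_FALSE;
 *		}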
7721  */
7722 static uint_t
7723 pm_cfb_softint(caddr_t int_handler_arg)
7724 {
7725 	_NOTE(ARGUNUSED(int_handler_arg))
7726 	int rval = DDI_INTR_UNCLAIMED;
7727
7728 	mutex_enter(&pm_cfb_lock);
7729 	if (pm_soft_pending) {
7730 		mutex_exit(&pm_cfb_lock);
7731 		debug_enter((char *)NULL);
7732 		/* acquired in debug_enter before calling pm_cfb_trigger */
7733 		pm_cfb_rele();
7734 		mutex_enter(&pm_cfb_lock);
7735 		pm_soft_pending = B_FALSE;
7736 		mutex_exit(&pm_cfb_lock);
7737 		rval = DDI_INTR_CLAIMED;
7738 	} else
7739 		mutex_exit(&pm_cfb_lock);
7740
7741 	return (rval);
7742 }
7743
7744 void
7745 pm_cfb_setup_intr(void)
7746 {
7747 	PMD_FUNC(pmf, "cfb_setup_intr")
7748 	extern void prom_set_outfuncs(void (*)(void), void (*)(void));
7749 	void pm_cfb_check_and_powerup(void);
7750
7751 	mutex_init(&pm_cfb_lock, NULL, MUTEX_SPIN, (void *)ipltospl(SPL8));
7752 #ifdef PMDDEBUG
7753 	mutex_init(&pm_debug_lock, NULL, MUTEX_SPIN, (void *)ipltospl(SPL8));
7754 #endif
7755
7756 	if (!stdout_is_framebuffer) {
7757 		PMD(PMD_CFB, ("%s: console not fb\n", pmf))
7758 		return;
7759 	}
7760
7761 	/*
7762 	 * setup software interrupt handler
7763 	 */
7764 	if (ddi_add_softintr(ddi_root_node(), DDI_SOFTINT_HIGH, &pm_soft_id,
7765 	    NULL, NULL, pm_cfb_softint, NULL) != DDI_SUCCESS)
7766 		panic("pm: unable to register soft intr.");
7767
7768 	prom_set_outfuncs(pm_cfb_check_and_powerup, pm_cfb_rele);
7769 }
7770
7771 /*
7772  * Checks to see if it is safe to write to the console wrt power management
7773  * (i.e. if the console is a framebuffer, then it must be at full power)
7774  * returns 1 when power is off (power-up is needed)
7775  * returns 0 when power is on (power-up not needed)
7776  */
7777 int
7778 pm_cfb_check_and_hold(void)
7779 {
7780 	/*
7781 	 * cfb_dip is set iff console is a power manageable frame buffer
7782 	 * device
7783 	 */
7784 	extern int modrootloaded;
7785
7786 	mutex_enter(&pm_cfb_lock);
7787 	cfb_inuse++;
7788 	ASSERT(cfb_inuse);	/* wrap? */
7789 	if (modrootloaded && cfb_dip) {
7790 		/*
7791 		 * don't power down the frame buffer, the prom is using it
7792 		 */
7793 		if (pm_cfb_comps_off) {
7794 			mutex_exit(&pm_cfb_lock);
7795 			return (1);
7796 		}
7797 	}
7798 	mutex_exit(&pm_cfb_lock);
7799 	return (0);
7800 }
7801
7802 /*
7803  * turn on cfb power (which is known to be off).
7804  * Must be called below lock level!
7805  */
7806 void
7807 pm_cfb_powerup(void)
7808 {
7809 	pm_info_t *info;
7810 	int norm;
7811 	int ccount, ci;
7812 	int unused;
7813 #ifdef DEBUG
7814 	/*
7815 	 * Can't reenter prom_prekern, so suppress pm debug messages
7816 	 * (still go to circular buffer).
7817 	 */
7818 	mutex_enter(&pm_debug_lock);
7819 	pm_divertdebug++;
7820 	mutex_exit(&pm_debug_lock);
7821 #endif
7822 	info = PM_GET_PM_INFO(cfb_dip);
7823 	ASSERT(info);
7824
7825 	ccount = PM_NUMCMPTS(cfb_dip);
7826 	for (ci = 0; ci < ccount; ci++) {
7827 		norm = pm_get_normal_power(cfb_dip, ci);
7828 		(void) pm_set_power(cfb_dip, ci, norm, PM_LEVEL_UPONLY,
7829 		    PM_CANBLOCK_BYPASS, 0, &unused);
7830 	}
7831 #ifdef DEBUG
7832 	mutex_enter(&pm_debug_lock);
7833 	pm_divertdebug--;
7834 	mutex_exit(&pm_debug_lock);
7835 #endif
7836 }
7837
7838 /*
7839  * Check if the console framebuffer is powered up.  If not power it up.
7840  * Note: Calling pm_cfb_check_and_hold has put a hold on the power state which
7841  * must be released by calling pm_cfb_rele when the console fb operation
7842  * is completed.
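 *
 * A minimal usage sketch (an editor's illustration, not from the original
 * comment); a code path about to touch the console does the equivalent of:
 *
 *	pm_cfb_check_and_powerup();	(hold the cfb; power it up if off)
 *	prom_printf("...");		(console output is now safe)
 *	pm_cfb_rele();			(drop the hold)
 *
 * This is exactly the pairing that pm_cfb_setup_intr() registers with
 * prom_set_outfuncs() above.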
7843  */
7844 void
7845 pm_cfb_check_and_powerup(void)
7846 {
7847 	if (pm_cfb_check_and_hold())
7848 		pm_cfb_powerup();
7849 }
7850
7851 /*
7852  * Trigger a low level interrupt to power up console frame buffer.
7853  */
7854 void
7855 pm_cfb_trigger(void)
7856 {
7857 	if (cfb_dip == NULL)
7858 		return;
7859
7860 	mutex_enter(&pm_cfb_lock);
7861 	/*
7862 	 * If the machine appears to be hung, unplugging the console keyboard
7863 	 * connector causes a high level interrupt and entry to debug_enter.
7864 	 * But, if the fb is powered down, this routine will be called to bring
7865 	 * it up (by generating a softint to do the work).  If a second attempt
7866 	 * at triggering this softint happens before the first one completes,
7867 	 * we panic as softints are most likely not being handled.
7868 	 */
7869 	if (pm_soft_pending) {
7870 		panicstr = "pm_cfb_trigger: failed to enter the debugger";
7871 		panic(panicstr);	/* does a power up at any intr level */
7872 		/* NOTREACHED */
7873 	}
7874 	pm_soft_pending = B_TRUE;
7875 	mutex_exit(&pm_cfb_lock);
7876 	ddi_trigger_softintr(pm_soft_id);
7877 }
7878
7879 static major_t i_path_to_major(char *, char *);
7880
7881 major_t
7882 pm_path_to_major(char *path)
7883 {
7884 	PMD_FUNC(pmf, "path_to_major")
7885 	char *np, *ap, *bp;
7886 	major_t ret;
7887 	size_t len;
7888
7889 	PMD(PMD_NOINVOL, ("%s: %s\n", pmf, path))
7890
7891 	np = strrchr(path, '/');
7892 	if (np != NULL)
7893 		np++;
7894 	else
7895 		np = path;
7896 	len = strlen(np) + 1;
7897 	bp = kmem_alloc(len, KM_SLEEP);
7898 	(void) strcpy(bp, np);
7899 	if ((ap = strchr(bp, '@')) != NULL) {
7900 		*ap = '\0';
7901 	}
7902 	PMD(PMD_NOINVOL, ("%s: %d\n", pmf, ddi_name_to_major(bp)))
7903 	ret = i_path_to_major(path, bp);
7904 	kmem_free(bp, len);
7905 	return (ret);
7906 }
7907
7908 #ifdef DEBUG
7909 #ifndef sparc
7910 clock_t pt_sleep = 1;
7911 #endif
7912
7913 char *pm_msgp;
7914 char *pm_bufend;
7915 char *pm_msgbuf = NULL;
7916 int pm_logpages = 0x100;
7917 #include <sys/sunldi.h>
7918 #include <sys/uio.h>
7919 clock_t pm_log_sleep = 1000;
7920 int pm_extra_cr = 1;
7921 volatile int pm_tty = 1;
7922
7923 #define PMLOGPGS	pm_logpages
7924
7925 #if defined(__x86)
7926 void pm_printf(char *s);
7927 #endif
7928
7929 /*PRINTFLIKE1*/
7930 void
7931 pm_log(const char *fmt, ...)
7932 {
7933 	va_list adx;
7934 	size_t size;
7935
7936 	mutex_enter(&pm_debug_lock);
7937 	if (pm_msgbuf == NULL) {
7938 		pm_msgbuf = kmem_zalloc(mmu_ptob(PMLOGPGS), KM_SLEEP);
7939 		pm_bufend = pm_msgbuf + mmu_ptob(PMLOGPGS) - 1;
7940 		pm_msgp = pm_msgbuf;
7941 	}
7942 	va_start(adx, fmt);
7943 	size = vsnprintf(NULL, 0, fmt, adx) + 1;
7944 	va_end(adx);
7945 	va_start(adx, fmt);
7946 	if (size > (pm_bufend - pm_msgp)) {	/* wraps */
7947 		bzero(pm_msgp, pm_bufend - pm_msgp);
7948 		(void) vsnprintf(pm_msgbuf, size, fmt, adx);
7949 		if (!pm_divertdebug)
7950 			prom_printf("%s", pm_msgbuf);
7951 #if defined(__x86)
7952 		if (pm_tty) {
7953 			pm_printf(pm_msgbuf);
7954 			if (pm_extra_cr)
7955 				pm_printf("\r");
7956 		}
7957 #endif
7958 		pm_msgp = pm_msgbuf + size;
7959 	} else {
7960 		(void) vsnprintf(pm_msgp, size, fmt, adx);
7961 #if defined(__x86)
7962 		if (pm_tty) {
7963 			pm_printf(pm_msgp);
7964 			if (pm_extra_cr)
7965 				pm_printf("\r");
7966 		}
7967 #endif
7968 		if (!pm_divertdebug)
7969 			prom_printf("%s", pm_msgp);
7970 		pm_msgp += size;
7971 	}
7972 	va_end(adx);
7973 	mutex_exit(&pm_debug_lock);
7974 	drv_usecwait((clock_t)pm_log_sleep);
7975 }
7976 #endif	/* DEBUG */
7977
7978 /*
7979  * We want to save the state of any directly pm'd devices over the suspend/
7980  * resume process so that we can put them back the way the controlling
7981  * process left them.
7982  */
7983 void
7984 pm_save_direct_levels(void)
7985 {
7986 	pm_processes_stopped = 1;
7987 	ddi_walk_devs(ddi_root_node(), pm_save_direct_lvl_walk, 0);
7988 }
7989
7990 static int
7991 pm_save_direct_lvl_walk(dev_info_t *dip, void *arg)
7992 {
7993 	_NOTE(ARGUNUSED(arg))
7994 	int i;
7995 	int *ip;
7996 	pm_info_t *info = PM_GET_PM_INFO(dip);
7997
7998 	if (!info)
7999 		return (DDI_WALK_CONTINUE);
8000
8001 	if (PM_ISDIRECT(dip) && !PM_ISBC(dip)) {
8002 		if (PM_NUMCMPTS(dip) > 2) {
8003 			info->pmi_lp = kmem_alloc(PM_NUMCMPTS(dip) *
8004 			    sizeof (int), KM_SLEEP);
8005 			ip = info->pmi_lp;
8006 		} else {
8007 			ip = info->pmi_levels;
8008 		}
8009 		/* autopm and processes are stopped, ok not to lock power */
8010 		for (i = 0; i < PM_NUMCMPTS(dip); i++)
8011 			*ip++ = PM_CURPOWER(dip, i);
8012 		/*
8013 		 * There is a small window between stopping the
8014 		 * processes and setting pm_processes_stopped where
8015 		 * a driver could get hung up in a pm_raise_power()
8016 		 * call.  Free any such driver now.
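		 * (PMP_RELEASE unblocks a thread waiting in pm_block();
		 * see the PMP_RELEASE case in pm_busop_match_request()
		 * below, which turns it into EAGAIN.)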
8017 		 */
8018 		pm_proceed(dip, PMP_RELEASE, -1, -1);
8019 	}
8020
8021 	return (DDI_WALK_CONTINUE);
8022 }
8023
8024 void
8025 pm_restore_direct_levels(void)
8026 {
8027 	/*
8028 	 * If cpr didn't call pm_save_direct_levels, (because stopping user
8029 	 * threads failed) then we don't want to try to restore them
8030 	 */
8031 	if (!pm_processes_stopped)
8032 		return;
8033
8034 	ddi_walk_devs(ddi_root_node(), pm_restore_direct_lvl_walk, 0);
8035 	pm_processes_stopped = 0;
8036 }
8037
8038 static int
8039 pm_restore_direct_lvl_walk(dev_info_t *dip, void *arg)
8040 {
8041 	_NOTE(ARGUNUSED(arg))
8042 	PMD_FUNC(pmf, "restore_direct_lvl_walk")
8043 	int i, nc, result;
8044 	int *ip;
8045
8046 	pm_info_t *info = PM_GET_PM_INFO(dip);
8047 	if (!info)
8048 		return (DDI_WALK_CONTINUE);
8049
8050 	if (PM_ISDIRECT(dip) && !PM_ISBC(dip)) {
8051 		if ((nc = PM_NUMCMPTS(dip)) > 2) {
8052 			ip = &info->pmi_lp[nc - 1];
8053 		} else {
8054 			ip = &info->pmi_levels[nc - 1];
8055 		}
8056 		/*
8057 		 * Because fb drivers fail attempts to turn off the
8058 		 * fb when the monitor is on, but treat a request to
8059 		 * turn on the monitor as a request to turn on the
8060 		 * fb too, we process components in descending order.
8061 		 * Because autopm is disabled and processes aren't
8062 		 * running, it is ok to examine current power outside
8063 		 * of the power lock
8064 		 */
8065 		for (i = nc - 1; i >= 0; i--, ip--) {
8066 			if (PM_CURPOWER(dip, i) == *ip)
8067 				continue;
8068 			if (pm_set_power(dip, i, *ip, PM_LEVEL_EXACT,
8069 			    PM_CANBLOCK_BYPASS, 0, &result) != DDI_SUCCESS) {
8070 				cmn_err(CE_WARN, "cpr: unable "
8071 				    "to restore power level of "
8072 				    "component %d of directly "
8073 				    "power managed device %s@%s"
8074 				    " to %d",
8075 				    i, PM_NAME(dip),
8076 				    PM_ADDR(dip), *ip);
8077 				PMD(PMD_FAIL, ("%s: failed to restore "
8078 				    "%s@%s(%s#%d)[%d] exact(%d)->%d, "
8079 				    "errno %d\n", pmf, PM_DEVICE(dip), i,
8080 				    PM_CURPOWER(dip, i), *ip, result))
8081 			}
8082 		}
8083 		if (nc > 2) {
8084 			kmem_free(info->pmi_lp, nc * sizeof (int));
8085 			info->pmi_lp = NULL;
8086 		}
8087 	}
8088 	return (DDI_WALK_CONTINUE);
8089 }
8090
8091 /*
8092  * Stolen from the bootdev module
8093  * attempt to convert a path to a major number
8094  */
8095 static major_t
8096 i_path_to_major(char *path, char *leaf_name)
8097 {
8098 	extern major_t path_to_major(char *pathname);
8099 	major_t maj;
8100
8101 	if ((maj = path_to_major(path)) == DDI_MAJOR_T_NONE) {
8102 		maj = ddi_name_to_major(leaf_name);
8103 	}
8104
8105 	return (maj);
8106 }
8107
8108 static void i_pm_driver_removed(major_t major);
8109
8110 /*
8111  * When user calls rem_drv, we need to forget no-involuntary-power-cycles
8112  * state.  An entry in the list means that the device is detached, so we need
8113  * to adjust its ancestors as if they had just seen this attach, and any
8114  * detached ancestors need to have their list entries adjusted.
8115  */
8116 void
8117 pm_driver_removed(major_t major)
8118 {
8119
8120 	/*
8121 	 * Serialize removal of drivers. This is to keep ancestors of
8122 	 * a node that is being deleted from getting deleted and added back
8123 	 * with different counters.
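	 * For example (an illustrative path, not from the original
	 * comment): if .../scsi@1/disk@0 and .../scsi@1 were removed
	 * concurrently, the scsi@1 list entry could be freed and then
	 * recreated with stale noinvolpm/volpmd counts.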
8124 	 */
8125 	mutex_enter(&pm_remdrv_lock);
8126 	i_pm_driver_removed(major);
8127 	mutex_exit(&pm_remdrv_lock);
8128 }
8129
8130 static void adjust_ancestors(char *, int);
8131 static int pm_is_noinvol_ancestor(pm_noinvol_t *);
8132 static void pm_noinvol_process_ancestors(char *);
8133
8134 /*
8135  * This routine is called recursively by pm_noinvol_process_ancestors()
8136  */
8137 static void
8138 i_pm_driver_removed(major_t major)
8139 {
8140 	PMD_FUNC(pmf, "driver_removed")
8141 	pm_noinvol_t *ip, *pp = NULL;
8142 	int wasvolpmd;
8143 	ASSERT(major != DDI_MAJOR_T_NONE);
8144 	PMD(PMD_NOINVOL, ("%s: %s\n", pmf, ddi_major_to_name(major)))
8145 again:
8146 	rw_enter(&pm_noinvol_rwlock, RW_WRITER);
8147 	for (ip = pm_noinvol_head; ip; pp = ip, ip = ip->ni_next) {
8148 		if (major != ip->ni_major)
8149 			continue;
8150 		/*
8151 		 * If it is an ancestor of a no-invol node that is not
8152 		 * removed, skip it.  This is to cover the case of an
8153 		 * ancestor removed without removing its descendants.
8154 		 */
8155 		if (pm_is_noinvol_ancestor(ip)) {
8156 			ip->ni_flags |= PMC_DRIVER_REMOVED;
8157 			continue;
8158 		}
8159 		wasvolpmd = ip->ni_wasvolpmd;
8160 		/*
8161 		 * remove the entry from the list
8162 		 */
8163 		if (pp) {
8164 			PMD(PMD_NOINVOL, ("%s: freeing %s, prev is %s\n",
8165 			    pmf, ip->ni_path, pp->ni_path))
8166 			pp->ni_next = ip->ni_next;
8167 		} else {
8168 			PMD(PMD_NOINVOL, ("%s: free %s head\n", pmf,
8169 			    ip->ni_path))
8170 			ASSERT(pm_noinvol_head == ip);
8171 			pm_noinvol_head = ip->ni_next;
8172 		}
8173 		rw_exit(&pm_noinvol_rwlock);
8174 		adjust_ancestors(ip->ni_path, wasvolpmd);
8175 		/*
8176 		 * Had an ancestor been removed before this node, it would have
8177 		 * been skipped. Adjust the no-invol counters for such skipped
8178 		 * ancestors.
8179 		 */
8180 		pm_noinvol_process_ancestors(ip->ni_path);
8181 		kmem_free(ip->ni_path, ip->ni_size);
8182 		kmem_free(ip, sizeof (*ip));
8183 		goto again;
8184 	}
8185 	rw_exit(&pm_noinvol_rwlock);
8186 }
8187
8188 /*
8189  * returns 1, if *aip is an ancestor of a no-invol node
8190  *	   0, otherwise
8191  */
8192 static int
8193 pm_is_noinvol_ancestor(pm_noinvol_t *aip)
8194 {
8195 	pm_noinvol_t *ip;
8196
8197 	ASSERT(strlen(aip->ni_path) != 0);
8198 	for (ip = pm_noinvol_head; ip; ip = ip->ni_next) {
8199 		if (ip == aip)
8200 			continue;
8201 		/*
8202 		 * To be an ancestor, the path must be an initial substring of
8203 		 * the descendant, and end just before a '/' in the
8204 		 * descendant's path.
8205 		 */
8206 		if ((strstr(ip->ni_path, aip->ni_path) == ip->ni_path) &&
8207 		    (ip->ni_path[strlen(aip->ni_path)] == '/'))
8208 			return (1);
8209 	}
8210 	return (0);
8211 }
8212
8213 /*
8214  * scan through the pm_noinvol list, adjusting ancestors of the current
8215  * node.  Modifies string *path.
8216  */
8217 static void
8218 adjust_ancestors(char *path, int wasvolpmd)
8219 {
8220 	PMD_FUNC(pmf, "adjust_ancestors")
8221 	char *cp;
8222 	pm_noinvol_t *lp;
8223 	pm_noinvol_t *pp = NULL;
8224 	major_t locked = DDI_MAJOR_T_NONE;
8225 	dev_info_t *dip;
8226 	char *pathbuf;
8227 	size_t pathbuflen = strlen(path) + 1;
8228
8229 	/*
8230 	 * First we look up the ancestor's dip.  If we find it, then we
8231 	 * adjust counts up the tree
8232 	 */
8233 	PMD(PMD_NOINVOL, ("%s: %s wasvolpmd %d\n", pmf, path, wasvolpmd))
8234 	pathbuf = kmem_alloc(pathbuflen, KM_SLEEP);
8235 	(void) strcpy(pathbuf, path);
8236 	cp = strrchr(pathbuf, '/');
8237 	if (cp == NULL) {
8238 		/* if no ancestors, then nothing to do */
8239 		kmem_free(pathbuf, pathbuflen);
8240 		return;
8241 	}
8242 	*cp = '\0';
8243 	dip = pm_name_to_dip(pathbuf, 1);
8244 	if (dip != NULL) {
8245 		locked = PM_MAJOR(dip);
8246
8247 		(void) pm_noinvol_update(PM_BP_NOINVOL_REMDRV, 0, wasvolpmd,
8248 		    path, dip);
8249
8250 		if (locked != DDI_MAJOR_T_NONE)
8251 			ddi_release_devi(dip);
8252 	} else {
8253 		char *apath;
8254 		size_t len = strlen(pathbuf) + 1;
8255 		int lock_held = 1;
8256
8257 		/*
8258 		 * Now check for ancestors that exist only in the list
8259 		 */
8260 		apath = kmem_alloc(len, KM_SLEEP);
8261 		(void) strcpy(apath, pathbuf);
8262 		rw_enter(&pm_noinvol_rwlock, RW_WRITER);
8263 		for (lp = pm_noinvol_head; lp; pp = lp, lp = lp->ni_next) {
8264 			/*
8265 			 * This can only happen once. Since we have to drop
8266 			 * the lock, we need to extract the relevant info.
8267 			 */
8268 			if (strcmp(pathbuf, lp->ni_path) == 0) {
8269 				PMD(PMD_NOINVOL, ("%s: %s no %d -> %d\n", pmf,
8270 				    lp->ni_path, lp->ni_noinvolpm,
8271 				    lp->ni_noinvolpm - 1))
8272 				lp->ni_noinvolpm--;
8273 				if (wasvolpmd && lp->ni_volpmd) {
8274 					PMD(PMD_NOINVOL, ("%s: %s vol %d -> "
8275 					    "%d\n", pmf, lp->ni_path,
8276 					    lp->ni_volpmd, lp->ni_volpmd - 1))
8277 					lp->ni_volpmd--;
8278 				}
8279 				/*
8280 				 * remove the entry from the list if there
8281 				 * are no more no-invol descendants and the
8282 				 * node itself is not a no-invol node.
8283 				 */
8284 				if (!(lp->ni_noinvolpm ||
8285 				    (lp->ni_flags & PMC_NO_INVOL))) {
8286 					ASSERT(lp->ni_volpmd == 0);
8287 					if (pp) {
8288 						PMD(PMD_NOINVOL, ("%s: freeing "
8289 						    "%s, prev is %s\n", pmf,
8290 						    lp->ni_path, pp->ni_path))
8291 						pp->ni_next = lp->ni_next;
8292 					} else {
8293 						PMD(PMD_NOINVOL, ("%s: free %s "
8294 						    "head\n", pmf, lp->ni_path))
8295 						ASSERT(pm_noinvol_head == lp);
8296 						pm_noinvol_head = lp->ni_next;
8297 					}
8298 					lock_held = 0;
8299 					rw_exit(&pm_noinvol_rwlock);
8300 					adjust_ancestors(apath, wasvolpmd);
8301 					/* restore apath */
8302 					(void) strcpy(apath, pathbuf);
8303 					kmem_free(lp->ni_path, lp->ni_size);
8304 					kmem_free(lp, sizeof (*lp));
8305 				}
8306 				break;
8307 			}
8308 		}
8309 		if (lock_held)
8310 			rw_exit(&pm_noinvol_rwlock);
8311 		adjust_ancestors(apath, wasvolpmd);
8312 		kmem_free(apath, len);
8313 	}
8314 	kmem_free(pathbuf, pathbuflen);
8315 }
8316
8317 /*
8318  * Do no-invol processing for any ancestors, i.e., adjust counters of
8319  * ancestors that were skipped even though their drivers were removed.
8320  */
8321 static void
8322 pm_noinvol_process_ancestors(char *path)
8323 {
8324 	pm_noinvol_t *lp;
8325
8326 	rw_enter(&pm_noinvol_rwlock, RW_READER);
8327 	for (lp = pm_noinvol_head; lp; lp = lp->ni_next) {
8328 		if (strstr(path, lp->ni_path) &&
8329 		    (lp->ni_flags & PMC_DRIVER_REMOVED)) {
8330 			rw_exit(&pm_noinvol_rwlock);
8331 			i_pm_driver_removed(lp->ni_major);
8332 			return;
8333 		}
8334 	}
8335 	rw_exit(&pm_noinvol_rwlock);
8336 }
8337
8338 /*
8339  * Returns true if (detached) device needs to be kept up because it exported
8340  * the "no-involuntary-power-cycles" property or we're pretending it did
8341  * (console fb case) or it is an ancestor of such a device and has used up
8342  * the "one free cycle" allowed when all such leaf nodes have voluntarily
8343  * powered down upon detach.  In any event, we need an exact hit on the path
8344  * or we return false.
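 *
 * For example (an illustrative path, not from the original comment): if
 * /pci@0/scsi@1/disk@0 exported the property and then detached, only a
 * lookup of that exact path returns true here; its ancestors are covered
 * by their own list entries and counts.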
8345  */
8346 int
8347 pm_noinvol_detached(char *path)
8348 {
8349 	PMD_FUNC(pmf, "noinvol_detached")
8350 	pm_noinvol_t *ip;
8351 	int ret = 0;
8352
8353 	rw_enter(&pm_noinvol_rwlock, RW_READER);
8354 	for (ip = pm_noinvol_head; ip; ip = ip->ni_next) {
8355 		if (strcmp(path, ip->ni_path) == 0) {
8356 			if (ip->ni_flags & PMC_CONSOLE_FB) {
8357 				PMD(PMD_NOINVOL | PMD_CFB, ("%s: inhibits CFB "
8358 				    "%s\n", pmf, path))
8359 				ret = 1;
8360 				break;
8361 			}
8362 #ifdef DEBUG
8363 			if (ip->ni_noinvolpm != ip->ni_volpmd)
8364 				PMD(PMD_NOINVOL, ("%s: (%d != %d) inhibits %s"
8365 				    "\n", pmf, ip->ni_noinvolpm, ip->ni_volpmd,
8366 				    path))
8367 #endif
8368 			ret = (ip->ni_noinvolpm != ip->ni_volpmd);
8369 			break;
8370 		}
8371 	}
8372 	rw_exit(&pm_noinvol_rwlock);
8373 	return (ret);
8374 }
8375
8376 int
8377 pm_is_cfb(dev_info_t *dip)
8378 {
8379 	return (dip == cfb_dip);
8380 }
8381
8382 #ifdef DEBUG
8383 /*
8384  * Return true if all components of the console frame buffer are at
8385  * "normal" power, i.e., fully on.  For the case where the console is not
8386  * a framebuffer, we also return true
8387  */
8388 int
8389 pm_cfb_is_up(void)
8390 {
8391 	return (pm_cfb_comps_off == 0);
8392 }
8393 #endif
8394
8395 /*
8396  * Prevent scan from powering down the node by incrementing the
8397  * kidsupcnt.
8398  */
8399 void
8400 pm_hold_power(dev_info_t *dip)
8401 {
8402 	e_pm_hold_rele_power(dip, 1);
8403 }
8404
8405 /*
8406  * Release the hold by decrementing the kidsupcnt, allowing scan
8407  * to power down the node if all conditions are met.
8408  */
8409 void
8410 pm_rele_power(dev_info_t *dip)
8411 {
8412 	e_pm_hold_rele_power(dip, -1);
8413 }
8414
8415 /*
8416  * A wrapper around pm_all_to_normal() to power up a dip
8417  * to its normal level
8418  */
8419 int
8420 pm_powerup(dev_info_t *dip)
8421 {
8422 	PMD_FUNC(pmf, "pm_powerup")
8423
8424 	PMD(PMD_ALLNORM, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip)))
8425 	ASSERT(!(servicing_interrupt()));
8426
8427 	/*
8428 	 * in case this node is not already participating in pm
8429 	 */
8430 	if (!PM_GET_PM_INFO(dip)) {
8431 		if (!DEVI_IS_ATTACHING(dip))
8432 			return (DDI_SUCCESS);
8433 		if (pm_start(dip) != DDI_SUCCESS)
8434 			return (DDI_FAILURE);
8435 		if (!PM_GET_PM_INFO(dip))
8436 			return (DDI_SUCCESS);
8437 	}
8438
8439 	return (pm_all_to_normal(dip, PM_CANBLOCK_BLOCK));
8440 }
8441
8442 int
8443 pm_rescan_walk(dev_info_t *dip, void *arg)
8444 {
8445 	_NOTE(ARGUNUSED(arg))
8446
8447 	if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip))
8448 		return (DDI_WALK_CONTINUE);
8449
8450 	/*
8451 	 * Currently the pm_cpr_callb/resume code is the only caller, and
8452 	 * it needs to make sure that stopped scans get reactivated.
8453 	 * Other rescan walkers would not need to reactivate stopped
8454 	 * scans.
8455 	 */
8456 	pm_scan_init(dip);
8457
8458 	(void) pm_rescan(dip);
8459 	return (DDI_WALK_CONTINUE);
8460 }
8461
8462 static dev_info_t *
8463 pm_get_next_descendent(dev_info_t *dip, dev_info_t *tdip)
8464 {
8465 	dev_info_t *wdip, *pdip;
8466
8467 	for (wdip = tdip; wdip != dip; wdip = pdip) {
8468 		pdip = ddi_get_parent(wdip);
8469 		if (pdip == dip)
8470 			return (wdip);
8471 	}
8472 	return (NULL);
8473 }
8474
8475 int
8476 pm_busop_bus_power(dev_info_t *dip, void *impl_arg, pm_bus_power_op_t op,
8477     void *arg, void *result)
8478 {
8479 	PMD_FUNC(pmf, "bp_bus_power")
8480 	dev_info_t *cdip;
8481 	pm_info_t *cinfo;
8482 	pm_bp_child_pwrchg_t *bpc;
8483 	pm_sp_misc_t *pspm;
8484 	pm_bp_nexus_pwrup_t *bpn;
8485 	pm_bp_child_pwrchg_t new_bpc;
8486 	pm_bp_noinvol_t *bpi;
8487 	dev_info_t *tdip;
8488 	char *pathbuf;
8489 	int ret = DDI_SUCCESS;
8490 	int errno = 0;
8491 	pm_component_t *cp;
8492
8493 	PMD(PMD_SET, ("%s: %s@%s(%s#%d) %s\n", pmf, PM_DEVICE(dip),
8494 	    pm_decode_op(op)))
8495 	switch (op) {
8496 	case BUS_POWER_CHILD_PWRCHG:
8497 		bpc = (pm_bp_child_pwrchg_t *)arg;
8498 		pspm = (pm_sp_misc_t *)bpc->bpc_private;
8499 		tdip = bpc->bpc_dip;
8500 		cdip = pm_get_next_descendent(dip, tdip);
8501 		cinfo = PM_GET_PM_INFO(cdip);
8502 		if (cdip != tdip) {
8503 			/*
8504 			 * If this intermediate node is an involved parent,
8505 			 * it must power up the target itself as needed.
8506 			 * There is nothing else the framework can do here.
8507 			 */
8508 			if (PM_WANTS_NOTIFICATION(cdip)) {
8509 				PMD(PMD_SET, ("%s: call bus_power for "
8510 				    "%s@%s(%s#%d)\n", pmf, PM_DEVICE(cdip)))
8511 				return ((*PM_BUS_POWER_FUNC(cdip))(cdip,
8512 				    impl_arg, op, arg, result));
8513 			}
8514 			ASSERT(pspm->pspm_direction == PM_LEVEL_UPONLY ||
8515 			    pspm->pspm_direction == PM_LEVEL_DOWNONLY ||
8516 			    pspm->pspm_direction == PM_LEVEL_EXACT);
8517 			/*
8518 			 * we presume that the parent needs to be up in
8519 			 * order for the child to change state (either
8520 			 * because it must already be on if the child is on
8521 			 * (and the pm_all_to_normal_nexus() will be a nop)
8522 			 * or because it will need to be on for the child
8523 			 * to come on; so we make the call regardless
8524 			 */
8525 			pm_hold_power(cdip);
8526 			if (cinfo) {
8527 				pm_canblock_t canblock = pspm->pspm_canblock;
8528 				ret = pm_all_to_normal_nexus(cdip, canblock);
8529 				if (ret != DDI_SUCCESS) {
8530 					pm_rele_power(cdip);
8531 					return (ret);
8532 				}
8533 			}
8534 			PMD(PMD_SET, ("%s: walk down to %s@%s(%s#%d)\n", pmf,
8535 			    PM_DEVICE(cdip)))
8536 			ret = pm_busop_bus_power(cdip, impl_arg, op, arg,
8537 			    result);
8538 			pm_rele_power(cdip);
8539 		} else {
8540 			ret = pm_busop_set_power(cdip, impl_arg, op, arg,
8541 			    result);
8542 		}
8543 		return (ret);
8544
8545 	case BUS_POWER_NEXUS_PWRUP:
8546 		bpn = (pm_bp_nexus_pwrup_t *)arg;
8547 		pspm = (pm_sp_misc_t *)bpn->bpn_private;
8548
8549 		if (!e_pm_valid_info(dip, NULL) ||
8550 		    !e_pm_valid_comp(dip, bpn->bpn_comp, &cp) ||
8551 		    !e_pm_valid_power(dip, bpn->bpn_comp, bpn->bpn_level)) {
8552 			PMD(PMD_SET, ("%s: %s@%s(%s#%d) has no pm info; EIO\n",
8553 			    pmf, PM_DEVICE(dip)))
8554 			*pspm->pspm_errnop = EIO;
8555 			*(int *)result = DDI_FAILURE;
8556 			return (DDI_FAILURE);
8557 		}
8558
8559 		ASSERT(bpn->bpn_dip == dip);
8560 		PMD(PMD_SET, ("%s: nexus powerup for %s@%s(%s#%d)\n", pmf,
8561 		    PM_DEVICE(dip)))
8562 		new_bpc.bpc_dip = dip;
8563 		pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
8564 		new_bpc.bpc_path = ddi_pathname(dip, pathbuf);
8565 		new_bpc.bpc_comp = bpn->bpn_comp;
8566 		new_bpc.bpc_olevel = PM_CURPOWER(dip, bpn->bpn_comp);
8567 		new_bpc.bpc_nlevel = bpn->bpn_level;
8568 		new_bpc.bpc_private = bpn->bpn_private;
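		/*
		 * The caller's pm_sp_misc_t rides along in bpn_private and
		 * is reused for the synthesized request below: a nexus
		 * power-up is by definition an UPONLY request, and its
		 * error is returned through our local errno.
		 */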
8569 		((pm_sp_misc_t *)(new_bpc.bpc_private))->pspm_direction =
8570 		    PM_LEVEL_UPONLY;
8571 		((pm_sp_misc_t *)(new_bpc.bpc_private))->pspm_errnop =
8572 		    &errno;
8573 		ret = pm_busop_set_power(dip, impl_arg, BUS_POWER_CHILD_PWRCHG,
8574 		    (void *)&new_bpc, result);
8575 		kmem_free(pathbuf, MAXPATHLEN);
8576 		return (ret);
8577
8578 	case BUS_POWER_NOINVOL:
8579 		bpi = (pm_bp_noinvol_t *)arg;
8580 		tdip = bpi->bpni_dip;
8581 		cdip = pm_get_next_descendent(dip, tdip);
8582
8583 		/* In case of rem_drv, the leaf node has been removed */
8584 		if (cdip == NULL)
8585 			return (DDI_SUCCESS);
8586
8587 		cinfo = PM_GET_PM_INFO(cdip);
8588 		if (cdip != tdip) {
8589 			if (PM_WANTS_NOTIFICATION(cdip)) {
8590 				PMD(PMD_NOINVOL,
8591 				    ("%s: call bus_power for %s@%s(%s#%d)\n",
8592 				    pmf, PM_DEVICE(cdip)))
8593 				ret = (*PM_BUS_POWER_FUNC(cdip))
8594 				    (cdip, NULL, op, arg, result);
8595 				if ((cinfo) && (ret == DDI_SUCCESS))
8596 					(void) pm_noinvol_update_node(cdip,
8597 					    bpi);
8598 				return (ret);
8599 			} else {
8600 				PMD(PMD_NOINVOL,
8601 				    ("%s: walk down to %s@%s(%s#%d)\n", pmf,
8602 				    PM_DEVICE(cdip)))
8603 				ret = pm_busop_bus_power(cdip, NULL, op,
8604 				    arg, result);
8605 				/*
8606 				 * Update the current node.
8607 				 */
8608 				if ((cinfo) && (ret == DDI_SUCCESS))
8609 					(void) pm_noinvol_update_node(cdip,
8610 					    bpi);
8611 				return (ret);
8612 			}
8613 		} else {
8614 			/*
8615 			 * For attach, detach, power up:
8616 			 * Do nothing for leaf node since its
8617 			 * counts are already updated.
8618 			 * For CFB and driver removal, since the path and the
8619 			 * target dip passed in are up to and including the
8620 			 * immediate ancestor, we need to do the update.
8621 			 */
8622 			PMD(PMD_NOINVOL, ("%s: target %s@%s(%s#%d) is "
8623 			    "reached\n", pmf, PM_DEVICE(cdip)))
8624 			if (cinfo && ((bpi->bpni_cmd == PM_BP_NOINVOL_REMDRV) ||
8625 			    (bpi->bpni_cmd == PM_BP_NOINVOL_CFB)))
8626 				(void) pm_noinvol_update_node(cdip, bpi);
8627 			return (DDI_SUCCESS);
8628 		}
8629
8630 	default:
8631 		PMD(PMD_SET, ("%s: operation %d is not supported!\n", pmf, op))
8632 		return (DDI_FAILURE);
8633 	}
8634 }
8635
8636 static int
8637 pm_busop_set_power(dev_info_t *dip, void *impl_arg, pm_bus_power_op_t op,
8638     void *arg, void *resultp)
8639 {
8640 	_NOTE(ARGUNUSED(impl_arg))
8641 	PMD_FUNC(pmf, "bp_set_power")
8642 	pm_ppm_devlist_t *devl = NULL;
8643 	int clevel;
8644 	int ret = DDI_SUCCESS;
8645 	dev_info_t *cdip;
8646 	pm_bp_child_pwrchg_t *bpc = (pm_bp_child_pwrchg_t *)arg;
8647 	pm_sp_misc_t *pspm = (pm_sp_misc_t *)bpc->bpc_private;
8648 	pm_canblock_t canblock = pspm->pspm_canblock;
8649 	int scan = pspm->pspm_scan;
8650 	int comp = bpc->bpc_comp;
8651 	int olevel = bpc->bpc_olevel;
8652 	int nlevel = bpc->bpc_nlevel;
8653 	int comps_off_incr = 0;
8654 	dev_info_t *pdip = ddi_get_parent(dip);
8655 	int dodeps;
8656 	int direction = pspm->pspm_direction;
8657 	int *errnop = pspm->pspm_errnop;
8658 #ifdef PMDDEBUG
8659 	char *dir = pm_decode_direction(direction);
8660 #endif
8661 	int *iresp = (int *)resultp;
8662 	time_t idletime, thresh;
8663 	pm_component_t *cp = PM_CP(dip, comp);
8664 	int work_type;
8665
8666 	*iresp = DDI_SUCCESS;
8667 	*errnop = 0;
8668 	ASSERT(op == BUS_POWER_CHILD_PWRCHG);
8669 	PMD(PMD_SET, ("%s: %s@%s(%s#%d) %s\n", pmf, PM_DEVICE(dip),
8670 	    pm_decode_op(op)))
8671
8672 	/*
8673 	 * The following set of conditions indicate we are here to handle a
8674 	 * driver's pm_[raise|lower]_power request, but the device is being
8675 	 * power managed (PM_DIRECT_PM) by a user process.  For that case
8676 	 * we want to pm_block and pass a status back to the caller based
8677 	 * on whether the controlling process's next activity on the device
8678 	 * matches the current request or not.  This distinction tells
8679 	 * downstream functions to avoid calling into a driver or changing
8680 	 * the framework's power state.  To actually block, we need:
8681 	 *
8682 	 * PM_ISDIRECT(dip)
8683 	 *	no reason to block unless a process is directly controlling dev
8684 	 * direction != PM_LEVEL_EXACT
8685 	 *	EXACT is used by controlling proc's PM_SET_CURRENT_POWER ioctl
8686 	 * !pm_processes_stopped
8687 	 *	don't block if the controlling process was already stopped for
8688 	 *	cpr
8689 	 * canblock != PM_CANBLOCK_BYPASS
8690 	 *	our caller must not have explicitly prevented blocking
8691 	 */
8692 	if (direction != PM_LEVEL_EXACT && canblock != PM_CANBLOCK_BYPASS) {
8693 		PM_LOCK_DIP(dip);
8694 		while (PM_ISDIRECT(dip) && !pm_processes_stopped) {
8695 			/* releases dip lock */
8696 			ret = pm_busop_match_request(dip, bpc);
8697 			if (ret == EAGAIN) {
8698 				PM_LOCK_DIP(dip);
8699 				continue;
8700 			}
8701 			return (*iresp = ret);
8702 		}
8703 		PM_UNLOCK_DIP(dip);
8704 	}
8705 	/* BC device is never scanned, so power will stick until we are done */
8706 	if (PM_ISBC(dip) && comp != 0 && nlevel != 0 &&
8707 	    direction != PM_LEVEL_DOWNONLY) {
8708 		int nrmpwr0 = pm_get_normal_power(dip, 0);
8709 		if (pm_set_power(dip, 0, nrmpwr0, direction,
8710 		    canblock, 0, resultp) != DDI_SUCCESS) {
8711 			/* *resultp set by pm_set_power */
8712 			return (DDI_FAILURE);
8713 		}
8714 	}
8715 	if (PM_WANTS_NOTIFICATION(pdip)) {
8716 		PMD(PMD_SET, ("%s: pre_notify %s@%s(%s#%d) for child "
8717 		    "%s@%s(%s#%d)\n", pmf, PM_DEVICE(pdip), PM_DEVICE(dip)))
8718 		ret = (*PM_BUS_POWER_FUNC(pdip))(pdip, NULL,
8719 		    BUS_POWER_PRE_NOTIFICATION, bpc, resultp);
8720 		if (ret != DDI_SUCCESS) {
8721 			PMD(PMD_SET, ("%s: failed to pre_notify %s@%s(%s#%d)\n",
8722 			    pmf, PM_DEVICE(pdip)))
8723 			return (DDI_FAILURE);
8724 		}
8725 	} else {
8726 		/*
8727 		 * Since we don't know what the actual power level is,
8728 		 * we place a power hold on the parent no matter what
8729 		 * component and level is changing.
8730 		 */
8731 		pm_hold_power(pdip);
8732 	}
8733 	PM_LOCK_POWER(dip);
8734 	clevel = PM_CURPOWER(dip, comp);
8735 	/*
8736 	 * It's possible that a call was made to pm_update_maxpower()
8737 	 * on another thread before we took the lock above.  So, we need to
8738 	 * make sure that this request isn't processed after the
8739 	 * change of power executed on behalf of pm_update_maxpower().
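	 * Hence the check against pm_get_normal_power() immediately
	 * below, now that PM_LOCK_POWER is held.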
	 */
8740 	if (nlevel > pm_get_normal_power(dip, comp)) {
8741 		PMD(PMD_SET, ("%s: requested level is higher than normal.\n",
8742 		    pmf))
8743 		ret = DDI_FAILURE;
8744 		*iresp = DDI_FAILURE;
8745 		goto post_notify;
8746 	}
8747 	PMD(PMD_SET, ("%s: %s@%s(%s#%d), cmp=%d, olvl=%d, nlvl=%d, clvl=%d, "
8748 	    "dir=%s\n", pmf, PM_DEVICE(dip), comp, bpc->bpc_olevel, nlevel,
8749 	    clevel, dir))
8750 	switch (direction) {
8751 	case PM_LEVEL_UPONLY:
8752 		/* Powering up */
8753 		if (clevel >= nlevel) {
8754 			PMD(PMD_SET, ("%s: current level is already "
8755 			    "at or above the requested level.\n", pmf))
8756 			*iresp = DDI_SUCCESS;
8757 			ret = DDI_SUCCESS;
8758 			goto post_notify;
8759 		}
8760 		break;
8761 	case PM_LEVEL_EXACT:
8762 		/* specific level request */
8763 		if (clevel == nlevel && !PM_ISBC(dip)) {
8764 			PMD(PMD_SET, ("%s: current level is already "
8765 			    "at the requested level.\n", pmf))
8766 			*iresp = DDI_SUCCESS;
8767 			ret = DDI_SUCCESS;
8768 			goto post_notify;
8769 		} else if (PM_IS_CFB(dip) && (nlevel < clevel)) {
8770 			PMD(PMD_CFB, ("%s: powerdown of console\n", pmf))
8771 			if (!pm_cfb_enabled) {
8772 				PMD(PMD_ERROR | PMD_CFB,
8773 				    ("%s: !pm_cfb_enabled, fails\n", pmf))
8774 				*errnop = EINVAL;
8775 				*iresp = DDI_FAILURE;
8776 				ret = DDI_FAILURE;
8777 				goto post_notify;
8778 			}
8779 			mutex_enter(&pm_cfb_lock);
8780 			while (cfb_inuse) {
8781 				mutex_exit(&pm_cfb_lock);
8782 				if (delay_sig(1) == EINTR) {
8783 					ret = DDI_FAILURE;
8784 					*iresp = DDI_FAILURE;
8785 					*errnop = EINTR;
8786 					goto post_notify;
8787 				}
8788 				mutex_enter(&pm_cfb_lock);
8789 			}
8790 			mutex_exit(&pm_cfb_lock);
8791 		}
8792 		break;
8793 	case PM_LEVEL_DOWNONLY:
8794 		/* Powering down */
8795 		thresh = cur_threshold(dip, comp);
8796 		idletime = gethrestime_sec() - cp->pmc_timestamp;
8797 		if (scan && ((PM_KUC(dip) != 0) ||
8798 		    (cp->pmc_busycount > 0) ||
8799 		    ((idletime < thresh) && !PM_IS_PID(dip)))) {
8800 #ifdef DEBUG
8801 			if (DEVI(dip)->devi_pm_kidsupcnt != 0)
8802 				PMD(PMD_SET, ("%s: scan failed: "
8803 				    "kidsupcnt != 0\n", pmf))
8804 			if (cp->pmc_busycount > 0)
8805 				PMD(PMD_SET, ("%s: scan failed: "
8806 				    "device became busy\n", pmf))
8807 			if (idletime < thresh)
8808 				PMD(PMD_SET, ("%s: scan failed: device "
8809 				    "hasn't been idle long enough\n", pmf))
8810 #endif
8811 			*iresp = DDI_FAILURE;
8812 			*errnop = EBUSY;
8813 			ret = DDI_FAILURE;
8814 			goto post_notify;
8815 		} else if (clevel != PM_LEVEL_UNKNOWN && clevel <= nlevel) {
8816 			PMD(PMD_SET, ("%s: current level is already at "
8817 			    "or below the requested level.\n", pmf))
8818 			*iresp = DDI_SUCCESS;
8819 			ret = DDI_SUCCESS;
8820 			goto post_notify;
8821 		}
8822 		break;
8823 	}
8824
8825 	if (PM_IS_CFB(dip) && (comps_off_incr =
8826 	    calc_cfb_comps_incr(dip, comp, clevel, nlevel)) > 0) {
8827 		/*
8828 		 * Pre-adjust pm_cfb_comps_off if lowering a console fb
8829 		 * component from full power.  Remember that we tried to
8830 		 * lower power in case it fails and we need to back out
8831 		 * the adjustment.
8832 		 */
8833 		update_comps_off(comps_off_incr, dip);
8834 		PMD(PMD_CFB, ("%s: %s@%s(%s#%d)[%d] %d->%d cfb_comps_off->%d\n",
8835 		    pmf, PM_DEVICE(dip), comp, clevel, nlevel,
8836 		    pm_cfb_comps_off))
8837 	}
8838
8839 	if ((*iresp = power_dev(dip,
8840 	    comp, nlevel, clevel, canblock, &devl)) == DDI_SUCCESS) {
8841 #ifdef DEBUG
8842 		/*
8843 		 * All descendants of this node should already be powered off.
8844 		 */
8845 		if (PM_CURPOWER(dip, comp) == 0) {
8846 			pm_desc_pwrchk_t pdpchk;
8847 			pdpchk.pdpc_dip = dip;
8848 			pdpchk.pdpc_par_involved = PM_WANTS_NOTIFICATION(dip);
8849 			ndi_devi_enter(dip);
8850 			for (cdip = ddi_get_child(dip); cdip != NULL;
8851 			    cdip = ddi_get_next_sibling(cdip)) {
8852 				ndi_devi_enter(cdip);
8853 				ddi_walk_devs(cdip, pm_desc_pwrchk_walk,
8854 				    (void *)&pdpchk);
8855 				ndi_devi_exit(cdip);
8856 			}
8857 			ndi_devi_exit(dip);
8858 		}
8859 #endif
8860 		/*
8861 		 * Post-adjust pm_cfb_comps_off if we brought an fb component
8862 		 * back up to full power.
8863 		 */
8864 		if (PM_IS_CFB(dip) && comps_off_incr < 0) {
8865 			update_comps_off(comps_off_incr, dip);
8866 			PMD(PMD_CFB, ("%s: %s@%s(%s#%d)[%d] %d->%d "
8867 			    "cfb_comps_off->%d\n", pmf, PM_DEVICE(dip),
8868 			    comp, clevel, nlevel, pm_cfb_comps_off))
8869 		}
8870 		dodeps = 0;
8871 		if (POWERING_OFF(clevel, nlevel)) {
8872 			if (PM_ISBC(dip)) {
8873 				dodeps = (comp == 0);
8874 			} else {
8875 				int i;
8876 				dodeps = 1;
8877 				for (i = 0; i < PM_NUMCMPTS(dip); i++) {
8878 					/* if some component still on */
8879 					if (PM_CURPOWER(dip, i)) {
8880 						dodeps = 0;
8881 						break;
8882 					}
8883 				}
8884 			}
8885 			if (dodeps)
8886 				work_type = PM_DEP_WK_POWER_OFF;
8887 		} else if (POWERING_ON(clevel, nlevel)) {
8888 			if (PM_ISBC(dip)) {
8889 				dodeps = (comp == 0);
8890 			} else {
8891 				int i;
8892 				dodeps = 1;
8893 				for (i = 0; i < PM_NUMCMPTS(dip); i++) {
8894 					if (i == comp)
8895 						continue;
8896 					if (PM_CURPOWER(dip, i) > 0) {
8897 						dodeps = 0;
8898 						break;
8899 					}
8900 				}
8901 			}
8902 			if (dodeps)
8903 				work_type = PM_DEP_WK_POWER_ON;
8904 		}
8905
8906 		if (dodeps) {
8907 			char *pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
8908
8909 			(void) ddi_pathname(dip, pathbuf);
8910 			pm_dispatch_to_dep_thread(work_type, pathbuf, NULL,
8911 			    PM_DEP_NOWAIT, NULL, 0);
8912 			kmem_free(pathbuf, MAXPATHLEN);
8913 		}
8914 		if ((PM_CURPOWER(dip, comp) == nlevel) && pm_watchers()) {
8915 			int old;
8916
8917 			/* If old power cached during deadlock, use it. */
8918 			old = (cp->pmc_flags & PM_PHC_WHILE_SET_POWER ?
8919 			    cp->pmc_phc_pwr : olevel);
8920 			mutex_enter(&pm_rsvp_lock);
8921 			pm_enqueue_notify(PSC_HAS_CHANGED, dip, comp, nlevel,
8922 			    old, canblock);
8923 			pm_enqueue_notify_others(&devl, canblock);
8924 			mutex_exit(&pm_rsvp_lock);
8925 		} else {
8926 			pm_ppm_devlist_t *p;
8927 			pm_ppm_devlist_t *next;
8928 			for (p = devl; p != NULL; p = next) {
8929 				next = p->ppd_next;
8930 				kmem_free(p, sizeof (pm_ppm_devlist_t));
8931 			}
8932 			devl = NULL;
8933 		}
8934
8935 		/*
8936 		 * If we are coming from a scan, don't do it again,
8937 		 * else we can have infinite loops.
8938 		 */
8939 		if (!scan)
8940 			pm_rescan(dip);
8941 	} else {
8942 		/* if we incremented pm_cfb_comps_off, but failed */
8943 		if (comps_off_incr > 0) {
8944 			update_comps_off(-comps_off_incr, dip);
8945 			PMD(PMD_CFB, ("%s: %s@%s(%s#%d)[%d] %d->%d "
8946 			    "cfb_comps_off->%d\n", pmf, PM_DEVICE(dip),
8947 			    comp, clevel, nlevel, pm_cfb_comps_off))
8948 		}
8949 		*errnop = EIO;
8950 	}
8951
8952 post_notify:
8953 	/*
8954 	 * This thread may have been in deadlock with pm_power_has_changed.
8955 	 * Before releasing the power lock, clear the flag which marks this
8956 	 * condition.
8957 	 */
8958 	cp->pmc_flags &= ~PM_PHC_WHILE_SET_POWER;
8959
8960 	/*
8961 	 * Update the old power level in the bus power structure with the
8962 	 * actual power level before the transition was made to the new level.
8963 	 * Some involved parents depend on this information to keep track of
8964 	 * their children's power transition.
8965 	 */
8966 	if (*iresp != DDI_FAILURE)
8967 		bpc->bpc_olevel = clevel;
8968
8969 	if (PM_WANTS_NOTIFICATION(pdip)) {
8970 		ret = (*PM_BUS_POWER_FUNC(pdip))(pdip, NULL,
8971 		    BUS_POWER_POST_NOTIFICATION, bpc, resultp);
8972 		PM_UNLOCK_POWER(dip);
8973 		PMD(PMD_SET, ("%s: post_notify %s@%s(%s#%d) for "
8974 		    "child %s@%s(%s#%d), ret=%d\n", pmf, PM_DEVICE(pdip),
8975 		    PM_DEVICE(dip), ret))
8976 	} else {
8977 		nlevel = cur_power(cp);	/* in case phc deadlock updated pwr */
8978 		PM_UNLOCK_POWER(dip);
8979 		/*
8980 		 * Now that we know what power transition has occurred
8981 		 * (if any), release the power hold.  Leave the hold
8982 		 * in effect in the case of OFF->ON transition.
8983 		 */
8984 		if (!(clevel == 0 && nlevel > 0 &&
8985 		    (!PM_ISBC(dip) || comp == 0)))
8986 			pm_rele_power(pdip);
8987 		/*
8988 		 * If the power transition was an ON->OFF transition,
8989 		 * remove the power hold from the parent.
8990 		 */
8991 		if ((clevel > 0 || clevel == PM_LEVEL_UNKNOWN) &&
8992 		    nlevel == 0 && (!PM_ISBC(dip) || comp == 0))
8993 			pm_rele_power(pdip);
8994 	}
8995 	if (*iresp != DDI_SUCCESS || ret != DDI_SUCCESS)
8996 		return (DDI_FAILURE);
8997 	else
8998 		return (DDI_SUCCESS);
8999 }
9000
9001 /*
9002  * If an app (SunVTS or Xsun) has taken control, then block until it
9003  * gives it up or makes the requested power level change, unless
9004  * we have other instructions about blocking.  Returns DDI_SUCCESS,
9005  * DDI_FAILURE or EAGAIN (owner released device from directpm).
9006  */
9007 static int
9008 pm_busop_match_request(dev_info_t *dip, void *arg)
9009 {
9010 	PMD_FUNC(pmf, "bp_match_request")
9011 	pm_bp_child_pwrchg_t *bpc = (pm_bp_child_pwrchg_t *)arg;
9012 	pm_sp_misc_t *pspm = (pm_sp_misc_t *)bpc->bpc_private;
9013 	int comp = bpc->bpc_comp;
9014 	int nlevel = bpc->bpc_nlevel;
9015 	pm_canblock_t canblock = pspm->pspm_canblock;
9016 	int direction = pspm->pspm_direction;
9017 	int clevel;
9018
9019 	ASSERT(PM_IAM_LOCKING_DIP(dip));
9020 	PM_LOCK_POWER(dip);
9021 	clevel = PM_CURPOWER(dip, comp);
9022 	PMD(PMD_SET, ("%s: %s@%s(%s#%d), cmp=%d, nlvl=%d, clvl=%d\n",
9023 	    pmf, PM_DEVICE(dip), comp, nlevel, clevel))
9024 	if (direction == PM_LEVEL_UPONLY) {
9025 		if (clevel >= nlevel) {
9026 			PM_UNLOCK_POWER(dip);
9027 			PM_UNLOCK_DIP(dip);
9028 			return (DDI_SUCCESS);
9029 		}
9030 	} else if (clevel == nlevel) {
9031 		PM_UNLOCK_POWER(dip);
9032 		PM_UNLOCK_DIP(dip);
9033 		return (DDI_SUCCESS);
9034 	}
9035 	if (canblock == PM_CANBLOCK_FAIL) {
9036 		PM_UNLOCK_POWER(dip);
9037 		PM_UNLOCK_DIP(dip);
9038 		return (DDI_FAILURE);
9039 	}
9040 	if (canblock == PM_CANBLOCK_BLOCK) {
9041 		/*
9042 		 * To avoid a deadlock, we must not hold the
9043 		 * power lock when we pm_block.
9044 		 */
9045 		PM_UNLOCK_POWER(dip);
9046 		PMD(PMD_SET, ("%s: blocking\n", pmf))
9047 		/* pm_block releases dip lock */
9048 		switch (pm_block(dip, comp, nlevel, clevel)) {
9049 		case PMP_RELEASE:
9050 			return (EAGAIN);
9051 		case PMP_SUCCEED:
9052 			return (DDI_SUCCESS);
9053 		case PMP_FAIL:
9054 			return (DDI_FAILURE);
9055 		}
9056 	} else {
9057 		ASSERT(0);
9058 	}
9059 	_NOTE(NOTREACHED);
9060 	return (DDI_FAILURE);	/* keep gcc happy */
9061 }
9062
9063 static int
9064 pm_all_to_normal_nexus(dev_info_t *dip, pm_canblock_t canblock)
9065 {
9066 	PMD_FUNC(pmf, "all_to_normal_nexus")
9067 	int *normal;
9068 	int i, ncomps;
9069 	size_t size;
9070 	int changefailed = 0;
9071 	int ret, result = DDI_SUCCESS;
9072 	pm_bp_nexus_pwrup_t bpn;
9073 	pm_sp_misc_t pspm;
9074
9075 	ASSERT(PM_GET_PM_INFO(dip));
9076 	PMD(PMD_ALLNORM, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip)))
9077 	if (pm_get_norm_pwrs(dip, &normal, &size) != DDI_SUCCESS) {
9078 		PMD(PMD_ALLNORM, ("%s: can't get norm pwrs\n", pmf))
9079 		return (DDI_FAILURE);
9080 	}
9081 	ncomps = PM_NUMCMPTS(dip);
9082 	for (i = 0; i < ncomps; i++) {
9083 		bpn.bpn_dip = dip;
9084 		bpn.bpn_comp = i;
9085 		bpn.bpn_level = normal[i];
9086 		pspm.pspm_canblock = canblock;
9087 		pspm.pspm_scan = 0;
9088 		bpn.bpn_private = &pspm;
9089 		ret = pm_busop_bus_power(dip, NULL, BUS_POWER_NEXUS_PWRUP,
9090 		    (void *)&bpn, (void *)&result);
9091 		if (ret != DDI_SUCCESS || result != DDI_SUCCESS) {
9092 			PMD(PMD_FAIL | PMD_ALLNORM, ("%s: %s@%s(%s#%d)[%d] "
9093 			    "->%d failure result %d\n", pmf, PM_DEVICE(dip),
9094 			    i, normal[i], result))
9095 			changefailed++;
9096 		}
9097 	}
9098 	kmem_free(normal, size);
9099 	if (changefailed) {
9100 		PMD(PMD_FAIL, ("%s: failed to set %d comps %s@%s(%s#%d) "
9101 		    "full power\n", pmf, changefailed, PM_DEVICE(dip)))
9102 		return (DDI_FAILURE);
9103 	}
9104 	return (DDI_SUCCESS);
9105 }
9106
9107 int
9108 pm_noinvol_update(int subcmd, int volpmd, int wasvolpmd, char *path,
9109     dev_info_t *tdip)
9110 {
9111 	PMD_FUNC(pmf, "noinvol_update")
9112 	pm_bp_noinvol_t args;
9113 	int ret;
9114 	int result = DDI_SUCCESS;
9115
9116 	args.bpni_path = path;
9117 	args.bpni_dip = tdip;
9118 	args.bpni_cmd = subcmd;
9119 	args.bpni_wasvolpmd = wasvolpmd;
9120 	args.bpni_volpmd = volpmd;
9121 	PMD(PMD_NOINVOL, ("%s: update for path %s tdip %p subcmd %d "
9122 	    "volpmd %d wasvolpmd %d\n", pmf,
9123 	    path, (void *)tdip, subcmd, wasvolpmd, volpmd))
9124 	ret = pm_busop_bus_power(ddi_root_node(), NULL, BUS_POWER_NOINVOL,
9125 	    &args, &result);
9126 	return (ret);
9127 }
9128
9129 void
9130 pm_noinvol_update_node(dev_info_t *dip, pm_bp_noinvol_t *req)
9131 {
9132 	PMD_FUNC(pmf, "noinvol_update_node")
9133
9134 	PMD(PMD_NOINVOL, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip)))
9135 	switch (req->bpni_cmd) {
9136 	case PM_BP_NOINVOL_ATTACH:
9137 		PMD(PMD_NOINVOL, ("%s: PM_BP_NOINVOL_ATTACH %s@%s(%s#%d) "
9138 		    "noinvol %d->%d\n", pmf, PM_DEVICE(dip),
9139 		    DEVI(dip)->devi_pm_noinvolpm,
9140 		    DEVI(dip)->devi_pm_noinvolpm - 1))
9141 		ASSERT(DEVI(dip)->devi_pm_noinvolpm);
9142 		PM_LOCK_DIP(dip);
9143 		DEVI(dip)->devi_pm_noinvolpm--;
9144 		if (req->bpni_wasvolpmd) {
9145 			PMD(PMD_NOINVOL, ("%s: PM_BP_NOINVOL_ATTACH "
9146 			    "%s@%s(%s#%d) volpmd %d->%d\n", pmf,
9147 			    PM_DEVICE(dip), DEVI(dip)->devi_pm_volpmd,
9148 			    DEVI(dip)->devi_pm_volpmd - 1))
9149 			if (DEVI(dip)->devi_pm_volpmd)
9150 				DEVI(dip)->devi_pm_volpmd--;
9151 		}
9152 		PM_UNLOCK_DIP(dip);
9153 		break;
9154
9155 	case PM_BP_NOINVOL_DETACH:
9156 		PMD(PMD_NOINVOL, ("%s: PM_BP_NOINVOL_DETACH %s@%s(%s#%d) "
9157 		    "noinvolpm %d->%d\n", pmf, PM_DEVICE(dip),
9158 		    DEVI(dip)->devi_pm_noinvolpm,
9159 		    DEVI(dip)->devi_pm_noinvolpm + 1))
9160 		PM_LOCK_DIP(dip);
9161 		DEVI(dip)->devi_pm_noinvolpm++;
9162 		if (req->bpni_wasvolpmd) {
9163 			PMD(PMD_NOINVOL, ("%s: PM_BP_NOINVOL_DETACH "
9164 			    "%s@%s(%s#%d) volpmd %d->%d\n", pmf,
9165 			    PM_DEVICE(dip), DEVI(dip)->devi_pm_volpmd,
9166 			    DEVI(dip)->devi_pm_volpmd + 1))
9167 			DEVI(dip)->devi_pm_volpmd++;
9168 		}
9169 		PM_UNLOCK_DIP(dip);
9170 		break;
9171
9172 	case PM_BP_NOINVOL_REMDRV:
9173 		PMD(PMD_NOINVOL, ("%s: PM_BP_NOINVOL_REMDRV %s@%s(%s#%d) "
9174 		    "noinvol %d->%d\n", pmf, PM_DEVICE(dip),
9175 		    DEVI(dip)->devi_pm_noinvolpm,
9176 		    DEVI(dip)->devi_pm_noinvolpm - 1))
9177 		ASSERT(DEVI(dip)->devi_pm_noinvolpm);
9178 		PM_LOCK_DIP(dip);
9179 		DEVI(dip)->devi_pm_noinvolpm--;
9180 		if (req->bpni_wasvolpmd) {
9181 			PMD(PMD_NOINVOL,
9182 			    ("%s: PM_BP_NOINVOL_REMDRV %s@%s(%s#%d) "
9183 			    "volpmd %d->%d\n", pmf, PM_DEVICE(dip),
9184 			    DEVI(dip)->devi_pm_volpmd,
9185 			    DEVI(dip)->devi_pm_volpmd - 1))
9186 			/*
9187 			 * A power up could have come in between and
9188 			 * cleared volpmd; in that case it is already
9189 			 * zero and must not go negative.
9190 			 */
9191 			if (DEVI(dip)->devi_pm_volpmd)
9192 				DEVI(dip)->devi_pm_volpmd--;
9193 		}
9194 		PM_UNLOCK_DIP(dip);
9195 		break;
9196
9197 	case PM_BP_NOINVOL_CFB:
9198 		PMD(PMD_NOINVOL,
9199 		    ("%s: PM_BP_NOINVOL_CFB %s@%s(%s#%d) noinvol %d->%d\n",
9200 		    pmf, PM_DEVICE(dip), DEVI(dip)->devi_pm_noinvolpm,
9201 		    DEVI(dip)->devi_pm_noinvolpm + 1))
9202 		PM_LOCK_DIP(dip);
9203 		DEVI(dip)->devi_pm_noinvolpm++;
9204 		PM_UNLOCK_DIP(dip);
9205 		break;
9206
9207 	case PM_BP_NOINVOL_POWER:
9208 		PMD(PMD_NOINVOL,
9209 		    ("%s: PM_BP_NOINVOL_POWER %s@%s(%s#%d) volpmd %d->%d\n",
9210 		    pmf, PM_DEVICE(dip),
9211 		    DEVI(dip)->devi_pm_volpmd, DEVI(dip)->devi_pm_volpmd -
9212 		    req->bpni_volpmd))
9213 		PM_LOCK_DIP(dip);
9214 		DEVI(dip)->devi_pm_volpmd -= req->bpni_volpmd;
9215 		PM_UNLOCK_DIP(dip);
9216 		break;
9217
9218 	default:
9219 		break;
9220 	}
9221
9222 }
9223
9224 #ifdef DEBUG
9225 static int
9226 pm_desc_pwrchk_walk(dev_info_t *dip, void *arg)
9227 {
9228 	PMD_FUNC(pmf, "desc_pwrchk")
9229 	pm_desc_pwrchk_t *pdpchk = (pm_desc_pwrchk_t *)arg;
9230 	pm_info_t *info = PM_GET_PM_INFO(dip);
9231 	int i;
9232 	/* LINTED */
9233 	int curpwr, ce_level;
9234
9235 	if (!info)
9236 		return (DDI_WALK_CONTINUE);
9237
9238 	PMD(PMD_SET, ("%s: %s@%s(%s#%d)\n", pmf, PM_DEVICE(dip)))
9239 	for (i = 0; i < PM_NUMCMPTS(dip); i++) {
9240 		/* LINTED */
9241 		if ((curpwr = PM_CURPOWER(dip, i)) == 0)
9242 			continue;
9243 		/* E_FUNC_SET_NOT_USED */
9244 		ce_level = (pdpchk->pdpc_par_involved == 0) ? CE_PANIC :
9245 		    CE_WARN;
9246 		PMD(PMD_SET, ("%s: %s@%s(%s#%d) is powered off while desc "
9247 		    "%s@%s(%s#%d)[%d] is at %d\n", pmf,
9248 		    PM_DEVICE(pdpchk->pdpc_dip), PM_DEVICE(dip), i, curpwr))
9249 		cmn_err(ce_level, "!device %s@%s(%s#%d) is powered on, "
9250 		    "while its ancestor, %s@%s(%s#%d), is powering off!",
9251 		    PM_DEVICE(dip), PM_DEVICE(pdpchk->pdpc_dip));
9252 	}
9253 	return (DDI_WALK_CONTINUE);
9254 }
9255 #endif
9256
9257 /*
9258  * Record the fact that one thread is borrowing the lock on a device node.
9259  * Use is restricted to the case where the lending thread will block until
9260  * the borrowing thread (always curthread) completes.
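 *
 * Sketch of the intended pairing (an editor's illustration, not from the
 * original comment); "lender" is the thread that holds the node lock and
 * is blocked waiting for curthread:
 *
 *	pm_borrow_lock(lender);
 *	(operate on the node as though curthread held its lock)
 *	pm_return_lock();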
9261  */
9262 void
9263 pm_borrow_lock(kthread_t *lender)
9264 {
9265 	lock_loan_t *prev = &lock_loan_head;
9266 	lock_loan_t *cur = (lock_loan_t *)kmem_zalloc(sizeof (*cur), KM_SLEEP);
9267
9268 	cur->pmlk_borrower = curthread;
9269 	cur->pmlk_lender = lender;
9270 	mutex_enter(&pm_loan_lock);
9271 	cur->pmlk_next = prev->pmlk_next;
9272 	prev->pmlk_next = cur;
9273 	mutex_exit(&pm_loan_lock);
9274 }
9275
9276 /*
9277  * Return the borrowed lock.  A thread can borrow only one.
9278  */
9279 void
9280 pm_return_lock(void)
9281 {
9282 	lock_loan_t *cur;
9283 	lock_loan_t *prev = &lock_loan_head;
9284
9285 	mutex_enter(&pm_loan_lock);
9286 	ASSERT(prev->pmlk_next != NULL);
9287 	for (cur = prev->pmlk_next; cur; prev = cur, cur = cur->pmlk_next)
9288 		if (cur->pmlk_borrower == curthread)
9289 			break;
9290
9291 	ASSERT(cur != NULL);
9292 	prev->pmlk_next = cur->pmlk_next;
9293 	mutex_exit(&pm_loan_lock);
9294 	kmem_free(cur, sizeof (*cur));
9295 }
9296
9297 #if defined(__x86)
9298
9299 #define CPR_RXR		0x1
9300 #define CPR_TXR		0x20
9301 #define CPR_DATAREG	0x3f8
9302 #define CPR_LSTAT	0x3fd
9303 #define CPR_INTRCTL	0x3f9
9304
9305 char
9306 pm_getchar(void)
9307 {
9308 	while ((inb(CPR_LSTAT) & CPR_RXR) != CPR_RXR)
9309 		drv_usecwait(10);
9310
9311 	return (inb(CPR_DATAREG));
9312
9313 }
9314
9315 void
9316 pm_putchar(char c)
9317 {
9318 	while ((inb(CPR_LSTAT) & CPR_TXR) == 0)
9319 		drv_usecwait(10);
9320
9321 	outb(CPR_DATAREG, c);
9322 }
9323
9324 void
9325 pm_printf(char *s)
9326 {
9327 	while (*s) {
9328 		pm_putchar(*s++);
9329 	}
9330 }
9331
9332 #endif
9333
9334 int
9335 pm_ppm_searchlist(pm_searchargs_t *sp)
9336 {
9337 	power_req_t power_req;
9338 	int result = 0;
9339 	/* LINTED */
9340 	int ret;
9341
9342 	power_req.request_type = PMR_PPM_SEARCH_LIST;
9343 	power_req.req.ppm_search_list_req.searchlist = sp;
9344 	ASSERT(DEVI(ddi_root_node())->devi_pm_ppm);
9345 	ret = pm_ctlops((dev_info_t *)DEVI(ddi_root_node())->devi_pm_ppm,
9346 	    ddi_root_node(), DDI_CTLOPS_POWER, &power_req, &result);
9347 	PMD(PMD_SX, ("pm_ppm_searchlist returns %d, result %d\n",
9348 	    ret, result))
9349 	return (result);
9350 }
9351
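/*
 * An editor's sketch (not part of the original source): pm_busop_bus_power()
 * above is the framework half of the bus_power(9E) protocol.  A nexus driver
 * that does not need pre/post notification of child transitions can wire its
 * bus_ops entry straight through, conceptually:
 *
 *	static int
 *	xxnex_bus_power(dev_info_t *dip, void *impl_arg,
 *	    pm_bus_power_op_t op, void *arg, void *result)
 *	{
 *		return (pm_busop_bus_power(dip, impl_arg, op, arg, result));
 *	}
 *
 * ("xxnex" is a hypothetical driver name.)  A nexus that does want to be
 * involved instead sees the BUS_POWER_PRE_NOTIFICATION and
 * BUS_POWER_POST_NOTIFICATION operations issued from pm_busop_set_power()
 * above.
 */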