xref: /titanic_41/usr/src/uts/common/io/pm.c (revision 29949e866e40b95795203f3ee46f44a197c946e4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * pm	This driver now only handles the ioctl interface.  The scanning
31  *	and policy stuff now lives in common/os/sunpm.c.
32  *	Not DDI compliant
33  */
34 
35 #include <sys/types.h>
36 #include <sys/errno.h>
37 #include <sys/modctl.h>
38 #include <sys/conf.h>		/* driver flags and functions */
39 #include <sys/open.h>		/* OTYP_CHR definition */
40 #include <sys/stat.h>		/* S_IFCHR definition */
41 #include <sys/pathname.h>	/* name -> dev_info xlation */
42 #include <sys/kmem.h>		/* memory alloc stuff */
43 #include <sys/debug.h>
44 #include <sys/pm.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/epm.h>
48 #include <sys/vfs.h>
49 #include <sys/mode.h>
50 #include <sys/mkdev.h>
51 #include <sys/promif.h>
52 #include <sys/consdev.h>
53 #include <sys/ddi_impldefs.h>
54 #include <sys/poll.h>
55 #include <sys/note.h>
56 #include <sys/taskq.h>
57 #include <sys/policy.h>
58 
59 /*
60  * Minor number is (instance << 8) + clone, where clone is in the range
61  * 1-255 (clone 0 is reserved for the "original" minor).
62  */
63 #define	PM_MINOR_TO_CLONE(minor) ((minor) & (PM_MAX_CLONE - 1))
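/*
 * Illustrative example (assuming PM_MAX_CLONE is a power of two such as 256):
 * instance 1, clone 5 gives minor (1 << 8) + 5 = 261, and
 * PM_MINOR_TO_CLONE(261) masks off the instance bits to recover clone 5.
 */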
64 
65 #define	PM_NUMCMPTS(dip) (DEVI(dip)->devi_pm_num_components)
66 #define	PM_IS_CFB(dip) (DEVI(dip)->devi_pm_flags & PMC_CONSOLE_FB)
67 #define	PM_MAJOR(dip) ddi_driver_major(dip)
68 #define	PM_RELE(dip) ddi_release_devi(dip)
69 
70 #define	PM_IDLEDOWN_TIME	10
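/* PM_IDLEDOWN_TIME is in seconds; pm_timeout_idledown() scales it by hz */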
71 
72 extern kmutex_t	pm_scan_lock;	/* protects autopm_enabled, pm_scans_disabled */
73 extern kmutex_t	pm_clone_lock;	/* protects pm_clones array */
74 extern int	autopm_enabled;
75 extern kcondvar_t pm_clones_cv[PM_MAX_CLONE];
76 extern uint_t	pm_poll_cnt[PM_MAX_CLONE];
77 
78 /*
79  * The soft state of the power manager.  Since there will only be
80  * one of these, just reference it through a static pointer.
81  */
82 static struct pmstate {
83 	dev_info_t	*pm_dip;		/* ptr to our dev_info node */
84 	int		pm_instance;		/* for ddi_get_instance() */
85 	timeout_id_t	pm_idledown_id;		/* pm idledown timeout id */
86 	uchar_t		pm_clones[PM_MAX_CLONE]; /* uniqueify multiple opens */
87 	struct cred	*pm_cred[PM_MAX_CLONE];	/* cred for each unique open */
88 } pm_state = { NULL, -1, (timeout_id_t)0 };
89 typedef struct pmstate *pm_state_t;
90 static pm_state_t pmstp = &pm_state;
91 
92 static int	pm_open(dev_t *, int, int, cred_t *);
93 static int	pm_close(dev_t, int, int, cred_t *);
94 static int	pm_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
95 static int	pm_chpoll(dev_t, short, int, short *, struct pollhead **);
96 
97 static struct cb_ops pm_cb_ops = {
98 	pm_open,	/* open */
99 	pm_close,	/* close */
100 	nodev,		/* strategy */
101 	nodev,		/* print */
102 	nodev,		/* dump */
103 	nodev,		/* read */
104 	nodev,		/* write */
105 	pm_ioctl,	/* ioctl */
106 	nodev,		/* devmap */
107 	nodev,		/* mmap */
108 	nodev,		/* segmap */
109 	pm_chpoll,	/* poll */
110 	ddi_prop_op,	/* prop_op */
111 	NULL,		/* streamtab */
112 	D_NEW | D_MP	/* driver compatibility flag */
113 };
114 
115 static int pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
116     void **result);
117 static int pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
118 static int pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
119 
120 static struct dev_ops pm_ops = {
121 	DEVO_REV,		/* devo_rev */
122 	0,			/* refcnt */
123 	pm_getinfo,		/* info */
124 	nulldev,		/* identify */
125 	nulldev,		/* probe */
126 	pm_attach,		/* attach */
127 	pm_detach,		/* detach */
128 	nodev,			/* reset */
129 	&pm_cb_ops,		/* driver operations */
130 	NULL,			/* bus operations */
131 	NULL			/* power */
132 };
133 
134 static struct modldrv modldrv = {
135 	&mod_driverops,
136 	"power management driver v%I%",
137 	&pm_ops
138 };
139 
140 static struct modlinkage modlinkage = {
141 	MODREV_1, &modldrv, 0
142 };
143 
144 /* Local functions */
145 #ifdef DEBUG
146 static int	print_info(dev_info_t *, void *);
147 
148 #endif
149 
150 int
151 _init(void)
152 {
153 	return (mod_install(&modlinkage));
154 }
155 
156 int
157 _fini(void)
158 {
159 	return (mod_remove(&modlinkage));
160 }
161 
162 int
163 _info(struct modinfo *modinfop)
164 {
165 	return (mod_info(&modlinkage, modinfop));
166 }
167 
168 static int
169 pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
170 {
171 	int		i;
172 
173 	switch (cmd) {
174 
175 	case DDI_ATTACH:
176 		if (pmstp->pm_instance != -1)	/* Only allow one instance */
177 			return (DDI_FAILURE);
178 		pmstp->pm_instance = ddi_get_instance(dip);
179 		if (ddi_create_minor_node(dip, "pm", S_IFCHR,
180 		    (pmstp->pm_instance << 8) + 0,
181 			DDI_PSEUDO, 0) != DDI_SUCCESS) {
182 			return (DDI_FAILURE);
183 		}
184 		pmstp->pm_dip = dip;	/* pm_init and getinfo depend on it */
185 
186 		for (i = 0; i < PM_MAX_CLONE; i++)
187 			cv_init(&pm_clones_cv[i], NULL, CV_DEFAULT, NULL);
188 
189 		ddi_report_dev(dip);
190 		return (DDI_SUCCESS);
191 
192 	default:
193 		return (DDI_FAILURE);
194 	}
195 }
196 
197 /* ARGSUSED */
198 static int
199 pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
200 {
201 	int i;
202 
203 	switch (cmd) {
204 	case DDI_DETACH:
205 		/*
206 		 * Don't detach while idledown timeout is pending.  Note that
207 		 * we already know we're not in pm_ioctl() due to framework
208 		 * synchronization, so this is a sufficient test
209 		 */
210 		if (pmstp->pm_idledown_id)
211 			return (DDI_FAILURE);
212 
213 		for (i = 0; i < PM_MAX_CLONE; i++)
214 			cv_destroy(&pm_clones_cv[i]);
215 
216 		ddi_remove_minor_node(dip, NULL);
217 		pmstp->pm_instance = -1;
218 		return (DDI_SUCCESS);
219 
220 	default:
221 		return (DDI_FAILURE);
222 	}
223 }
224 
225 static int
226 pm_close_direct_pm_device(dev_info_t *dip, void *arg)
227 {
228 	int clone;
229 	char *pathbuf;
230 	pm_info_t *info = PM_GET_PM_INFO(dip);
231 
232 	clone = *((int *)arg);
233 
234 	if (!info)
235 		return (DDI_WALK_CONTINUE);
236 
237 	pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
238 	PM_LOCK_DIP(dip);
239 	if (clone == info->pmi_clone) {
240 		PMD(PMD_CLOSE, ("pm_close: found %s@%s(%s#%d)\n",
241 		    PM_DEVICE(dip)))
242 		ASSERT(PM_ISDIRECT(dip));
243 		info->pmi_dev_pm_state &= ~PM_DIRECT;
244 		PM_UNLOCK_DIP(dip);
245 		pm_proceed(dip, PMP_RELEASE, -1, -1);
246 		/* Bring ourselves up if there is a keeper that is up */
247 		(void) ddi_pathname(dip, pathbuf);
248 		pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF, NULL,
249 		    pathbuf, PM_DEP_NOWAIT, NULL, 0);
250 		PM_LOCK_DIP(dip);
251 		info->pmi_clone = 0;
252 		PM_UNLOCK_DIP(dip);
253 	} else {
254 		PM_UNLOCK_DIP(dip);
255 	}
256 	kmem_free(pathbuf, MAXPATHLEN);
257 
258 	/* restart autopm on device released from direct pm */
259 	pm_rescan(dip);
260 
261 	return (DDI_WALK_CONTINUE);
262 }
263 
264 #define	PM_REQ		1
265 #define	NOSTRUCT	2
266 #define	DIP		3
267 #define	NODIP		4
268 #define	NODEP		5
269 #define	DEP		6
270 #define	PM_PSC		7
271 
272 #define	CHECKPERMS	0x001
273 #define	SU		0x002
274 #define	SG		0x004
275 #define	OWNER		0x008
276 
277 #define	INWHO		0x001
278 #define	INDATAINT	0x002
279 #define	INDATASTRING	0x004
280 #define	INDEP		0x008
281 #define	INDATAOUT	0x010
282 #define	INDATA	(INDATAOUT | INDATAINT | INDATASTRING | INDEP)
283 
284 struct pm_cmd_info {
285 	int cmd;		/* command code */
286 	char *name;		/* printable string */
287 	int supported;		/* true if still supported */
288 	int str_type;		/* PM_REQ or NOSTRUCT */
289 	int inargs;		/* INWHO, INDATAINT, INDATASTRING, INDEP, */
290 				/* INDATAOUT */
291 	int diptype;		/* DIP or NODIP */
292 	int deptype;		/* DEP or NODEP */
293 	int permission;		/* SU, SG, OWNER, or CHECKPERMS */
294 };
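/*
 * For example, the PM_GET_STATS entry in the table below,
 *	{PM_GET_STATS, "PM_GET_STATS", 1, PM_REQ, INWHO | INDATAOUT, DIP, NODEP},
 * means the command is still supported, takes a pm_req_t, requires a physpath
 * (INWHO) that must resolve to a held dip (DIP), copies results out through
 * req.data (INDATAOUT), takes no dependent, and (with no permission bits set)
 * is allowed for any caller by pm_perms().
 */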
295 
296 #ifdef DEBUG
297 char *pm_cmd_string;
298 int pm_cmd;
299 #endif
300 
301 /*
302  * Returns true if permission granted by credentials
303  */
304 static int
305 pm_perms(int perm, cred_t *cr)
306 {
307 	if (perm == 0)			/* no restrictions */
308 		return (1);
309 	if (perm == CHECKPERMS)		/* ok for now (is checked later) */
310 		return (1);
311 	if ((perm & SU) && secpolicy_power_mgmt(cr) == 0) /* privileged? */
312 		return (1);
313 	if ((perm & SG) && (crgetgid(cr) == 0))	/* group 0 is ok */
314 		return (1);
315 	return (0);
316 }
317 
318 #ifdef DEBUG
319 static int
320 print_info(dev_info_t *dip, void *arg)
321 {
322 	_NOTE(ARGUNUSED(arg))
323 	pm_info_t	*info;
324 	int		i, j;
325 	struct pm_component *cp;
326 	extern int pm_cur_power(pm_component_t *cp);
327 
328 	info = PM_GET_PM_INFO(dip);
329 	if (!info)
330 		return (DDI_WALK_CONTINUE);
331 	cmn_err(CE_CONT, "pm_info for %s\n", ddi_node_name(dip));
332 	for (i = 0; i < PM_NUMCMPTS(dip); i++) {
333 		cp = PM_CP(dip, i);
334 		cmn_err(CE_CONT, "\tThresholds[%d] =",  i);
335 		for (j = 0; j < cp->pmc_comp.pmc_numlevels; j++)
336 			cmn_err(CE_CONT, " %d", cp->pmc_comp.pmc_thresh[j]);
337 		cmn_err(CE_CONT, "\n");
338 		cmn_err(CE_CONT, "\tCurrent power[%d] = %d\n", i,
339 		    pm_cur_power(cp));
340 	}
341 	if (PM_ISDIRECT(dip))
342 		cmn_err(CE_CONT, "\tDirect power management\n");
343 	return (DDI_WALK_CONTINUE);
344 }
345 #endif
346 
347 /*
348  * command, name, supported, str_type, inargs, diptype, deptype, permission
349  */
350 static struct pm_cmd_info pmci[] = {
351 	{PM_SCHEDULE, "PM_SCHEDULE", 0},
352 	{PM_GET_IDLE_TIME, "PM_GET_IDLE_TIME", 0},
353 	{PM_GET_NUM_CMPTS, "PM_GET_NUM_CMPTS", 0},
354 	{PM_GET_THRESHOLD, "PM_GET_THRESHOLD", 0},
355 	{PM_SET_THRESHOLD, "PM_SET_THRESHOLD", 0},
356 	{PM_GET_NORM_PWR, "PM_GET_NORM_PWR", 0},
357 	{PM_SET_CUR_PWR, "PM_SET_CUR_PWR", 0},
358 	{PM_GET_CUR_PWR, "PM_GET_CUR_PWR", 0},
359 	{PM_GET_NUM_DEPS, "PM_GET_NUM_DEPS", 0},
360 	{PM_GET_DEP, "PM_GET_DEP", 0},
361 	{PM_ADD_DEP, "PM_ADD_DEP", 0},
362 	{PM_REM_DEP, "PM_REM_DEP", 0},
363 	{PM_REM_DEVICE, "PM_REM_DEVICE", 0},
364 	{PM_REM_DEVICES, "PM_REM_DEVICES", 0},
365 	{PM_REPARSE_PM_PROPS, "PM_REPARSE_PM_PROPS", 1, PM_REQ, INWHO, DIP,
366 	    NODEP},
367 	{PM_DISABLE_AUTOPM, "PM_DISABLE_AUTOPM", 0},
368 	{PM_REENABLE_AUTOPM, "PM_REENABLE_AUTOPM", 0},
369 	{PM_SET_NORM_PWR, "PM_SET_NORM_PWR", 0 },
370 	{PM_SET_DEVICE_THRESHOLD, "PM_SET_DEVICE_THRESHOLD", 1, PM_REQ,
371 	    INWHO, NODIP, NODEP, SU},
372 	{PM_GET_SYSTEM_THRESHOLD, "PM_GET_SYSTEM_THRESHOLD", 1, NOSTRUCT},
373 	{PM_GET_DEFAULT_SYSTEM_THRESHOLD, "PM_GET_DEFAULT_SYSTEM_THRESHOLD",
374 	    1, NOSTRUCT},
375 	{PM_SET_SYSTEM_THRESHOLD, "PM_SET_SYSTEM_THRESHOLD", 1, NOSTRUCT,
376 	    0, 0, 0, SU},
377 	{PM_START_PM, "PM_START_PM", 1, NOSTRUCT, 0, 0, 0, SU},
378 	{PM_STOP_PM, "PM_STOP_PM", 1, NOSTRUCT, 0, 0, 0, SU},
379 	{PM_RESET_PM, "PM_RESET_PM", 1, NOSTRUCT, 0, 0, 0, SU},
380 	{PM_GET_STATS, "PM_GET_STATS", 1, PM_REQ, INWHO | INDATAOUT,
381 	    DIP, NODEP},
382 	{PM_GET_DEVICE_THRESHOLD, "PM_GET_DEVICE_THRESHOLD", 1, PM_REQ, INWHO,
383 	    DIP, NODEP},
384 	{PM_GET_POWER_NAME, "PM_GET_POWER_NAME", 1, PM_REQ, INWHO | INDATAOUT,
385 	    DIP, NODEP},
386 	{PM_GET_POWER_LEVELS, "PM_GET_POWER_LEVELS", 1, PM_REQ,
387 	    INWHO | INDATAOUT, DIP, NODEP},
388 	{PM_GET_NUM_COMPONENTS, "PM_GET_NUM_COMPONENTS", 1, PM_REQ, INWHO,
389 	    DIP, NODEP},
390 	{PM_GET_COMPONENT_NAME, "PM_GET_COMPONENT_NAME", 1, PM_REQ,
391 	    INWHO | INDATAOUT, DIP, NODEP},
392 	{PM_GET_NUM_POWER_LEVELS, "PM_GET_NUM_POWER_LEVELS", 1, PM_REQ, INWHO,
393 	    DIP, NODEP},
394 	{PM_GET_STATE_CHANGE, "PM_GET_STATE_CHANGE", 1, PM_PSC},
395 	{PM_GET_STATE_CHANGE_WAIT, "PM_GET_STATE_CHANGE_WAIT", 1, PM_PSC},
396 	{PM_DIRECT_PM, "PM_DIRECT_PM", 1, PM_REQ, INWHO, DIP, NODEP,
397 	    (SU | SG)},
398 	{PM_RELEASE_DIRECT_PM, "PM_RELEASE_DIRECT_PM", 1, PM_REQ, INWHO,
399 	    DIP, NODEP},
400 	{PM_DIRECT_NOTIFY, "PM_DIRECT_NOTIFY", 1, PM_PSC},
401 	{PM_DIRECT_NOTIFY_WAIT, "PM_DIRECT_NOTIFY_WAIT", 1, PM_PSC},
402 	{PM_RESET_DEVICE_THRESHOLD, "PM_RESET_DEVICE_THRESHOLD", 1, PM_REQ,
403 	    INWHO, DIP, NODEP, SU},
404 	{PM_GET_PM_STATE, "PM_GET_PM_STATE", 1, NOSTRUCT},
405 	{PM_GET_DEVICE_TYPE, "PM_GET_DEVICE_TYPE", 1, PM_REQ, INWHO,
406 	    DIP, NODEP},
407 	{PM_SET_COMPONENT_THRESHOLDS, "PM_SET_COMPONENT_THRESHOLDS", 1, PM_REQ,
408 	    INWHO | INDATAINT, NODIP, NODEP, SU},
409 	{PM_GET_COMPONENT_THRESHOLDS, "PM_GET_COMPONENT_THRESHOLDS", 1, PM_REQ,
410 	    INWHO | INDATAOUT, DIP, NODEP},
411 	{PM_IDLE_DOWN, "PM_IDLE_DOWN", 1, NOSTRUCT, 0, 0, 0, SU},
412 	{PM_GET_DEVICE_THRESHOLD_BASIS, "PM_GET_DEVICE_THRESHOLD_BASIS", 1,
413 	    PM_REQ, INWHO, DIP, NODEP},
414 	{PM_SET_CURRENT_POWER, "PM_SET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
415 	    NODEP},
416 	{PM_GET_CURRENT_POWER, "PM_GET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
417 	    NODEP},
418 	{PM_GET_FULL_POWER, "PM_GET_FULL_POWER", 1, PM_REQ, INWHO, DIP,
419 	    NODEP},
420 	{PM_ADD_DEPENDENT, "PM_ADD_DEPENDENT", 1, PM_REQ, INWHO | INDATASTRING,
421 	    DIP, DEP, SU},
422 	{PM_GET_TIME_IDLE, "PM_GET_TIME_IDLE", 1, PM_REQ, INWHO, DIP, NODEP},
423 	{PM_ADD_DEPENDENT_PROPERTY, "PM_ADD_DEPENDENT_PROPERTY", 1, PM_REQ,
424 	    INWHO | INDATASTRING, NODIP, DEP, SU},
425 	{0, NULL}
426 };
427 
428 struct pm_cmd_info *
429 pc_info(int cmd)
430 {
431 	struct pm_cmd_info *pcip;
432 
433 	for (pcip = pmci; pcip->name; pcip++) {
434 		if (cmd == pcip->cmd)
435 			return (pcip);
436 	}
437 	return (NULL);
438 }
439 
440 static char *
441 pm_decode_cmd(int cmd)
442 {
443 	static char invbuf[64];
444 	struct pm_cmd_info *pcip = pc_info(cmd);
445 	if (pcip != NULL)
446 		return (pcip->name);
447 	(void) sprintf(invbuf, "ioctl: invalid command %d\n", cmd);
448 	return (invbuf);
449 }
450 
451 /*
452  * Allocate scan resources, create the taskq, then dispatch a scan.
453  * Called only if autopm is enabled.
454  */
455 int
456 pm_start_pm_walk(dev_info_t *dip, void *arg)
457 {
458 	char *cmdstr = pm_decode_cmd(*((int *)arg));
459 
460 	if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip))
461 		return (DDI_WALK_CONTINUE);
462 
463 	/*
464 	 * Construct per dip scan taskq
465 	 */
466 	mutex_enter(&pm_scan_lock);
467 	if (autopm_enabled)
468 		pm_scan_init(dip);
469 	mutex_exit(&pm_scan_lock);
470 
471 	/*
472 	 * Start doing pm on the device: ensure the pm_scan data structure is
473 	 * initialized; no need to guarantee a successful scan run.
474 	 */
475 	PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: scan %s@%s(%s#%d)\n", cmdstr,
476 	    PM_DEVICE(dip)))
477 	pm_rescan(dip);
478 
479 	return (DDI_WALK_CONTINUE);
480 }
481 
482 /*
483  * Bring devices to full power level, then stop scan
484  */
485 int
486 pm_stop_pm_walk(dev_info_t *dip, void *arg)
487 {
488 	pm_info_t *info = PM_GET_PM_INFO(dip);
489 	char *cmdstr = pm_decode_cmd(*((int *)arg));
490 
491 	if (!info)
492 		return (DDI_WALK_CONTINUE);
493 	/*
494 	 * Stop the current scan, and then bring the device back to normal power.
495 	 */
496 	if (!PM_ISBC(dip)) {
497 		PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: stop scan for "
498 		    "%s@%s(%s#%d)\n", cmdstr, PM_DEVICE(dip)))
499 		pm_scan_stop(dip);
500 	}
501 
502 	if (!PM_ISBC(dip) && !PM_ISDIRECT(dip) &&
503 	    !pm_all_at_normal(dip)) {
504 		PM_LOCK_DIP(dip);
505 		if (info->pmi_dev_pm_state & PM_DETACHING) {
506 			PMD(PMD_ALLNORM, ("ioctl: %s: deferring "
507 			    "all_to_normal because %s@%s(%s#%d) is detaching\n",
508 			    cmdstr, PM_DEVICE(dip)))
509 			info->pmi_dev_pm_state |= PM_ALLNORM_DEFERRED;
510 			PM_UNLOCK_DIP(dip);
511 			return (DDI_WALK_CONTINUE);
512 		}
513 		PM_UNLOCK_DIP(dip);
514 		if (pm_all_to_normal(dip, PM_CANBLOCK_FAIL) != DDI_SUCCESS) {
515 			PMD(PMD_ERROR, ("ioctl: %s: could not bring %s@%s"
516 			    "(%s#%d) to normal\n", cmdstr, PM_DEVICE(dip)))
517 		}
518 	}
519 
520 	return (DDI_WALK_CONTINUE);
521 }
522 
523 static int
524 pm_start_idledown(dev_info_t *dip, void *arg)
525 {
526 	int		flag = (int)(intptr_t)arg;
527 	pm_scan_t	*scanp = PM_GET_PM_SCAN(dip);
528 
529 	if (!scanp)
530 		return (DDI_WALK_CONTINUE);
531 
532 	PM_LOCK_DIP(dip);
533 	scanp->ps_idle_down |= flag;
534 	PM_UNLOCK_DIP(dip);
535 	pm_rescan(dip);
536 
537 	return (DDI_WALK_CONTINUE);
538 }
539 
540 /*ARGSUSED*/
541 static int
542 pm_end_idledown(dev_info_t *dip, void *ignore)
543 {
544 	pm_scan_t	*scanp = PM_GET_PM_SCAN(dip);
545 
546 	if (!scanp)
547 		return (DDI_WALK_CONTINUE);
548 
549 	PM_LOCK_DIP(dip);
550 	/*
551 	 * The PMID_TIMERS bits are placeholders until idledown expires.
552 	 * The bits are also the base for regenerating the PMID_SCANS bits.
553 	 * While it is up to the scan thread to clear the PMID_SCANS bits
554 	 * after each scan run, PMID_TIMERS ensures aggressive scan-down
555 	 * behavior throughout the idledown period.
556 	 */
557 	scanp->ps_idle_down &= ~PMID_TIMERS;
558 	PM_UNLOCK_DIP(dip);
559 
560 	return (DDI_WALK_CONTINUE);
561 }
562 
563 /*ARGSUSED*/
564 static void
565 pm_end_idledown_walk(void *ignore)
566 {
567 	PMD(PMD_IDLEDOWN, ("ioctl: end_idledown: idledown_id(%lx) timer is "
568 	    "off\n", (ulong_t)pmstp->pm_idledown_id));
569 
570 	mutex_enter(&pm_scan_lock);
571 	pmstp->pm_idledown_id = 0;
572 	mutex_exit(&pm_scan_lock);
573 
574 	ddi_walk_devs(ddi_root_node(), pm_end_idledown, NULL);
575 }
576 
577 /*
578  * pm_timeout_idledown - keep idledown effect for 10 seconds.
579  *
580  * Return 0 if another competing caller scheduled idledown timeout,
581  * otherwise, return idledown timeout_id.
582  */
583 static timeout_id_t
584 pm_timeout_idledown(void)
585 {
586 	timeout_id_t	to_id;
587 
588 	/*
589 	 * Keep idle-down in effect for either 10 seconds
590 	 * or the length of a scan interval, whichever is greater.
591 	 */
592 	mutex_enter(&pm_scan_lock);
593 	if (pmstp->pm_idledown_id != 0) {
594 		to_id = pmstp->pm_idledown_id;
595 		pmstp->pm_idledown_id = 0;
596 		mutex_exit(&pm_scan_lock);
597 		(void) untimeout(to_id);
598 		mutex_enter(&pm_scan_lock);
599 		if (pmstp->pm_idledown_id != 0) {
600 			PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: "
601 			    "another caller got it, idledown_id(%lx)!\n",
602 			    (ulong_t)pmstp->pm_idledown_id))
603 			mutex_exit(&pm_scan_lock);
604 			return (0);
605 		}
606 	}
607 	pmstp->pm_idledown_id = timeout(pm_end_idledown_walk, NULL,
608 	    PM_IDLEDOWN_TIME * hz);
609 	PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: idledown_id(%lx)\n",
610 	    (ulong_t)pmstp->pm_idledown_id))
611 	mutex_exit(&pm_scan_lock);
612 
613 	return (pmstp->pm_idledown_id);
614 }
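/*
 * Typical caller pattern (see, e.g., the PM_SET_CURRENT_POWER case below):
 * only when pm_timeout_idledown() returns a nonzero id does the caller walk
 * the device tree with pm_start_idledown() to flag devices for aggressive
 * scan-down during the idledown period.
 */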
615 
616 static int
617 pm_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
618 	struct pollhead **phpp)
619 {
620 	extern struct pollhead pm_pollhead;	/* common/os/sunpm.c */
621 	int	clone;
622 
623 	clone = PM_MINOR_TO_CLONE(getminor(dev));
624 	PMD(PMD_IOCTL, ("ioctl: pm_chpoll: clone %d\n", clone))
625 	if ((events & (POLLIN | POLLRDNORM)) && pm_poll_cnt[clone]) {
626 		*reventsp |= (POLLIN | POLLRDNORM);
627 		PMD(PMD_IOCTL, ("ioctl: pm_chpoll: reventsp set\n"))
628 	} else {
629 		*reventsp = 0;
630 		if (!anyyet) {
631 			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: not anyyet\n"))
632 			*phpp = &pm_pollhead;
633 		}
634 #ifdef DEBUG
635 		else {
636 			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: anyyet\n"))
637 		}
638 #endif
639 	}
640 	return (0);
641 }
642 
643 /*
644  * Called by pm_discard_entries() to free up the memory.  It also decrements
645  * pm_poll_cnt, if direct is nonzero.
646  */
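/*
 * Note: pscep->psce_lock is expected to be held on entry (presumably taken by
 * pm_psc_clone_to_direct()/pm_psc_clone_to_interest()); it is dropped here
 * via mutex_exit() once the entries have been freed.
 */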
647 static void
648 pm_free_entries(psce_t *pscep, int clone, int direct)
649 {
650 	pm_state_change_t	*p;
651 
652 	if (pscep) {
653 		p = pscep->psce_out;
654 		while (p->size) {
655 			if (direct) {
656 				PMD(PMD_IOCTL, ("ioctl: discard: "
657 				    "pm_poll_cnt[%d] is %d before "
658 				    "ASSERT\n", clone,
659 				    pm_poll_cnt[clone]))
660 				ASSERT(pm_poll_cnt[clone]);
661 				pm_poll_cnt[clone]--;
662 			}
663 			kmem_free(p->physpath, p->size);
664 			p->size = 0;
665 			if (p == pscep->psce_last)
666 				p = pscep->psce_first;
667 			else
668 				p++;
669 		}
670 		pscep->psce_out = pscep->psce_first;
671 		pscep->psce_in = pscep->psce_first;
672 		mutex_exit(&pscep->psce_lock);
673 	}
674 }
675 
676 /*
677  * Discard entries for this clone. Calls pm_free_entries to free up memory.
678  */
679 static void
680 pm_discard_entries(int clone)
681 {
682 	psce_t	*pscep;
683 	psce_t			*pm_psc_clone_to_direct(int);
684 	psce_t			*pm_psc_clone_to_interest(int);
685 	int			direct = 0;
686 
687 	mutex_enter(&pm_clone_lock);
688 	if ((pscep = pm_psc_clone_to_direct(clone)) != NULL)
689 		direct = 1;
690 	pm_free_entries(pscep, clone, direct);
691 	pscep = pm_psc_clone_to_interest(clone);
692 	pm_free_entries(pscep, clone, 0);
693 	mutex_exit(&pm_clone_lock);
694 }
695 
696 int
697 pm_set_sys_threshold(dev_info_t *dip, void *arg)
698 {
699 	int pm_system_idle_threshold = *((int *)arg);
700 	pm_info_t *info = PM_GET_PM_INFO(dip);
701 	int	processed = 0;
702 
703 	if (!info)
704 		return (DDI_WALK_CONTINUE);
705 
706 	if (!PM_ISBC(dip) && !PM_ISDIRECT(dip)) {
707 		switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
708 		case PMC_DEF_THRESH:
709 			PMD(PMD_IOCTL, ("ioctl: set_sys_threshold: set "
710 			    "%s@%s(%s#%d) default thresh to 0t%d\n",
711 			    PM_DEVICE(dip), pm_system_idle_threshold))
712 			pm_set_device_threshold(dip, pm_system_idle_threshold,
713 			    PMC_DEF_THRESH);
714 			processed++;
715 			break;
716 		default:
717 			break;
718 		}
719 	}
720 
721 	if (processed && autopm_enabled)
722 		pm_rescan(dip);
723 
724 	return (DDI_WALK_CONTINUE);
725 }
726 
727 /*ARGSUSED*/
728 static int
729 pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
730 {
731 	dev_t	dev;
732 	int	instance;
733 
734 	switch (infocmd) {
735 	case DDI_INFO_DEVT2DEVINFO:
736 		if (pmstp->pm_instance == -1)
737 			return (DDI_FAILURE);
738 		*result = pmstp->pm_dip;
739 		return (DDI_SUCCESS);
740 
741 	case DDI_INFO_DEVT2INSTANCE:
742 		dev = (dev_t)arg;
743 		instance = getminor(dev) >> 8;
744 		*result = (void *)(uintptr_t)instance;
745 		return (DDI_SUCCESS);
746 
747 	default:
748 		return (DDI_FAILURE);
749 	}
750 }
751 
752 
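/*
 * Each open of the pm device gets its own clone minor in the range
 * 1..PM_MAX_CLONE-1 (clone 0 is never handed out); the opener's credentials
 * are held in pm_cred[clone] so pm_perms() can check them at ioctl time.
 */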
753 /*ARGSUSED1*/
754 static int
755 pm_open(dev_t *devp, int flag, int otyp, cred_t *cr)
756 {
757 	int		clone;
758 
759 	if (otyp != OTYP_CHR)
760 		return (EINVAL);
761 
762 	mutex_enter(&pm_clone_lock);
763 	for (clone = 1; clone < PM_MAX_CLONE; clone++)
764 		if (!pmstp->pm_clones[clone])
765 			break;
766 
767 	if (clone == PM_MAX_CLONE) {
768 		mutex_exit(&pm_clone_lock);
769 		return (ENXIO);
770 	}
771 	pmstp->pm_cred[clone] = cr;
772 	crhold(cr);
773 
774 	*devp = makedevice(getmajor(*devp), (pmstp->pm_instance << 8) + clone);
775 	pmstp->pm_clones[clone] = 1;
776 	mutex_exit(&pm_clone_lock);
777 
778 	return (0);
779 }
780 
781 /*ARGSUSED1*/
782 static int
783 pm_close(dev_t dev, int flag, int otyp, cred_t *cr)
784 {
785 	int clone;
786 
787 	if (otyp != OTYP_CHR)
788 		return (EINVAL);
789 
790 	clone = PM_MINOR_TO_CLONE(getminor(dev));
791 	PMD(PMD_CLOSE, ("pm_close: minor %x, clone %x\n", getminor(dev),
792 	    clone))
793 
794 	/*
795 	 * Walk the entire device tree to find the corresponding
796 	 * device and operate on it.
797 	 */
798 	ddi_walk_devs(ddi_root_node(), pm_close_direct_pm_device,
799 	    (void *) &clone);
800 
801 	crfree(pmstp->pm_cred[clone]);
802 	pmstp->pm_cred[clone] = 0;
803 	pmstp->pm_clones[clone] = 0;
804 	pm_discard_entries(clone);
805 	ASSERT(pm_poll_cnt[clone] == 0);
806 	pm_deregister_watcher(clone, NULL);
807 	return (0);
808 }
809 
810 /*ARGSUSED*/
811 static int
812 pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
813 {
814 	struct pm_cmd_info *pc_info(int);
815 	struct pm_cmd_info *pcip = pc_info(cmd);
816 	pm_req_t	req;
817 	dev_info_t	*dip = NULL;
818 	pm_info_t	*info = NULL;
819 	int		clone;
820 	char		*cmdstr = pm_decode_cmd(cmd);
821 	/*
822 	 * To keep devinfo nodes from going away while we're holding a
823 	 * pointer to their dip, pm_name_to_dip() optionally holds
824 	 * the devinfo node.  If we've done that, we set dipheld
825 	 * so we know at the end of the ioctl processing to release the
826 	 * node again.
827 	 */
828 	int		dipheld = 0;
829 	int		icount = 0;
830 	int		i;
831 	int		comps;
832 	size_t		lencopied;
833 	int		ret = ENOTTY;
834 	int		curpower;
835 	char		who[MAXNAMELEN];
836 	size_t		wholen;			/* copyinstr length */
837 	size_t		deplen = MAXNAMELEN;
838 	char		*dep, i_dep_buf[MAXNAMELEN];
839 	char		*pathbuf;
840 	struct pm_component *cp;
841 #ifdef	_MULTI_DATAMODEL
842 	pm_state_change32_t		*pscp32;
843 	pm_state_change32_t		psc32;
844 	size_t				copysize32;
845 #endif
846 	pm_state_change_t		*pscp;
847 	pm_state_change_t		psc;
848 	size_t		copysize;
849 	extern int	pm_default_idle_threshold;
850 	extern int	pm_system_idle_threshold;
851 	extern void	pm_record_thresh(pm_thresh_rec_t *);
852 	psce_t		*pm_psc_clone_to_direct(int);
853 	psce_t		*pm_psc_clone_to_interest(int);
854 	extern	void	pm_register_watcher(int, dev_info_t *);
855 	extern	int	pm_get_current_power(dev_info_t *, int, int *);
856 	extern	int	pm_interest_registered(int);
857 	extern	void	pm_all_to_default_thresholds(void);
858 	extern	int	pm_current_threshold(dev_info_t *, int, int *);
859 	extern void	pm_deregister_watcher(int, dev_info_t *);
860 	extern void	pm_unrecord_threshold(char *);
861 
862 	PMD(PMD_IOCTL, ("ioctl: %s: begin\n", cmdstr))
863 
864 #ifdef DEBUG
865 	if (cmd == 666) {
866 		ddi_walk_devs(ddi_root_node(), print_info, NULL);
867 		return (0);
868 	}
869 	ret = 0x0badcafe;			/* sanity checking */
870 	pm_cmd = cmd;				/* for ASSERT debugging */
871 	pm_cmd_string = cmdstr;	/* for ASSERT debugging */
872 #endif
873 
874 
875 	if (pcip == NULL) {
876 		PMD(PMD_ERROR, ("ioctl: unknown command %d\n", cmd))
877 		return (ENOTTY);
878 	}
879 	if (pcip->supported == 0) {
880 		PMD(PMD_ERROR, ("ioctl: command %s no longer supported\n",
881 		    pcip->name))
882 		return (ENOTTY);
883 	}
884 
885 	wholen = 0;
886 	dep = i_dep_buf;
887 	i_dep_buf[0] = 0;
888 	clone = PM_MINOR_TO_CLONE(getminor(dev));
889 	if (!pm_perms(pcip->permission, pmstp->pm_cred[clone])) {
890 		ret = EPERM;
891 		return (ret);
892 	}
893 	switch (pcip->str_type) {
894 	case PM_REQ:
895 #ifdef	_MULTI_DATAMODEL
896 		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
897 			pm_req32_t	req32;
898 
899 			if (ddi_copyin((caddr_t)arg, &req32,
900 			    sizeof (req32), mode) != 0) {
901 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
902 				    "EFAULT\n\n", cmdstr))
903 				ret = EFAULT;
904 				break;
905 			}
906 			req.component = req32.component;
907 			req.value = req32.value;
908 			req.datasize = req32.datasize;
909 			if (pcip->inargs & INWHO) {
910 				ret = copyinstr((char *)(uintptr_t)
911 				    req32.physpath, who, MAXNAMELEN, &wholen);
912 				if (ret) {
913 					PMD(PMD_ERROR, ("ioctl: %s: "
914 					    "copyinstr fails returning %d\n",
915 					    cmdstr, ret))
916 					break;
917 				}
918 				req.physpath = who;
919 			}
920 			PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n", cmdstr,
921 			    req.physpath))
922 			if (pcip->inargs & INDATA) {
923 				req.data = (void *)(uintptr_t)req32.data;
924 				req.datasize = req32.datasize;
925 			} else {
926 				req.data = NULL;
927 				req.datasize = 0;
928 			}
929 			switch (pcip->diptype) {
930 			case DIP:
931 				if (!(dip =
932 				    pm_name_to_dip(req.physpath, 1))) {
933 					PMD(PMD_ERROR, ("ioctl: %s: "
934 					    "pm_name_to_dip for %s failed\n",
935 					    cmdstr, req.physpath))
936 					return (ENODEV);
937 				}
938 				ASSERT(!dipheld);
939 				dipheld++;
940 				break;
941 			case NODIP:
942 				break;
943 			default:
944 				/*
945 				 * Internal error, invalid ioctl description;
946 				 * force debug entry even if pm_debug not set.
947 				 */
948 #ifdef	DEBUG
949 				pm_log("invalid diptype %d for cmd %d (%s)\n",
950 				    pcip->diptype, cmd, pcip->name);
951 #endif
952 				ASSERT(0);
953 				return (EIO);
954 			}
955 			if (pcip->inargs & INDATAINT) {
956 				int32_t int32buf;
957 				int32_t *i32p;
958 				int *ip;
959 				icount = req32.datasize / sizeof (int32_t);
960 				if (icount <= 0) {
961 					PMD(PMD_ERROR, ("ioctl: %s: datasize"
962 					    " 0 or neg EFAULT\n\n", cmdstr))
963 					ret = EFAULT;
964 					break;
965 				}
966 				ASSERT(!(pcip->inargs & INDATASTRING));
967 				req.datasize = icount * sizeof (int);
968 				req.data = kmem_alloc(req.datasize, KM_SLEEP);
969 				ip = req.data;
970 				ret = 0;
971 				for (i = 0,
972 				    i32p = (int32_t *)(uintptr_t)req32.data;
973 				    i < icount; i++, i32p++) {
974 					if (ddi_copyin((void *)i32p, &int32buf,
975 					    sizeof (int32_t), mode)) {
976 						kmem_free(req.data,
977 						    req.datasize);
978 						PMD(PMD_ERROR, ("ioctl: %s: "
979 						    "entry %d EFAULT\n",
980 						    cmdstr, i))
981 						ret = EFAULT;
982 						break;
983 					}
984 					*ip++ = (int)int32buf;
985 				}
986 				if (ret)
987 					break;
988 			}
989 			if (pcip->inargs & INDATASTRING) {
990 				ASSERT(!(pcip->inargs & INDATAINT));
991 				ASSERT(pcip->deptype == DEP);
992 				if (req32.data != NULL) {
993 					size_t dummy;
994 					if (copyinstr((void *)(uintptr_t)
995 					    req32.data, dep, deplen, &dummy)) {
996 						PMD(PMD_ERROR, ("ioctl: %s: "
997 						    "0x%p dep size %lx, EFAULT"
998 						    "\n", cmdstr,
999 						    (void *)req.data, deplen))
1000 						ret = EFAULT;
1001 						break;
1002 					}
1003 #ifdef DEBUG
1004 					else {
1005 						PMD(PMD_DEP, ("ioctl: %s: "
1006 						    "dep %s\n", cmdstr, dep))
1007 					}
1008 #endif
1009 				} else {
1010 					PMD(PMD_ERROR, ("ioctl: %s: no "
1011 					    "dependent\n", cmdstr))
1012 					ret = EINVAL;
1013 					break;
1014 				}
1015 			}
1016 		} else
1017 #endif /* _MULTI_DATAMODEL */
1018 		{
1019 			if (ddi_copyin((caddr_t)arg,
1020 			    &req, sizeof (req), mode) != 0) {
1021 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1022 				    "EFAULT\n\n", cmdstr))
1023 				ret = EFAULT;
1024 				break;
1025 			}
1026 			if (pcip->inargs & INWHO) {
1027 				ret = copyinstr((char *)req.physpath, who,
1028 				    MAXNAMELEN, &wholen);
1029 				if (ret) {
1030 					PMD(PMD_ERROR, ("ioctl: %s copyinstr"
1031 					    " fails returning %d\n", cmdstr,
1032 					    ret))
1033 					break;
1034 				}
1035 				req.physpath = who;
1036 			}
1037 			PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n", cmdstr,
1038 			    req.physpath))
1039 			if (!(pcip->inargs & INDATA)) {
1040 				req.data = NULL;
1041 				req.datasize = 0;
1042 			}
1043 			switch (pcip->diptype) {
1044 			case DIP:
1045 				if (!(dip =
1046 				    pm_name_to_dip(req.physpath, 1))) {
1047 					PMD(PMD_ERROR, ("ioctl: %s: "
1048 					    "pm_name_to_dip for %s failed\n",
1049 					    cmdstr, req.physpath))
1050 					return (ENODEV);
1051 				}
1052 				ASSERT(!dipheld);
1053 				dipheld++;
1054 				break;
1055 			case NODIP:
1056 				break;
1057 			default:
1058 				/*
1059 				 * Internal error, invalid ioctl description;
1060 				 * force debug entry even if pm_debug not set.
1061 				 */
1062 #ifdef	DEBUG
1063 				pm_log("invalid diptype %d for cmd %d (%s)\n",
1064 				    pcip->diptype, cmd, pcip->name);
1065 #endif
1066 				ASSERT(0);
1067 				return (EIO);
1068 			}
1069 			if (pcip->inargs & INDATAINT) {
1070 				int *ip;
1071 
1072 				ASSERT(!(pcip->inargs & INDATASTRING));
1073 				ip = req.data;
1074 				icount = req.datasize / sizeof (int);
1075 				if (icount <= 0) {
1076 					PMD(PMD_ERROR, ("ioctl: %s: datasize"
1077 					    " 0 or neg EFAULT\n\n", cmdstr))
1078 					ret = EFAULT;
1079 					break;
1080 				}
1081 				req.data = kmem_alloc(req.datasize, KM_SLEEP);
1082 				if (ddi_copyin((caddr_t)ip, req.data,
1083 				    req.datasize, mode) != 0) {
1084 					PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1085 					    "EFAULT\n\n", cmdstr))
1086 					ret = EFAULT;
1087 					break;
1088 				}
1089 			}
1090 			if (pcip->inargs & INDATASTRING) {
1091 				ASSERT(!(pcip->inargs & INDATAINT));
1092 				ASSERT(pcip->deptype == DEP);
1093 				if (req.data != NULL) {
1094 					size_t dummy;
1095 					if (copyinstr((caddr_t)req.data,
1096 					    dep, deplen, &dummy)) {
1097 						PMD(PMD_ERROR, ("ioctl: %s: "
1098 						    "0x%p dep size %lu, "
1099 						    "EFAULT\n", cmdstr,
1100 						    (void *)req.data, deplen))
1101 						ret = EFAULT;
1102 						break;
1103 					}
1104 #ifdef DEBUG
1105 					else {
1106 						PMD(PMD_DEP, ("ioctl: %s: "
1107 						    "dep %s\n", cmdstr, dep))
1108 					}
1109 #endif
1110 				} else {
1111 					PMD(PMD_ERROR, ("ioctl: %s: no "
1112 					    "dependent\n", cmdstr))
1113 					ret = EINVAL;
1114 					break;
1115 				}
1116 			}
1117 		}
1118 		/*
1119 		 * Now we've got all the args in for the commands that
1120 		 * use the new pm_req struct.
1121 		 */
1122 		switch (cmd) {
1123 		case PM_REPARSE_PM_PROPS:
1124 		{
1125 			struct dev_ops	*drv;
1126 			struct cb_ops	*cb;
1127 			void		*propval;
1128 			int length;
1129 			/*
1130 			 * This ioctl is provided only for the ddivs pm test.
1131 			 * We only do it for a driver that explicitly allows
1132 			 * us to do so by exporting a pm-reparse-ok property.
1133 			 * We only care whether the property exists or not.
1134 			 */
1135 			if ((drv = ddi_get_driver(dip)) == NULL) {
1136 				ret = EINVAL;
1137 				break;
1138 			}
1139 			if ((cb = drv->devo_cb_ops) != NULL) {
1140 				if ((*cb->cb_prop_op)(DDI_DEV_T_ANY, dip,
1141 				    PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1142 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1143 				    "pm-reparse-ok", (caddr_t)&propval,
1144 				    &length) != DDI_SUCCESS) {
1145 					ret = EINVAL;
1146 					break;
1147 				}
1148 			} else if (ddi_prop_op(DDI_DEV_T_ANY, dip,
1149 			    PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1150 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1151 			    "pm-reparse-ok", (caddr_t)&propval,
1152 			    &length) != DDI_SUCCESS) {
1153 				ret = EINVAL;
1154 				break;
1155 			}
1156 			kmem_free(propval, length);
1157 			ret =  e_new_pm_props(dip);
1158 			break;
1159 		}
1160 
1161 		case PM_GET_DEVICE_THRESHOLD:
1162 			PM_LOCK_DIP(dip);
1163 			if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip)) {
1164 				PM_UNLOCK_DIP(dip);
1165 				PMD(PMD_ERROR, ("ioctl: %s: ENODEV\n",
1166 				    cmdstr))
1167 				ret = ENODEV;
1168 				break;
1169 			}
1170 			*rval_p = DEVI(dip)->devi_pm_dev_thresh;
1171 			PM_UNLOCK_DIP(dip);
1172 			ret = 0;
1173 			break;
1174 
1175 		case PM_DIRECT_PM:
1176 		{
1177 			int has_dep;
1178 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1179 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1180 				    "ENODEV\n", cmdstr))
1181 				ret = ENODEV;
1182 				break;
1183 			}
1184 			/*
1185 			 * Check to see if there is a dependency on this
1186 			 * kept device; if so, return EBUSY.
1187 			 */
1188 			pathbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1189 			(void) ddi_pathname(dip, pathbuf);
1190 			pm_dispatch_to_dep_thread(PM_DEP_WK_CHECK_KEPT,
1191 			    NULL, pathbuf, PM_DEP_WAIT, &has_dep, 0);
1192 			kmem_free(pathbuf, MAXPATHLEN);
1193 			if (has_dep) {
1194 				PMD(PMD_ERROR | PMD_DPM, ("%s EBUSY\n",
1195 				    cmdstr))
1196 				ret = EBUSY;
1197 				break;
1198 			}
1199 			PM_LOCK_DIP(dip);
1200 			if (PM_ISDIRECT(dip) || (info->pmi_clone != 0)) {
1201 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1202 				    "%s@%s(%s#%d): EBUSY\n", cmdstr,
1203 				    PM_DEVICE(dip)))
1204 				PM_UNLOCK_DIP(dip);
1205 				ret = EBUSY;
1206 				break;
1207 			}
1208 			info->pmi_dev_pm_state |= PM_DIRECT;
1209 			info->pmi_clone = clone;
1210 			PM_UNLOCK_DIP(dip);
1211 			PMD(PMD_DPM, ("ioctl: %s: info %p, pmi_clone %d\n",
1212 			    cmdstr, (void *)info, clone))
1213 			mutex_enter(&pm_clone_lock);
1214 			pm_register_watcher(clone, dip);
1215 			mutex_exit(&pm_clone_lock);
1216 			ret = 0;
1217 			break;
1218 		}
1219 
1220 		case PM_RELEASE_DIRECT_PM:
1221 		{
1222 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1223 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1224 				    "ENODEV\n", cmdstr))
1225 				ret = ENODEV;
1226 				break;
1227 			}
1228 			PM_LOCK_DIP(dip);
1229 			if (info->pmi_clone != clone) {
1230 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1231 				    "%s@%s(%s#%d) EINVAL\n", cmdstr,
1232 				    PM_DEVICE(dip)))
1233 				ret = EINVAL;
1234 				PM_UNLOCK_DIP(dip);
1235 				break;
1236 			}
1237 			ASSERT(PM_ISDIRECT(dip));
1238 			info->pmi_dev_pm_state &= ~PM_DIRECT;
1239 			PM_UNLOCK_DIP(dip);
1240 			/* Bring ourselves up if there is a keeper. */
1241 			pathbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1242 			(void) ddi_pathname(dip, pathbuf);
1243 			pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF,
1244 			    NULL, pathbuf, PM_DEP_WAIT, NULL, 0);
1245 			kmem_free(pathbuf, MAXPATHLEN);
1246 			pm_discard_entries(clone);
1247 			pm_deregister_watcher(clone, dip);
1248 			/*
1249 			 * Now we can let the other threads that are
1250 			 * trying to do a DIRECT_PM through.
1251 			 */
1252 			PM_LOCK_DIP(dip);
1253 			info->pmi_clone = 0;
1254 			PM_UNLOCK_DIP(dip);
1255 			pm_proceed(dip, PMP_RELEASE, -1, -1);
1256 			PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1257 			    cmdstr))
1258 			pm_rescan(dip);
1259 			ret = 0;
1260 			break;
1261 		}
1262 
1263 		case PM_SET_CURRENT_POWER:
1264 		{
1265 			int comp = req.component;
1266 			int  value = req.value;
1267 			PMD(PMD_DPM, ("ioctl: %s: %s component %d to value "
1268 			    "%d\n", cmdstr, req.physpath, comp, value))
1269 			if (!e_pm_valid_comp(dip, comp, NULL) ||
1270 			    !e_pm_valid_power(dip, comp, value)) {
1271 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1272 				    "physpath=%s, comp=%d, level=%d, fails\n",
1273 				    cmdstr, req.physpath, comp, value))
1274 				ret = EINVAL;
1275 				break;
1276 			}
1277 
1278 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1279 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1280 				    "ENODEV\n", cmdstr))
1281 				ret = ENODEV;
1282 				break;
1283 			}
1284 			if (info->pmi_clone != clone) {
1285 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1286 				    "(not owner) %s fails; clone %d, owner %d"
1287 				    "\n", cmdstr, req.physpath, clone,
1288 				    info->pmi_clone))
1289 				ret = EINVAL;
1290 				break;
1291 			}
1292 			ASSERT(PM_ISDIRECT(dip));
1293 
1294 			if (pm_set_power(dip, comp, value, PM_LEVEL_EXACT,
1295 			    PM_CANBLOCK_BLOCK, 0, &ret) != DDI_SUCCESS) {
1296 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1297 				    "pm_set_power for %s fails, errno=%d\n",
1298 				    cmdstr, req.physpath, ret))
1299 				break;
1300 			}
1301 
1302 			pm_proceed(dip, PMP_SETPOWER, comp, value);
1303 
1304 			/*
1305 			 * Power down all idle components if console framebuffer
1306 			 * is powered off.
1307 			 */
1308 			if (PM_IS_CFB(dip) && (pm_system_idle_threshold ==
1309 			    pm_default_idle_threshold)) {
1310 				dev_info_t	*root = ddi_root_node();
1311 				if (PM_ISBC(dip)) {
1312 					if (comp == 0 && value == 0 &&
1313 					    (pm_timeout_idledown() != 0)) {
1314 						ddi_walk_devs(root,
1315 						    pm_start_idledown,
1316 						    (void *)PMID_CFB);
1317 					}
1318 				} else {
1319 					int count = 0;
1320 					for (i = 0; i < PM_NUMCMPTS(dip); i++) {
1321 						ret = pm_get_current_power(dip,
1322 						    i, &curpower);
1323 						if (ret == DDI_SUCCESS &&
1324 						    curpower == 0)
1325 							count++;
1326 					}
1327 					if ((count == PM_NUMCMPTS(dip)) &&
1328 					    (pm_timeout_idledown() != 0)) {
1329 						ddi_walk_devs(root,
1330 						    pm_start_idledown,
1331 						    (void *)PMID_CFB);
1332 					}
1333 				}
1334 			}
1335 
1336 			PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1337 			    cmdstr))
1338 			pm_rescan(dip);
1339 			*rval_p = 0;
1340 			ret = 0;
1341 			break;
1342 		}
1343 
1344 		case PM_GET_FULL_POWER:
1345 		{
1346 			int normal;
1347 			ASSERT(dip);
1348 			PMD(PMD_NORM, ("ioctl: %s: %s component %d\n",
1349 			    cmdstr, req.physpath, req.component))
1350 			normal =  pm_get_normal_power(dip, req.component);
1351 
1352 			if (normal == DDI_FAILURE) {
1353 				PMD(PMD_ERROR | PMD_NORM, ("ioctl: %s: "
1354 				    "returns EINVAL\n", cmdstr))
1355 				ret = EINVAL;
1356 				break;
1357 			}
1358 			*rval_p = normal;
1359 			PMD(PMD_NORM, ("ioctl: %s: returns %d\n",
1360 			    cmdstr, normal))
1361 			ret = 0;
1362 			break;
1363 		}
1364 
1365 		case PM_GET_CURRENT_POWER:
1366 			if (pm_get_current_power(dip, req.component,
1367 			    rval_p) != DDI_SUCCESS) {
1368 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s "
1369 				    "EINVAL\n", cmdstr))
1370 				ret = EINVAL;
1371 				break;
1372 			}
1373 			PMD(PMD_DPM, ("ioctl: %s: %s comp %d returns %d\n",
1374 			    cmdstr, req.physpath, req.component, *rval_p))
1375 			if (*rval_p == PM_LEVEL_UNKNOWN)
1376 				ret = EAGAIN;
1377 			else
1378 				ret = 0;
1379 			break;
1380 
1381 		case PM_GET_TIME_IDLE:
1382 		{
1383 			time_t timestamp;
1384 			int comp = req.component;
1385 			pm_component_t *cp;
1386 			if (!e_pm_valid_comp(dip, comp, &cp)) {
1387 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1388 				    "component %d > numcmpts - 1 %d--EINVAL\n",
1389 				    cmdstr, PM_DEVICE(dip), comp,
1390 				    PM_NUMCMPTS(dip) - 1))
1391 				ret = EINVAL;
1392 				break;
1393 			}
1394 			timestamp = cp->pmc_timestamp;
1395 			if (timestamp) {
1396 				time_t now;
1397 				(void) drv_getparm(TIME, &now);
1398 				*rval_p = (now - timestamp);
1399 			} else {
1400 				*rval_p = 0;
1401 			}
1402 			ret = 0;
1403 			break;
1404 		}
1405 
1406 		case PM_ADD_DEPENDENT:
1407 		{
1408 			dev_info_t	*kept_dip;
1409 
1410 			PMD(PMD_KEEPS, ("%s, kept %s, keeper %s\n", cmdstr,
1411 			    dep, req.physpath))
1412 
1413 			/*
1414 			 * Hold and install kept while processing the dependency;
1415 			 * keeper (in .physpath) has already been held.
1416 			 */
1417 			if (dep[0] == '\0') {
1418 				PMD(PMD_ERROR, ("kept NULL or null\n"))
1419 				ret = EINVAL;
1420 				break;
1421 			} else if ((kept_dip =
1422 			    pm_name_to_dip(dep, 1)) == NULL) {
1423 				PMD(PMD_ERROR, ("no dip for kept %s\n", dep))
1424 				ret = ENODEV;
1425 				break;
1426 			} else if (kept_dip == dip) {
1427 				PMD(PMD_ERROR, ("keeper(%s, %p) - kept(%s, %p) "
1428 				    "self-dependency not allowed.\n",
1429 				    dep, (void *)kept_dip, req.physpath,
1430 				    (void *) dip))
1431 				PM_RELE(dip);	/* release "double" hold */
1432 				ret = EINVAL;
1433 				break;
1434 			}
1435 			ASSERT(!(strcmp(req.physpath, (char *)dep) == 0));
1436 
1437 			/*
1438 			 * record dependency, then walk through device tree
1439 			 * independently on behalf of kept and keeper to
1440 			 * establish newly created dependency.
1441 			 */
1442 			pm_dispatch_to_dep_thread(PM_DEP_WK_RECORD_KEEPER,
1443 			    req.physpath, dep, PM_DEP_WAIT, NULL, 0);
1444 
1445 			/*
1446 			 * Release kept after establishing the dependency; keeper
1447 			 * is released as part of ioctl exit processing.
1448 			 */
1449 			PM_RELE(kept_dip);
1450 			*rval_p = 0;
1451 			ret = 0;
1452 			break;
1453 		}
1454 
1455 		case PM_ADD_DEPENDENT_PROPERTY:
1456 		{
1457 			char *keeper, *kept;
1458 
1459 			if (dep[0] == '\0') {
1460 				PMD(PMD_ERROR, ("ioctl: %s: dep NULL or "
1461 				    "null\n", cmdstr))
1462 				ret = EINVAL;
1463 				break;
1464 			}
1465 			kept = dep;
1466 			keeper = req.physpath;
1467 			/*
1468 			 * Record the keeper - kept dependency, then walk through
1469 			 * the device tree to find all attached keepers, and walk
1470 			 * through again to apply the dependency to all the
1471 			 * potential kept devices.
1472 			 */
1473 			pm_dispatch_to_dep_thread(
1474 			    PM_DEP_WK_RECORD_KEEPER_PROP, keeper, kept,
1475 			    PM_DEP_WAIT, NULL, 0);
1476 
1477 			*rval_p = 0;
1478 			ret = 0;
1479 			break;
1480 		}
1481 
1482 		case PM_SET_DEVICE_THRESHOLD:
1483 		{
1484 			pm_thresh_rec_t *rp;
1485 			pm_pte_t *ep;	/* threshold header storage */
1486 			int *tp;	/* threshold storage */
1487 			size_t size;
1488 			extern int pm_thresh_specd(dev_info_t *);
1489 
1490 			/*
1491 			 * The header struct plus one entry struct plus one
1492 			 * threshold plus the length of the string
1493 			 */
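			/*
			 * Resulting single-allocation layout (a sketch):
			 *   [pm_thresh_rec_t][pm_pte_t][int thresh]["physpath\0"]
			 * ep points at the pm_pte_t, tp at the threshold int, and
			 * ptr_physpath at the copied string, as set up below.
			 */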
1494 			size = sizeof (pm_thresh_rec_t) +
1495 			    (sizeof (pm_pte_t) * 1) +
1496 			    (1 * sizeof (int)) +
1497 			    strlen(req.physpath) + 1;
1498 
1499 			rp = kmem_zalloc(size, KM_SLEEP);
1500 			rp->ptr_size = size;
1501 			rp->ptr_numcomps = 0;	/* means device threshold */
1502 			ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1503 			rp->ptr_entries = ep;
1504 			tp = (int *)((intptr_t)ep +
1505 			    (1 * sizeof (pm_pte_t)));
1506 			ep->pte_numthresh = 1;
1507 			ep->pte_thresh = tp;
1508 			*tp++ = req.value;
1509 			(void) strcat((char *)tp, req.physpath);
1510 			rp->ptr_physpath = (char *)tp;
1511 			ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1512 			    (intptr_t)rp + rp->ptr_size);
1513 			PMD(PMD_THRESH, ("ioctl: %s: record thresh %d for "
1514 			    "%s\n", cmdstr, req.value, req.physpath))
1515 			pm_record_thresh(rp);
1516 			/*
1517 			 * Don't free rp, pm_record_thresh() keeps it.
1518 			 * We don't try to apply it ourselves because we'd need
1519 			 * to know too much about locking.  Since we don't
1520 			 * hold a lock, the entry could be removed before
1521 			 * we get here.
1522 			 */
1523 			ASSERT(dip == NULL);
1524 			ret = 0;		/* can't fail now */
1525 			if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1526 				break;
1527 			}
1528 			(void) pm_thresh_specd(dip);
1529 			PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d)\n",
1530 			    cmdstr, PM_DEVICE(dip)))
1531 			PM_RELE(dip);
1532 			break;
1533 		}
1534 
1535 		case PM_RESET_DEVICE_THRESHOLD:
1536 		{
1537 			/*
1538 			 * This only applies to a currently attached and power
1539 			 * managed node
1540 			 */
1541 			/*
1542 			 * We don't do this to old-style drivers
1543 			 */
1544 			info = PM_GET_PM_INFO(dip);
1545 			if (info == NULL) {
1546 				PMD(PMD_ERROR, ("ioctl: %s: %s not power "
1547 				    "managed\n", cmdstr, req.physpath))
1548 				ret = EINVAL;
1549 				break;
1550 			}
1551 			if (PM_ISBC(dip)) {
1552 				PMD(PMD_ERROR, ("ioctl: %s: %s is BC\n",
1553 				    cmdstr, req.physpath))
1554 				ret = EINVAL;
1555 				break;
1556 			}
1557 			pm_unrecord_threshold(req.physpath);
1558 			pm_set_device_threshold(dip, pm_system_idle_threshold,
1559 			    PMC_DEF_THRESH);
1560 			ret = 0;
1561 			break;
1562 		}
1563 
1564 		case PM_GET_NUM_COMPONENTS:
1565 			ret = 0;
1566 			*rval_p = PM_NUMCMPTS(dip);
1567 			break;
1568 
1569 		case PM_GET_DEVICE_TYPE:
1570 			ret = 0;
1571 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1572 				PMD(PMD_ERROR, ("ioctl: %s: "
1573 				    "PM_NO_PM_COMPONENTS\n", cmdstr))
1574 				*rval_p = PM_NO_PM_COMPONENTS;
1575 				break;
1576 			}
1577 			if (PM_ISBC(dip)) {
1578 				*rval_p = PM_CREATE_COMPONENTS;
1579 			} else {
1580 				*rval_p = PM_AUTOPM;
1581 			}
1582 			break;
1583 
1584 		case PM_SET_COMPONENT_THRESHOLDS:
1585 		{
1586 			int comps = 0;
1587 			int *end = (int *)req.data + icount;
1588 			pm_thresh_rec_t *rp;
1589 			pm_pte_t *ep;	/* threshold header storage */
1590 			int *tp;	/* threshold storage */
1591 			int *ip;
1592 			int j;
1593 			size_t size;
1594 			extern int pm_thresh_specd(dev_info_t *);
1595 			extern int pm_valid_thresh(dev_info_t *,
1596 			    pm_thresh_rec_t *);
1597 
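			/*
			 * req.data is expected to be a flat int array: for each
			 * component, a count of thresholds followed by that many
			 * threshold values, terminated by a single 0 count, e.g.
			 * { 2, 10, 20, 1, 30, 0 } for a two-component device.
			 */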
1598 			for (ip = req.data; *ip; ip++) {
1599 				if (ip >= end) {
1600 					ret = EFAULT;
1601 					break;
1602 				}
1603 				comps++;
1604 				/* skip over indicated number of entries */
1605 				for (j = *ip; j; j--) {
1606 					if (++ip >= end) {
1607 						ret = EFAULT;
1608 						break;
1609 					}
1610 				}
1611 				if (ret)
1612 					break;
1613 			}
1614 			if (ret)
1615 				break;
1616 			if ((intptr_t)ip != (intptr_t)end - sizeof (int)) {
1617 				/* did not exactly fill buffer */
1618 				ret = EINVAL;
1619 				break;
1620 			}
1621 			if (comps == 0) {
1622 				PMD(PMD_ERROR, ("ioctl: %s: %s 0 components"
1623 				    "--EINVAL\n", cmdstr, req.physpath))
1624 				ret = EINVAL;
1625 				break;
1626 			}
1627 			/*
1628 			 * The header struct plus one entry struct per component
1629 			 * plus the size of the lists minus the counts
1630 			 * plus the length of the string
1631 			 */
1632 			size = sizeof (pm_thresh_rec_t) +
1633 			    (sizeof (pm_pte_t) * comps) + req.datasize -
1634 			    ((comps + 1) * sizeof (int)) +
1635 			    strlen(req.physpath) + 1;
1636 
1637 			rp = kmem_zalloc(size, KM_SLEEP);
1638 			rp->ptr_size = size;
1639 			rp->ptr_numcomps = comps;
1640 			ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1641 			rp->ptr_entries = ep;
1642 			tp = (int *)((intptr_t)ep +
1643 			    (comps * sizeof (pm_pte_t)));
1644 			for (ip = req.data; *ip; ep++) {
1645 				ep->pte_numthresh = *ip;
1646 				ep->pte_thresh = tp;
1647 				for (j = *ip++; j; j--) {
1648 					*tp++ = *ip++;
1649 				}
1650 			}
1651 			(void) strcat((char *)tp, req.physpath);
1652 			rp->ptr_physpath = (char *)tp;
1653 			ASSERT((intptr_t)end == (intptr_t)ip + sizeof (int));
1654 			ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1655 			    (intptr_t)rp + rp->ptr_size);
1656 
1657 			ASSERT(dip == NULL);
1658 			/*
1659 			 * If this is not a currently power managed node,
1660 			 * then we can't check for validity of the thresholds
1661 			 */
1662 			if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1663 				/* don't free rp, pm_record_thresh uses it */
1664 				pm_record_thresh(rp);
1665 				PMD(PMD_ERROR, ("ioctl: %s: pm_name_to_dip "
1666 				    "for %s failed\n", cmdstr, req.physpath))
1667 				ret = 0;
1668 				break;
1669 			}
1670 			ASSERT(!dipheld);
1671 			dipheld++;
1672 
1673 			if (!pm_valid_thresh(dip, rp)) {
1674 				PMD(PMD_ERROR, ("ioctl: %s: invalid thresh "
1675 				    "for %s@%s(%s#%d)\n", cmdstr,
1676 				    PM_DEVICE(dip)))
1677 				kmem_free(rp, size);
1678 				ret = EINVAL;
1679 				break;
1680 			}
1681 			/*
1682 			 * We don't just apply it ourselves because we'd need
1683 			 * to know too much about locking.  Since we don't
1684 			 * hold a lock, the entry could be removed before
1685 			 * we get here.
1686 			 */
1687 			pm_record_thresh(rp);
1688 			(void) pm_thresh_specd(dip);
1689 			ret = 0;
1690 			break;
1691 		}
1692 
1693 		case PM_GET_COMPONENT_THRESHOLDS:
1694 		{
1695 			int musthave;
1696 			int numthresholds = 0;
1697 			int wordsize;
1698 			int numcomps;
1699 			caddr_t uaddr = req.data;	/* user address */
1700 			int val;	/* int value to be copied out */
1701 			int32_t val32;	/* int32 value to be copied out */
1702 			caddr_t vaddr;	/* address to copyout from */
1703 			int j;
1704 
1705 #ifdef	_MULTI_DATAMODEL
1706 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1707 				wordsize = sizeof (int32_t);
1708 			} else
1709 #endif /* _MULTI_DATAMODEL */
1710 			{
1711 				wordsize = sizeof (int);
1712 			}
1713 
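			/*
			 * The output format mirrors the PM_SET_COMPONENT_THRESHOLDS
			 * input: for each component, a count (numlevels - 1)
			 * followed by that many thresholds, then a terminating 0
			 * count.  The musthave check below verifies req.data is
			 * large enough to hold all of it.
			 */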
1714 			ASSERT(dip);
1715 
1716 			numcomps = PM_NUMCMPTS(dip);
1717 			for (i = 0; i < numcomps; i++) {
1718 				cp = PM_CP(dip, i);
1719 				numthresholds += cp->pmc_comp.pmc_numlevels - 1;
1720 			}
1721 			musthave = (numthresholds + numcomps + 1) *  wordsize;
1722 			if (req.datasize < musthave) {
1723 				PMD(PMD_ERROR, ("ioctl: %s: size %ld, need "
1724 				    "%d--EINVAL\n", cmdstr, req.datasize,
1725 				    musthave))
1726 				ret = EINVAL;
1727 				break;
1728 			}
1729 			PM_LOCK_DIP(dip);
1730 			for (i = 0; i < numcomps; i++) {
1731 				int *thp;
1732 				cp = PM_CP(dip, i);
1733 				thp = cp->pmc_comp.pmc_thresh;
1734 				/* first copyout the count */
1735 				if (wordsize == sizeof (int32_t)) {
1736 					val32 = cp->pmc_comp.pmc_numlevels - 1;
1737 					vaddr = (caddr_t)&val32;
1738 				} else {
1739 					val = cp->pmc_comp.pmc_numlevels - 1;
1740 					vaddr = (caddr_t)&val;
1741 				}
1742 				if (ddi_copyout(vaddr, (void *)uaddr,
1743 				    wordsize, mode) != 0) {
1744 					PM_UNLOCK_DIP(dip);
1745 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1746 					    "(%s#%d) vaddr %p EFAULT\n",
1747 					    cmdstr, PM_DEVICE(dip),
1748 					    (void*)vaddr))
1749 					ret = EFAULT;
1750 					break;
1751 				}
1752 				vaddr = uaddr;
1753 				vaddr += wordsize;
1754 				uaddr = (caddr_t)vaddr;
1755 				/* then copyout each threshold value */
1756 				for (j = 0; j < cp->pmc_comp.pmc_numlevels - 1;
1757 				    j++) {
1758 					if (wordsize == sizeof (int32_t)) {
1759 						val32 = thp[j + 1];
1760 						vaddr = (caddr_t)&val32;
1761 					} else {
1762 						val = thp[j + 1];
1763 						vaddr = (caddr_t)&val;
1764 					}
1765 					if (ddi_copyout(vaddr, (void *) uaddr,
1766 					    wordsize, mode) != 0) {
1767 						PM_UNLOCK_DIP(dip);
1768 						PMD(PMD_ERROR, ("ioctl: %s: "
1769 						    "%s@%s(%s#%d) uaddr %p "
1770 						    "EFAULT\n", cmdstr,
1771 						    PM_DEVICE(dip),
1772 						    (void *)uaddr))
1773 						ret = EFAULT;
1774 						break;
1775 					}
1776 					vaddr = uaddr;
1777 					vaddr += wordsize;
1778 					uaddr = (caddr_t)vaddr;
1779 				}
1780 			}
1781 			if (ret)
1782 				break;
1783 			/* last copyout a terminating 0 count */
1784 			if (wordsize == sizeof (int32_t)) {
1785 				val32 = 0;
1786 				vaddr = (caddr_t)&val32;
1787 			} else {
1788 				ASSERT(wordsize == sizeof (int));
1789 				val = 0;
1790 				vaddr = (caddr_t)&val;
1791 			}
1792 			if (ddi_copyout(vaddr, uaddr, wordsize, mode) != 0) {
1793 				PM_UNLOCK_DIP(dip);
1794 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1795 				    "vaddr %p (0 count) EFAULT\n", cmdstr,
1796 				    PM_DEVICE(dip), (void *)vaddr))
1797 				ret = EFAULT;
1798 				break;
1799 			}
1800 			/* finished, so don't need to increment addresses */
1801 			PM_UNLOCK_DIP(dip);
1802 			ret = 0;
1803 			break;
1804 		}
1805 
1806 		case PM_GET_STATS:
1807 		{
1808 			time_t now;
1809 			time_t *timestamp;
1810 			extern int pm_cur_power(pm_component_t *cp);
1811 			int musthave;
1812 			int wordsize;
1813 
1814 #ifdef	_MULTI_DATAMODEL
1815 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1816 				wordsize = sizeof (int32_t);
1817 			} else
1818 #endif /* _MULTI_DATAMODEL */
1819 			{
1820 				wordsize = sizeof (int);
1821 			}
1822 
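			/*
			 * req.data receives 2 * comps words: the current power
			 * level of each component, followed by the time remaining
			 * before each component's threshold expires (INT_MAX if
			 * the component is already off or has no timestamp).
			 */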
1823 			comps = PM_NUMCMPTS(dip);
1824 			if (comps == 0 || PM_GET_PM_INFO(dip) == NULL) {
1825 				PMD(PMD_ERROR, ("ioctl: %s: %s no components"
1826 				    " or not power managed--EINVAL\n", cmdstr,
1827 				    req.physpath))
1828 				ret = EINVAL;
1829 				break;
1830 			}
1831 			musthave = comps * 2 * wordsize;
1832 			if (req.datasize < musthave) {
1833 				PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
1834 				    "%d--EINVAL\n", cmdstr, req.datasize,
1835 				    musthave))
1836 				ret = EINVAL;
1837 				break;
1838 			}
1839 
1840 			PM_LOCK_DIP(dip);
1841 			(void) drv_getparm(TIME, &now);
1842 			timestamp = kmem_zalloc(comps * sizeof (time_t),
1843 			    KM_SLEEP);
1844 			pm_get_timestamps(dip, timestamp);
1845 			/*
1846 			 * First the current power levels
1847 			 */
1848 			for (i = 0; i < comps; i++) {
1849 				int curpwr;
1850 				int32_t curpwr32;
1851 				caddr_t cpaddr;
1852 
1853 				cp = PM_CP(dip, i);
1854 				if (wordsize == sizeof (int)) {
1855 					curpwr = pm_cur_power(cp);
1856 					cpaddr = (caddr_t)&curpwr;
1857 				} else {
1858 					ASSERT(wordsize == sizeof (int32_t));
1859 					curpwr32 = pm_cur_power(cp);
1860 					cpaddr = (caddr_t)&curpwr32;
1861 				}
1862 				if (ddi_copyout(cpaddr, (void *) req.data,
1863 				    wordsize, mode) != 0) {
1864 					PM_UNLOCK_DIP(dip);
1865 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1866 					    "(%s#%d) req.data %p EFAULT\n",
1867 					    cmdstr, PM_DEVICE(dip),
1868 					    (void *)req.data))
1869 					ASSERT(!dipheld);
1870 					return (EFAULT);
1871 				}
1872 				cpaddr = (caddr_t)req.data;
1873 				cpaddr += wordsize;
1874 				req.data = cpaddr;
1875 			}
1876 			/*
1877 			 * Then the times remaining
1878 			 */
1879 			for (i = 0; i < comps; i++) {
1880 				int retval;
1881 				int32_t retval32;
1882 				caddr_t rvaddr;
1883 				int curpwr;
1884 
1885 				cp = PM_CP(dip, i);
1886 				curpwr = cp->pmc_cur_pwr;
1887 				if (curpwr == 0 || timestamp[i] == 0) {
1888 					PMD(PMD_STATS, ("ioctl: %s: "
1889 					    "cur_pwer %x, timestamp %lx\n",
1890 					    cmdstr, curpwr, timestamp[i]))
1891 					retval = INT_MAX;
1892 				} else {
1893 					int thresh;
1894 					(void) pm_current_threshold(dip, i,
1895 					    &thresh);
1896 					retval = thresh - (now - timestamp[i]);
1897 					PMD(PMD_STATS, ("ioctl: %s: current "
1898 					    "thresh %x, now %lx, timestamp %lx,"
1899 					    " retval %x\n", cmdstr, thresh, now,
1900 					    timestamp[i], retval))
1901 				}
1902 				if (wordsize == sizeof (int)) {
1903 					rvaddr = (caddr_t)&retval;
1904 				} else {
1905 					ASSERT(wordsize == sizeof (int32_t));
1906 					retval32 = retval;
1907 					rvaddr = (caddr_t)&retval32;
1908 				}
1909 				if (ddi_copyout(rvaddr, (void *) req.data,
1910 				    wordsize, mode) != 0) {
1911 					PM_UNLOCK_DIP(dip);
1912 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1913 					    "(%s#%d) req.data %p EFAULT\n",
1914 					    cmdstr, PM_DEVICE(dip),
1915 					    (void *)req.data))
1916 					ASSERT(!dipheld);
					kmem_free(timestamp,
					    comps * sizeof (time_t));
1917 					return (EFAULT);
1918 				}
1919 				rvaddr = (caddr_t)req.data;
1920 				rvaddr += wordsize;
1921 				req.data = (int *)rvaddr;
1922 			}
1923 			PM_UNLOCK_DIP(dip);
1924 			*rval_p = comps;
1925 			ret = 0;
1926 			kmem_free(timestamp, comps * sizeof (time_t));
1927 			break;
1928 		}
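		/*
		 * Illustrative user-level sketch (not part of this driver),
		 * assuming the pm_req_t layout from <sys/pm.h>, a descriptor
		 * open on /dev/pm, a hypothetical device path, and MAXCOMP
		 * as an assumed upper bound on the component count:
		 *
		 *	int stats[2 * MAXCOMP];
		 *	pm_req_t req;
		 *
		 *	req.physpath = "/pci@0,0/display@1";
		 *	req.data = stats;
		 *	req.datasize = sizeof (stats);
		 *	ncomp = ioctl(fd, PM_GET_STATS, &req);
		 *
		 * On success, ncomp is the component count and stats[] holds
		 * the current levels followed by the remaining times.
		 */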
1929 
1930 		case PM_GET_COMPONENT_NAME:
1931 			ASSERT(dip);
1932 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
1933 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1934 				    "component %d > numcmpts - 1 %d--EINVAL\n",
1935 				    cmdstr, PM_DEVICE(dip), req.component,
1936 				    PM_NUMCMPTS(dip) - 1))
1937 				ret = EINVAL;
1938 				break;
1939 			}
1940 			if (ret = copyoutstr(cp->pmc_comp.pmc_name,
1941 			    (char *)req.data, req.datasize, &lencopied)) {
1942 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1943 				    "copyoutstr %p failed--EFAULT\n", cmdstr,
1944 				    PM_DEVICE(dip), (void *)req.data))
1945 				break;
1946 			}
1947 			*rval_p = lencopied;
1948 			ret = 0;
1949 			break;
1950 
1951 		case PM_GET_POWER_NAME:
1952 		{
1953 			int i;
1954 
1955 			ASSERT(dip);
1956 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
1957 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1958 				    "component %d > numcmpts - 1 %d--EINVAL\n",
1959 				    cmdstr, PM_DEVICE(dip), req.component,
1960 				    PM_NUMCMPTS(dip) - 1))
1961 				ret = EINVAL;
1962 				break;
1963 			}
1964 			if ((i = req.value) < 0 ||
1965 			    i > cp->pmc_comp.pmc_numlevels - 1) {
1966 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1967 				    "value %d > num_levels - 1 %d--EINVAL\n",
1968 				    cmdstr, PM_DEVICE(dip), req.value,
1969 				    cp->pmc_comp.pmc_numlevels - 1))
1970 				ret = EINVAL;
1971 				break;
1972 			}
1973 			dep = cp->pmc_comp.pmc_lnames[req.value];
1974 			if (ret = copyoutstr(dep,
1975 			    req.data, req.datasize, &lencopied)) {
1976 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1977 				    "copyoutstr %p failed--EFAULT\n", cmdstr,
1978 				    PM_DEVICE(dip), (void *)req.data))
1979 				break;
1980 			}
1981 			*rval_p = lencopied;
1982 			ret = 0;
1983 			break;
1984 		}
1985 
1986 		case PM_GET_POWER_LEVELS:
1987 		{
1988 			int musthave;
1989 			int numlevels;
1990 			int wordsize;
1991 
1992 #ifdef	_MULTI_DATAMODEL
1993 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1994 				wordsize = sizeof (int32_t);
1995 			} else
1996 #endif /* _MULTI_DATAMODEL */
1997 			{
1998 				wordsize = sizeof (int);
1999 			}
2000 			ASSERT(dip);
2001 
2002 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2003 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2004 				    "has %d components, component %d requested"
2005 				    "--EINVAL\n", cmdstr, PM_DEVICE(dip),
2006 				    PM_NUMCMPTS(dip), req.component))
2007 				ret = EINVAL;
2008 				break;
2009 			}
2010 			numlevels = cp->pmc_comp.pmc_numlevels;
2011 			musthave = numlevels * wordsize;
2012 			if (req.datasize < musthave) {
2013 				PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
2014 				    "%d--EINVAL\n", cmdstr, req.datasize,
2015 				    musthave))
2016 				ret = EINVAL;
2017 				break;
2018 			}
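			/*
			 * Copy the supported power levels out one word at a
			 * time in the caller's word size; *rval_p returns the
			 * number of levels.
			 */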
2019 			PM_LOCK_DIP(dip);
2020 			for (i = 0; i < numlevels; i++) {
2021 				int level;
2022 				int32_t level32;
2023 				caddr_t laddr;
2024 
2025 				if (wordsize == sizeof (int)) {
2026 					level = cp->pmc_comp.pmc_lvals[i];
2027 					laddr = (caddr_t)&level;
2028 				} else {
2029 					level32 = cp->pmc_comp.pmc_lvals[i];
2030 					laddr = (caddr_t)&level32;
2031 				}
2032 				if (ddi_copyout(laddr, (void *) req.data,
2033 				    wordsize, mode) != 0) {
2034 					PM_UNLOCK_DIP(dip);
2035 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
2036 					    "(%s#%d) laddr %p EFAULT\n",
2037 					    cmdstr, PM_DEVICE(dip),
2038 					    (void *)laddr))
2039 					ASSERT(!dipheld);
2040 					return (EFAULT);
2041 				}
2042 				laddr = (caddr_t)req.data;
2043 				laddr += wordsize;
2044 				req.data = (int *)laddr;
2045 			}
2046 			PM_UNLOCK_DIP(dip);
2047 			*rval_p = numlevels;
2048 			ret = 0;
2049 			break;
2050 		}
2051 
2052 
2053 		case PM_GET_NUM_POWER_LEVELS:
2054 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2055 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2056 				    "component %d > numcmpts - 1 %d--EINVAL\n",
2057 				    cmdstr, PM_DEVICE(dip), req.component,
2058 				    PM_NUMCMPTS(dip) - 1))
2059 				ret = EINVAL;
2060 				break;
2061 			}
2062 			*rval_p = cp->pmc_comp.pmc_numlevels;
2063 			ret = 0;
2064 			break;
2065 
2066 		case PM_GET_DEVICE_THRESHOLD_BASIS:
2067 			ret = 0;
2068 			PM_LOCK_DIP(dip);
2069 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
2070 				PM_UNLOCK_DIP(dip);
2071 				PMD(PMD_ERROR, ("ioctl: %s: "
2072 				    "PM_NO_PM_COMPONENTS\n", cmdstr))
2073 				*rval_p = PM_NO_PM_COMPONENTS;
2074 				break;
2075 			}
2076 			if (PM_ISDIRECT(dip)) {
2077 				PM_UNLOCK_DIP(dip);
2078 				*rval_p = PM_DIRECTLY_MANAGED;
2079 				break;
2080 			}
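			/*
			 * Report where the device's thresholds come from:
			 * system default (or nexus default), per-device,
			 * per-component, or the old scheme for BC devices.
			 */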
2081 			switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
2082 			case PMC_DEF_THRESH:
2083 			case PMC_NEXDEF_THRESH:
2084 				*rval_p = PM_DEFAULT_THRESHOLD;
2085 				break;
2086 			case PMC_DEV_THRESH:
2087 				*rval_p = PM_DEVICE_THRESHOLD;
2088 				break;
2089 			case PMC_COMP_THRESH:
2090 				*rval_p = PM_COMPONENT_THRESHOLD;
2091 				break;
2092 			default:
2093 				if (PM_ISBC(dip)) {
2094 					*rval_p = PM_OLD_THRESHOLD;
2095 					break;
2096 				}
2097 				PMD(PMD_ERROR, ("ioctl: %s: default, not "
2098 				    "BC--EINVAL", cmdstr))
2099 				ret = EINVAL;
2100 				break;
2101 			}
2102 			PM_UNLOCK_DIP(dip);
2103 			break;
2104 		}
2105 		break;
2106 
2107 	case PM_PSC:
2108 		/*
2109 		 * Commands that require pm_state_change_t as arg
2110 		 */
2111 #ifdef	_MULTI_DATAMODEL
2112 		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2113 			pscp32 = (pm_state_change32_t *)arg;
2114 			if (ddi_copyin((caddr_t)arg, &psc32,
2115 			    sizeof (psc32), mode) != 0) {
2116 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2117 				    "EFAULT\n\n", cmdstr))
2118 				ASSERT(!dipheld);
2119 				return (EFAULT);
2120 			}
2121 			psc.physpath = (caddr_t)(uintptr_t)psc32.physpath;
2122 			psc.size = psc32.size;
2123 		} else
2124 #endif /* _MULTI_DATAMODEL */
2125 		{
2126 			pscp = (pm_state_change_t *)arg;
2127 			if (ddi_copyin((caddr_t)arg, &psc,
2128 			    sizeof (psc), mode) != 0) {
2129 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2130 				    "EFAULT\n\n", cmdstr))
2131 				ASSERT(!dipheld);
2132 				return (EFAULT);
2133 			}
2134 		}
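		/*
		 * Only psc.physpath and psc.size from the caller are
		 * consulted below; the other fields are filled in from the
		 * queued event before being copied back out.
		 */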
2135 		switch (cmd) {
2136 
2137 		case PM_GET_STATE_CHANGE:
2138 		case PM_GET_STATE_CHANGE_WAIT:
2139 		{
2140 			psce_t			*pscep;
2141 			pm_state_change_t	*p;
2142 			caddr_t			physpath;
2143 			size_t			physlen;
2144 
2145 			/*
2146 			 * We want to know if any device has changed state.
2147 			 * We look up by clone.  In case we have another thread
2148 			 * from the same process, we loop.
2149 			 * pm_psc_clone_to_interest() returns a locked entry.
2150 			 * We create an internal copy of the event entry prior
2151 			 * to copyout to user space because we don't want to
2152 			 * hold the psce_lock while doing copyout as we might
2153 			 * hit a page fault, which eventually brings us back
2154 			 * here requesting the same lock.
2155 			 */
2156 			mutex_enter(&pm_clone_lock);
2157 			if (!pm_interest_registered(clone))
2158 				pm_register_watcher(clone, NULL);
2159 			while ((pscep =
2160 			    pm_psc_clone_to_interest(clone)) == NULL) {
2161 				if (cmd == PM_GET_STATE_CHANGE) {
2162 					PMD(PMD_IOCTL, ("ioctl: %s: "
2163 					    "EWOULDBLOCK\n", cmdstr))
2164 					mutex_exit(&pm_clone_lock);
2165 					ASSERT(!dipheld);
2166 					return (EWOULDBLOCK);
2167 				} else {
2168 					if (cv_wait_sig(&pm_clones_cv[clone],
2169 					    &pm_clone_lock) == 0) {
2170 						mutex_exit(&pm_clone_lock);
2171 						PMD(PMD_ERROR, ("ioctl: %s "
2172 						    "EINTR\n", cmdstr))
2173 						ASSERT(!dipheld);
2174 						return (EINTR);
2175 					}
2176 				}
2177 			}
2178 			mutex_exit(&pm_clone_lock);
2179 
2180 			physlen = pscep->psce_out->size;
2181 			physpath = NULL;
2182 			/*
2183 			 * If we were unable to store the path while bringing
2184 			 * up the console fb upon entering the prom, we give
2185 			 * a "" name with the overrun event set
2186 			 */
2187 			if (physlen == (size_t)-1) {	/* kmem alloc failed */
2188 				physpath = kmem_zalloc(1, KM_SLEEP);
2189 				physlen = 1;
2190 			}
2191 			if ((psc.physpath == NULL) || (psc.size < physlen)) {
2192 				PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n", cmdstr))
2193 				mutex_exit(&pscep->psce_lock);
				if (physpath != NULL)
					kmem_free(physpath, physlen);
2194 				ret = EFAULT;
2195 				break;
2196 			}
2197 			if (physpath == NULL) {
2198 				physpath = kmem_zalloc(physlen, KM_SLEEP);
2199 				bcopy((const void *) pscep->psce_out->physpath,
2200 				    (void *) physpath, physlen);
2201 			}
2202 
2203 			p = pscep->psce_out;
2204 #ifdef	_MULTI_DATAMODEL
2205 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2206 #ifdef DEBUG
2207 				size_t usrcopysize;
2208 #endif
2209 				psc32.flags = (ushort_t)p->flags;
2210 				psc32.event = (ushort_t)p->event;
2211 				psc32.timestamp = (int32_t)p->timestamp;
2212 				psc32.component = (int32_t)p->component;
2213 				psc32.old_level = (int32_t)p->old_level;
2214 				psc32.new_level = (int32_t)p->new_level;
2215 				copysize32 = ((intptr_t)&psc32.size -
2216 				    (intptr_t)&psc32.component);
2217 #ifdef DEBUG
2218 				usrcopysize = ((intptr_t)&pscp32->size -
2219 				    (intptr_t)&pscp32->component);
2220 				ASSERT(usrcopysize == copysize32);
2221 #endif
2222 			} else
2223 #endif /* _MULTI_DATAMODEL */
2224 			{
2225 				psc.flags = p->flags;
2226 				psc.event = p->event;
2227 				psc.timestamp = p->timestamp;
2228 				psc.component = p->component;
2229 				psc.old_level = p->old_level;
2230 				psc.new_level = p->new_level;
2231 				copysize = ((long)&p->size -
2232 				    (long)&p->component);
2233 			}
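			/*
			 * The fields from component up to (but not including)
			 * size are copied out in one ddi_copyout further
			 * down; the physpath string goes separately via
			 * copyoutstr.  Consume the entry now: free its saved
			 * path and advance psce_out around the circular
			 * event buffer.
			 */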
2234 			if (p->size != (size_t)-1)
2235 				kmem_free(p->physpath, p->size);
2236 			p->size = 0;
2237 			p->physpath = NULL;
2238 			if (pscep->psce_out == pscep->psce_last)
2239 				p = pscep->psce_first;
2240 			else
2241 				p++;
2242 			pscep->psce_out = p;
2243 			mutex_exit(&pscep->psce_lock);
2244 
2245 			ret = copyoutstr(physpath, psc.physpath,
2246 			    physlen, &lencopied);
2247 			kmem_free(physpath, physlen);
2248 			if (ret) {
2249 				PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2250 				    "failed--EFAULT\n", cmdstr,
2251 				    (void *)psc.physpath))
2252 				break;
2253 			}
2254 
2255 #ifdef	_MULTI_DATAMODEL
2256 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2257 				if (ddi_copyout(&psc32.component,
2258 				    &pscp32->component, copysize32, mode)
2259 				    != 0) {
2260 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2261 					    "failed--EFAULT\n", cmdstr))
2262 					ret = EFAULT;
2263 					break;
2264 				}
2265 			} else
2266 #endif	/* _MULTI_DATAMODEL */
2267 			{
2268 				if (ddi_copyout(&psc.component,
2269 				    &pscp->component, copysize, mode) != 0) {
2270 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2271 					    "failed--EFAULT\n", cmdstr))
2272 					ret = EFAULT;
2273 					break;
2274 				}
2275 			}
2276 			ret = 0;
2277 			break;
2278 		}
2279 
2280 		case PM_DIRECT_NOTIFY:
2281 		case PM_DIRECT_NOTIFY_WAIT:
2282 		{
2283 			psce_t			*pscep;
2284 			pm_state_change_t	*p;
2285 			caddr_t			physpath;
2286 			size_t			physlen;
2287 			/*
2288 			 * We want to know if any direct device of ours has
2289 			 * something we should know about.  We look up by clone.
2290 			 * In case we have another thread from the same process,
2291 			 * we loop.
2292 			 * pm_psc_clone_to_direct() returns a locked entry.
2293 			 */
2294 			mutex_enter(&pm_clone_lock);
2295 			while (pm_poll_cnt[clone] == 0 ||
2296 			    (pscep = pm_psc_clone_to_direct(clone)) == NULL) {
2297 				if (cmd == PM_DIRECT_NOTIFY) {
2298 					PMD(PMD_IOCTL, ("ioctl: %s: "
2299 					    "EWOULDBLOCK\n", cmdstr))
2300 					mutex_exit(&pm_clone_lock);
2301 					ASSERT(!dipheld);
2302 					return (EWOULDBLOCK);
2303 				} else {
2304 					if (cv_wait_sig(&pm_clones_cv[clone],
2305 					    &pm_clone_lock) == 0) {
2306 						mutex_exit(&pm_clone_lock);
2307 						PMD(PMD_ERROR, ("ioctl: %s: "
2308 						    "EINTR\n", cmdstr))
2309 						ASSERT(!dipheld);
2310 						return (EINTR);
2311 					}
2312 				}
2313 			}
2314 			mutex_exit(&pm_clone_lock);
2315 			physlen = pscep->psce_out->size;
2316 			if ((psc.physpath == NULL) || (psc.size < physlen)) {
2317 				mutex_exit(&pscep->psce_lock);
2318 				PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n",
2319 				    cmdstr))
2320 				ret = EFAULT;
2321 				break;
2322 			}
2323 			physpath = kmem_zalloc(physlen, KM_SLEEP);
2324 			bcopy((const void *) pscep->psce_out->physpath,
2325 			    (void *) physpath, physlen);
2326 
2327 			p = pscep->psce_out;
2328 #ifdef	_MULTI_DATAMODEL
2329 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2330 #ifdef DEBUG
2331 				size_t usrcopysize;
2332 #endif
2333 				psc32.component = (int32_t)p->component;
2334 				psc32.flags = (ushort_t)p->flags;
2335 				psc32.event = (ushort_t)p->event;
2336 				psc32.timestamp = (int32_t)p->timestamp;
2337 				psc32.old_level = (int32_t)p->old_level;
2338 				psc32.new_level = (int32_t)p->new_level;
2339 				copysize32 = (intptr_t)&psc32.size -
2340 				    (intptr_t)&psc32.component;
2341 				PMD(PMD_DPM, ("ioctl: %s: PDN32 %s, comp %d "
2342 				    "%d -> %d\n", cmdstr, physpath,
2343 				    p->component, p->old_level, p->new_level))
2344 #ifdef DEBUG
2345 				usrcopysize = (intptr_t)&pscp32->size -
2346 				    (intptr_t)&pscp32->component;
2347 				ASSERT(usrcopysize == copysize32);
2348 #endif
2349 			} else
2350 #endif
2351 			{
2352 				psc.component = p->component;
2353 				psc.flags = p->flags;
2354 				psc.event = p->event;
2355 				psc.timestamp = p->timestamp;
2356 				psc.old_level = p->old_level;
2357 				psc.new_level = p->new_level;
2358 				copysize = (intptr_t)&p->size -
2359 				    (intptr_t)&p->component;
2360 				PMD(PMD_DPM, ("ioctl: %s: PDN %s, comp %d "
2361 				    "%d -> %d\n", cmdstr, physpath,
2362 				    p->component, p->old_level, p->new_level))
2363 			}
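			/*
			 * The event has been captured locally; decrement the
			 * outstanding-poll count for this clone and consume
			 * the entry from the circular buffer before doing the
			 * copyouts.
			 */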
2364 			mutex_enter(&pm_clone_lock);
2365 			PMD(PMD_IOCTL, ("ioctl: %s: pm_poll_cnt[%d] is %d "
2366 			    "before decrement\n", cmdstr, clone,
2367 			    pm_poll_cnt[clone]))
2368 			pm_poll_cnt[clone]--;
2369 			mutex_exit(&pm_clone_lock);
2370 			kmem_free(p->physpath, p->size);
2371 			p->size = 0;
2372 			p->physpath = NULL;
2373 			if (pscep->psce_out == pscep->psce_last)
2374 				p = pscep->psce_first;
2375 			else
2376 				p++;
2377 			pscep->psce_out = p;
2378 			mutex_exit(&pscep->psce_lock);
2379 
2380 			ret = copyoutstr(physpath, psc.physpath,
2381 			    physlen, &lencopied);
2382 			kmem_free(physpath, physlen);
2383 			if (ret) {
2384 				PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2385 				    "failed--EFAULT\n", cmdstr,
2386 				    (void *)psc.physpath))
2387 				break;
2388 			}
2389 
2390 #ifdef	_MULTI_DATAMODEL
2391 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2392 				if (ddi_copyout(&psc32.component,
2393 				    &pscp32->component, copysize32, mode)
2394 				    != 0) {
2395 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2396 					    "failed--EFAULT\n", cmdstr))
2397 					ret = EFAULT;
2398 					break;
2399 				}
2400 			} else
2401 #endif	/* _MULTI_DATAMODEL */
2402 			{
2403 				if (ddi_copyout(&psc.component,
2404 				    &pscp->component, copysize, mode) != 0) {
2405 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2406 					    "failed--EFAULT\n", cmdstr))
2407 					ret = EFAULT;
2408 					break;
2409 				}
2410 			}
2411 			ret = 0;
2412 			break;
2413 		}
2414 		default:
2415 			ASSERT(0);
2416 		}
2417 		break;
2418 
2419 	case NOSTRUCT:
2420 		switch (cmd) {
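		/*
		 * These commands take no argument structure; where arg is
		 * used at all it is interpreted as a plain integer value.
		 */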
2421 		case PM_START_PM:
2422 			mutex_enter(&pm_scan_lock);
2423 			if (autopm_enabled) {
2424 				mutex_exit(&pm_scan_lock);
2425 				PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
2426 				    cmdstr))
2427 				ret = EBUSY;
2428 				break;
2429 			}
2430 			autopm_enabled = 1;
2431 			mutex_exit(&pm_scan_lock);
2432 			ddi_walk_devs(ddi_root_node(), pm_start_pm_walk, &cmd);
2433 			ret = 0;
2434 			break;
2435 
2436 		case PM_RESET_PM:
2437 		case PM_STOP_PM:
2438 		{
2439 			extern void pm_discard_thresholds(void);
2440 
2441 			mutex_enter(&pm_scan_lock);
2442 			if (!autopm_enabled && cmd != PM_RESET_PM) {
2443 				mutex_exit(&pm_scan_lock);
2444 				PMD(PMD_ERROR, ("ioctl: %s: EINVAL\n",
2445 				    cmdstr))
2446 				ret = EINVAL;
2447 				break;
2448 			}
2449 			autopm_enabled = 0;
2450 			mutex_exit(&pm_scan_lock);
2451 			/*
2452 			 * bring devices to full power level, stop scan
2453 			 */
2454 			ddi_walk_devs(ddi_root_node(), pm_stop_pm_walk, &cmd);
2455 			ret = 0;
2456 			if (cmd == PM_STOP_PM)
2457 				break;
2458 			/*
2459 			 * Now do the remaining work specific to PM_RESET_PM.
2460 			 */
2461 			pm_system_idle_threshold = pm_default_idle_threshold;
2462 			pm_discard_thresholds();
2463 			pm_all_to_default_thresholds();
2464 			pm_dispatch_to_dep_thread(PM_DEP_WK_REMOVE_DEP,
2465 			    NULL, NULL, PM_DEP_WAIT, NULL, 0);
2466 			break;
2467 		}
2468 
2469 		case PM_GET_SYSTEM_THRESHOLD:
2470 			*rval_p = pm_system_idle_threshold;
2471 			ret = 0;
2472 			break;
2473 
2474 		case PM_GET_DEFAULT_SYSTEM_THRESHOLD:
2475 			*rval_p = pm_default_idle_threshold;
2476 			ret = 0;
2477 			break;
2478 
2479 		case PM_SET_SYSTEM_THRESHOLD:
2480 			if ((int)arg < 0) {
2481 				PMD(PMD_ERROR, ("ioctl: %s: arg 0x%x < 0"
2482 				    "--EINVAL\n", cmdstr, (int)arg))
2483 				ret = EINVAL;
2484 				break;
2485 			}
2486 			PMD(PMD_IOCTL, ("ioctl: %s: 0x%x 0t%d\n", cmdstr,
2487 			    (int)arg, (int)arg))
2488 			pm_system_idle_threshold = (int)arg;
2489 			ddi_walk_devs(ddi_root_node(), pm_set_sys_threshold,
2490 			    (void *) &pm_system_idle_threshold);
2491 			ret = 0;
2492 			break;
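		/*
		 * Illustrative user-level sketch (not part of this driver):
		 * the new threshold is passed directly as the ioctl argument,
		 * e.g. ioctl(fd, PM_SET_SYSTEM_THRESHOLD, thresh) with a
		 * non-negative integer thresh.
		 */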
2493 
2494 		case PM_IDLE_DOWN:
2495 			if (pm_timeout_idledown() != 0) {
2496 				ddi_walk_devs(ddi_root_node(),
2497 				    pm_start_idledown, (void *)PMID_IOC);
2498 			}
2499 			ret = 0;
2500 			break;
2501 
2502 		case PM_GET_PM_STATE:
2503 			if (autopm_enabled) {
2504 				*rval_p = PM_SYSTEM_PM_ENABLED;
2505 			} else {
2506 				*rval_p = PM_SYSTEM_PM_DISABLED;
2507 			}
2508 			ret = 0;
2509 			break;
2510 		}
2511 		break;
2512 
2513 
2514 	default:
2515 		/*
2516 		 * Internal error: invalid ioctl description;
2517 		 * force a debug entry even if pm_debug is not set.
2518 		 */
2519 #ifdef	DEBUG
2520 		pm_log("ioctl: invalid str_type %d for cmd %d (%s)\n",
2521 		    pcip->str_type, cmd, pcip->name);
2522 #endif
2523 		ASSERT(0);
2524 		return (EIO);
2525 	}
2526 	ASSERT(ret != 0x0badcafe);	/* some cmd in wrong case! */
2527 	if (dipheld) {
2528 		ASSERT(dip);
2529 		PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d) for "
2530 		    "exiting pm_ioctl\n", cmdstr, PM_DEVICE(dip)))
2531 		PM_RELE(dip);
2532 	}
2533 	PMD(PMD_IOCTL, ("ioctl: %s: end, ret=%d\n", cmdstr, ret))
2534 	return (ret);
2535 }
2536