xref: /titanic_50/usr/src/uts/common/io/pm.c (revision dfb96a4f56fb431b915bc67e5d9d5c8d4f4f6679)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * pm	This driver now only handles the ioctl interface.  The scanning
30  *	and policy stuff now lives in common/os/sunpm.c.
31  *	Not DDI compliant
32  */
33 
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/modctl.h>
37 #include <sys/conf.h>		/* driver flags and functions */
38 #include <sys/open.h>		/* OTYP_CHR definition */
39 #include <sys/stat.h>		/* S_IFCHR definition */
40 #include <sys/pathname.h>	/* name -> dev_info xlation */
41 #include <sys/kmem.h>		/* memory alloc stuff */
42 #include <sys/debug.h>
43 #include <sys/pm.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/epm.h>
47 #include <sys/vfs.h>
48 #include <sys/mode.h>
49 #include <sys/mkdev.h>
50 #include <sys/promif.h>
51 #include <sys/consdev.h>
52 #include <sys/ddi_impldefs.h>
53 #include <sys/poll.h>
54 #include <sys/note.h>
55 #include <sys/taskq.h>
56 #include <sys/policy.h>
57 
58 /*
59  * Minor number is (instance << 8) + clone, where clone is in the
60  * range 1-255 (clone 0 is reserved for the "original" minor).
61  */
62 #define	PM_MINOR_TO_CLONE(minor) ((minor) & (PM_MAX_CLONE - 1))
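
/*
 * Worked example (illustrative; assumes PM_MAX_CLONE is 256): a minor
 * number of 0x305 decodes to instance 3 (0x305 >> 8) and clone 5
 * (PM_MINOR_TO_CLONE(0x305) == (0x305 & 0xff)).
 */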
63 
64 #define	PM_NUMCMPTS(dip) (DEVI(dip)->devi_pm_num_components)
65 #define	PM_IS_CFB(dip) (DEVI(dip)->devi_pm_flags & PMC_CONSOLE_FB)
66 #define	PM_MAJOR(dip) ddi_driver_major(dip)
67 #define	PM_RELE(dip) ddi_release_devi(dip)
68 
69 #define	PM_IDLEDOWN_TIME	10
70 
71 extern kmutex_t	pm_scan_lock;	/* protects autopm_enabled, pm_scans_disabled */
72 extern kmutex_t	pm_clone_lock;	/* protects pm_clones array */
73 extern int	autopm_enabled;
74 extern pm_cpupm_t cpupm;
75 extern int	pm_default_idle_threshold;
76 extern int	pm_system_idle_threshold;
77 extern int	pm_cpu_idle_threshold;
78 extern kcondvar_t pm_clones_cv[PM_MAX_CLONE];
79 extern uint_t	pm_poll_cnt[PM_MAX_CLONE];
80 
81 /*
82  * The soft state of the power manager.  Since there will only be
83  * one of these, just reference it through a static pointer.
84  */
85 static struct pmstate {
86 	dev_info_t	*pm_dip;		/* ptr to our dev_info node */
87 	int		pm_instance;		/* for ddi_get_instance() */
88 	timeout_id_t	pm_idledown_id;		/* pm idledown timeout id */
89 	uchar_t		pm_clones[PM_MAX_CLONE]; /* uniqueify multiple opens */
90 	struct cred	*pm_cred[PM_MAX_CLONE];	/* cred for each unique open */
91 } pm_state = { NULL, -1, (timeout_id_t)0 };
92 typedef struct pmstate *pm_state_t;
93 static pm_state_t pmstp = &pm_state;
94 
95 static int	pm_open(dev_t *, int, int, cred_t *);
96 static int	pm_close(dev_t, int, int, cred_t *);
97 static int	pm_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
98 static int	pm_chpoll(dev_t, short, int, short *, struct pollhead **);
99 
100 static struct cb_ops pm_cb_ops = {
101 	pm_open,	/* open */
102 	pm_close,	/* close */
103 	nodev,		/* strategy */
104 	nodev,		/* print */
105 	nodev,		/* dump */
106 	nodev,		/* read */
107 	nodev,		/* write */
108 	pm_ioctl,	/* ioctl */
109 	nodev,		/* devmap */
110 	nodev,		/* mmap */
111 	nodev,		/* segmap */
112 	pm_chpoll,	/* poll */
113 	ddi_prop_op,	/* prop_op */
114 	NULL,		/* streamtab */
115 	D_NEW | D_MP	/* driver compatibility flag */
116 };
117 
118 static int pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
119     void **result);
120 static int pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
121 static int pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
122 
123 static struct dev_ops pm_ops = {
124 	DEVO_REV,		/* devo_rev */
125 	0,			/* refcnt */
126 	pm_getinfo,		/* info */
127 	nulldev,		/* identify */
128 	nulldev,		/* probe */
129 	pm_attach,		/* attach */
130 	pm_detach,		/* detach */
131 	nodev,			/* reset */
132 	&pm_cb_ops,		/* driver operations */
133 	NULL,			/* bus operations */
134 	NULL			/* power */
135 };
136 
137 static struct modldrv modldrv = {
138 	&mod_driverops,
139 	"power management driver v%I%",
140 	&pm_ops
141 };
142 
143 static struct modlinkage modlinkage = {
144 	MODREV_1, &modldrv, 0
145 };
146 
147 /* Local functions */
148 #ifdef DEBUG
149 static int	print_info(dev_info_t *, void *);
150 
151 #endif
152 
153 int
154 _init(void)
155 {
156 	return (mod_install(&modlinkage));
157 }
158 
159 int
160 _fini(void)
161 {
162 	return (mod_remove(&modlinkage));
163 }
164 
165 int
166 _info(struct modinfo *modinfop)
167 {
168 	return (mod_info(&modlinkage, modinfop));
169 }
170 
171 static int
172 pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
173 {
174 	int		i;
175 
176 	switch (cmd) {
177 
178 	case DDI_ATTACH:
179 		if (pmstp->pm_instance != -1)	/* Only allow one instance */
180 			return (DDI_FAILURE);
181 		pmstp->pm_instance = ddi_get_instance(dip);
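		/*
		 * Create the clone-0 ("original") minor node; per-open clone
		 * minors 1 through PM_MAX_CLONE - 1 are handed out later by
		 * pm_open().
		 */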
182 		if (ddi_create_minor_node(dip, "pm", S_IFCHR,
183 		    (pmstp->pm_instance << 8) + 0,
184 		    DDI_PSEUDO, 0) != DDI_SUCCESS) {
185 			return (DDI_FAILURE);
186 		}
187 		pmstp->pm_dip = dip;	/* pm_init and getinfo depend on it */
188 
189 		for (i = 0; i < PM_MAX_CLONE; i++)
190 			cv_init(&pm_clones_cv[i], NULL, CV_DEFAULT, NULL);
191 
192 		ddi_report_dev(dip);
193 		return (DDI_SUCCESS);
194 
195 	default:
196 		return (DDI_FAILURE);
197 	}
198 }
199 
200 /* ARGSUSED */
201 static int
202 pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
203 {
204 	int i;
205 
206 	switch (cmd) {
207 	case DDI_DETACH:
208 		/*
209 		 * Don't detach while idledown timeout is pending.  Note that
210 		 * we already know we're not in pm_ioctl() due to framework
211 		 * synchronization, so this is a sufficient test
212 		 */
213 		if (pmstp->pm_idledown_id)
214 			return (DDI_FAILURE);
215 
216 		for (i = 0; i < PM_MAX_CLONE; i++)
217 			cv_destroy(&pm_clones_cv[i]);
218 
219 		ddi_remove_minor_node(dip, NULL);
220 		pmstp->pm_instance = -1;
221 		return (DDI_SUCCESS);
222 
223 	default:
224 		return (DDI_FAILURE);
225 	}
226 }
227 
228 static int
229 pm_close_direct_pm_device(dev_info_t *dip, void *arg)
230 {
231 	int clone;
232 	char *pathbuf;
233 	pm_info_t *info = PM_GET_PM_INFO(dip);
234 
235 	clone = *((int *)arg);
236 
237 	if (!info)
238 		return (DDI_WALK_CONTINUE);
239 
240 	pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
241 	PM_LOCK_DIP(dip);
242 	if (clone == info->pmi_clone) {
243 		PMD(PMD_CLOSE, ("pm_close: found %s@%s(%s#%d)\n",
244 		    PM_DEVICE(dip)))
245 		ASSERT(PM_ISDIRECT(dip));
246 		info->pmi_dev_pm_state &= ~PM_DIRECT;
247 		PM_UNLOCK_DIP(dip);
248 		pm_proceed(dip, PMP_RELEASE, -1, -1);
249 		/* Bring ourselves up if there is a keeper that is up */
250 		(void) ddi_pathname(dip, pathbuf);
251 		pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF, NULL,
252 		    pathbuf, PM_DEP_NOWAIT, NULL, 0);
253 		PM_LOCK_DIP(dip);
254 		info->pmi_clone = 0;
255 		PM_UNLOCK_DIP(dip);
256 	} else {
257 		PM_UNLOCK_DIP(dip);
258 	}
259 	kmem_free(pathbuf, MAXPATHLEN);
260 
261 	/* restart autopm on device released from direct pm */
262 	pm_rescan(dip);
263 
264 	return (DDI_WALK_CONTINUE);
265 }
266 
267 #define	PM_REQ		1
268 #define	NOSTRUCT	2
269 #define	DIP		3
270 #define	NODIP		4
271 #define	NODEP		5
272 #define	DEP		6
273 #define	PM_PSC		7
274 
275 #define	CHECKPERMS	0x001
276 #define	SU		0x002
277 #define	SG		0x004
278 #define	OWNER		0x008
279 
280 #define	INWHO		0x001
281 #define	INDATAINT	0x002
282 #define	INDATASTRING	0x004
283 #define	INDEP		0x008
284 #define	INDATAOUT	0x010
285 #define	INDATA	(INDATAOUT | INDATAINT | INDATASTRING | INDEP)
286 
287 struct pm_cmd_info {
288 	int cmd;		/* command code */
289 	char *name;		/* printable string */
290 	int supported;		/* true if still supported */
291 	int str_type;		/* PM_REQ or NOSTRUCT */
292 	int inargs;		/* INWHO, INDATAINT, INDATASTRING, INDEP, */
293 				/* INDATAOUT */
294 	int diptype;		/* DIP or NODIP */
295 	int deptype;		/* DEP or NODEP */
296 	int permission;		/* SU, SG, or CHECKPERMS */
297 };
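
/*
 * Reading an entry (illustrative): the PM_DIRECT_PM row of pmci[] below,
 *
 *	{PM_DIRECT_PM, "PM_DIRECT_PM", 1, PM_REQ, INWHO, DIP, NODEP,
 *	    (SU | SG)},
 *
 * says the ioctl is still supported, takes a pm_req_t whose physpath names
 * the device (INWHO), must resolve to a held dip (DIP), carries no dependent
 * argument (NODEP), and requires SU or SG permission.
 */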
298 
299 #ifdef DEBUG
300 char *pm_cmd_string;
301 int pm_cmd;
302 #endif
303 
304 /*
305  * Returns true if permission granted by credentials
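 *
 * Usage sketch (illustrative): for a command entry marked (SU | SG),
 * pm_perms(SU | SG, cr) returns 1 if secpolicy_power_mgmt(cr) succeeds or
 * the caller's gid is 0, and returns 0 otherwise.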
306  */
307 static int
308 pm_perms(int perm, cred_t *cr)
309 {
310 	if (perm == 0)			/* no restrictions */
311 		return (1);
312 	if (perm == CHECKPERMS)		/* ok for now (is checked later) */
313 		return (1);
314 	if ((perm & SU) && secpolicy_power_mgmt(cr) == 0) /* privileged? */
315 		return (1);
316 	if ((perm & SG) && (crgetgid(cr) == 0))	/* group 0 is ok */
317 		return (1);
318 	return (0);
319 }
320 
321 #ifdef DEBUG
322 static int
323 print_info(dev_info_t *dip, void *arg)
324 {
325 	_NOTE(ARGUNUSED(arg))
326 	pm_info_t	*info;
327 	int		i, j;
328 	struct pm_component *cp;
329 	extern int pm_cur_power(pm_component_t *cp);
330 
331 	info = PM_GET_PM_INFO(dip);
332 	if (!info)
333 		return (DDI_WALK_CONTINUE);
334 	cmn_err(CE_CONT, "pm_info for %s\n", ddi_node_name(dip));
335 	for (i = 0; i < PM_NUMCMPTS(dip); i++) {
336 		cp = PM_CP(dip, i);
337 		cmn_err(CE_CONT, "\tThresholds[%d] =",  i);
338 		for (j = 0; j < cp->pmc_comp.pmc_numlevels; j++)
339 			cmn_err(CE_CONT, " %d", cp->pmc_comp.pmc_thresh[j]);
340 		cmn_err(CE_CONT, "\n");
341 		cmn_err(CE_CONT, "\tCurrent power[%d] = %d\n", i,
342 		    pm_cur_power(cp));
343 	}
344 	if (PM_ISDIRECT(dip))
345 		cmn_err(CE_CONT, "\tDirect power management\n");
346 	return (DDI_WALK_CONTINUE);
347 }
348 #endif
349 
350 /*
351  * command, name, supported, str_type, inargs, diptype, deptype, permission
352  */
353 static struct pm_cmd_info pmci[] = {
354 	{PM_SCHEDULE, "PM_SCHEDULE", 0},
355 	{PM_GET_IDLE_TIME, "PM_GET_IDLE_TIME", 0},
356 	{PM_GET_NUM_CMPTS, "PM_GET_NUM_CMPTS", 0},
357 	{PM_GET_THRESHOLD, "PM_GET_THRESHOLD", 0},
358 	{PM_SET_THRESHOLD, "PM_SET_THRESHOLD", 0},
359 	{PM_GET_NORM_PWR, "PM_GET_NORM_PWR", 0},
360 	{PM_SET_CUR_PWR, "PM_SET_CUR_PWR", 0},
361 	{PM_GET_CUR_PWR, "PM_GET_CUR_PWR", 0},
362 	{PM_GET_NUM_DEPS, "PM_GET_NUM_DEPS", 0},
363 	{PM_GET_DEP, "PM_GET_DEP", 0},
364 	{PM_ADD_DEP, "PM_ADD_DEP", 0},
365 	{PM_REM_DEP, "PM_REM_DEP", 0},
366 	{PM_REM_DEVICE, "PM_REM_DEVICE", 0},
367 	{PM_REM_DEVICES, "PM_REM_DEVICES", 0},
368 	{PM_REPARSE_PM_PROPS, "PM_REPARSE_PM_PROPS", 1, PM_REQ, INWHO, DIP,
369 	    NODEP},
370 	{PM_DISABLE_AUTOPM, "PM_DISABLE_AUTOPM", 0},
371 	{PM_REENABLE_AUTOPM, "PM_REENABLE_AUTOPM", 0},
372 	{PM_SET_NORM_PWR, "PM_SET_NORM_PWR", 0 },
373 	{PM_SET_DEVICE_THRESHOLD, "PM_SET_DEVICE_THRESHOLD", 1, PM_REQ,
374 	    INWHO, NODIP, NODEP, SU},
375 	{PM_GET_SYSTEM_THRESHOLD, "PM_GET_SYSTEM_THRESHOLD", 1, NOSTRUCT},
376 	{PM_GET_DEFAULT_SYSTEM_THRESHOLD, "PM_GET_DEFAULT_SYSTEM_THRESHOLD",
377 	    1, NOSTRUCT},
378 	{PM_SET_SYSTEM_THRESHOLD, "PM_SET_SYSTEM_THRESHOLD", 1, NOSTRUCT,
379 	    0, 0, 0, SU},
380 	{PM_START_PM, "PM_START_PM", 1, NOSTRUCT, 0, 0, 0, SU},
381 	{PM_STOP_PM, "PM_STOP_PM", 1, NOSTRUCT, 0, 0, 0, SU},
382 	{PM_RESET_PM, "PM_RESET_PM", 1, NOSTRUCT, 0, 0, 0, SU},
383 	{PM_GET_STATS, "PM_GET_STATS", 1, PM_REQ, INWHO | INDATAOUT,
384 	    DIP, NODEP},
385 	{PM_GET_DEVICE_THRESHOLD, "PM_GET_DEVICE_THRESHOLD", 1, PM_REQ, INWHO,
386 	    DIP, NODEP},
387 	{PM_GET_POWER_NAME, "PM_GET_POWER_NAME", 1, PM_REQ, INWHO | INDATAOUT,
388 	    DIP, NODEP},
389 	{PM_GET_POWER_LEVELS, "PM_GET_POWER_LEVELS", 1, PM_REQ,
390 	    INWHO | INDATAOUT, DIP, NODEP},
391 	{PM_GET_NUM_COMPONENTS, "PM_GET_NUM_COMPONENTS", 1, PM_REQ, INWHO,
392 	    DIP, NODEP},
393 	{PM_GET_COMPONENT_NAME, "PM_GET_COMPONENT_NAME", 1, PM_REQ,
394 	    INWHO | INDATAOUT, DIP, NODEP},
395 	{PM_GET_NUM_POWER_LEVELS, "PM_GET_NUM_POWER_LEVELS", 1, PM_REQ, INWHO,
396 	    DIP, NODEP},
397 	{PM_GET_STATE_CHANGE, "PM_GET_STATE_CHANGE", 1, PM_PSC},
398 	{PM_GET_STATE_CHANGE_WAIT, "PM_GET_STATE_CHANGE_WAIT", 1, PM_PSC},
399 	{PM_DIRECT_PM, "PM_DIRECT_PM", 1, PM_REQ, INWHO, DIP, NODEP,
400 	    (SU | SG)},
401 	{PM_RELEASE_DIRECT_PM, "PM_RELEASE_DIRECT_PM", 1, PM_REQ, INWHO,
402 	    DIP, NODEP},
403 	{PM_DIRECT_NOTIFY, "PM_DIRECT_NOTIFY", 1, PM_PSC},
404 	{PM_DIRECT_NOTIFY_WAIT, "PM_DIRECT_NOTIFY_WAIT", 1, PM_PSC},
405 	{PM_RESET_DEVICE_THRESHOLD, "PM_RESET_DEVICE_THRESHOLD", 1, PM_REQ,
406 	    INWHO, DIP, NODEP, SU},
407 	{PM_GET_PM_STATE, "PM_GET_PM_STATE", 1, NOSTRUCT},
408 	{PM_GET_DEVICE_TYPE, "PM_GET_DEVICE_TYPE", 1, PM_REQ, INWHO,
409 	    DIP, NODEP},
410 	{PM_SET_COMPONENT_THRESHOLDS, "PM_SET_COMPONENT_THRESHOLDS", 1, PM_REQ,
411 	    INWHO | INDATAINT, NODIP, NODEP, SU},
412 	{PM_GET_COMPONENT_THRESHOLDS, "PM_GET_COMPONENT_THRESHOLDS", 1, PM_REQ,
413 	    INWHO | INDATAOUT, DIP, NODEP},
414 	{PM_IDLE_DOWN, "PM_IDLE_DOWN", 1, NOSTRUCT, 0, 0, 0, SU},
415 	{PM_GET_DEVICE_THRESHOLD_BASIS, "PM_GET_DEVICE_THRESHOLD_BASIS", 1,
416 	    PM_REQ, INWHO, DIP, NODEP},
417 	{PM_SET_CURRENT_POWER, "PM_SET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
418 	    NODEP},
419 	{PM_GET_CURRENT_POWER, "PM_GET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
420 	    NODEP},
421 	{PM_GET_FULL_POWER, "PM_GET_FULL_POWER", 1, PM_REQ, INWHO, DIP,
422 	    NODEP},
423 	{PM_ADD_DEPENDENT, "PM_ADD_DEPENDENT", 1, PM_REQ, INWHO | INDATASTRING,
424 	    DIP, DEP, SU},
425 	{PM_GET_TIME_IDLE, "PM_GET_TIME_IDLE", 1, PM_REQ, INWHO, DIP, NODEP},
426 	{PM_ADD_DEPENDENT_PROPERTY, "PM_ADD_DEPENDENT_PROPERTY", 1, PM_REQ,
427 	    INWHO | INDATASTRING, NODIP, DEP, SU},
428 	{PM_START_CPUPM, "PM_START_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
429 	{PM_STOP_CPUPM, "PM_STOP_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
430 	{PM_GET_CPU_THRESHOLD, "PM_GET_CPU_THRESHOLD", 1, NOSTRUCT},
431 	{PM_SET_CPU_THRESHOLD, "PM_SET_CPU_THRESHOLD", 1, NOSTRUCT,
432 	    0, 0, 0, SU},
433 	{PM_GET_CPUPM_STATE, "PM_GET_CPUPM_STATE", 1, NOSTRUCT},
434 	{0, NULL}
435 };
436 
437 struct pm_cmd_info *
438 pc_info(int cmd)
439 {
440 	struct pm_cmd_info *pcip;
441 
442 	for (pcip = pmci; pcip->name; pcip++) {
443 		if (cmd == pcip->cmd)
444 			return (pcip);
445 	}
446 	return (NULL);
447 }
448 
449 static char *
450 pm_decode_cmd(int cmd)
451 {
452 	static char invbuf[64];
453 	struct pm_cmd_info *pcip = pc_info(cmd);
454 	if (pcip != NULL)
455 		return (pcip->name);
456 	(void) sprintf(invbuf, "ioctl: invalid command %d\n", cmd);
457 	return (invbuf);
458 }
459 
460 /*
461  * Allocate scan resources, create the taskq, then dispatch the scan;
462  * called only if autopm is enabled.
463  */
464 int
465 pm_start_pm_walk(dev_info_t *dip, void *arg)
466 {
467 	int cmd = *((int *)arg);
468 #ifdef PMDDEBUG
469 	char *cmdstr = pm_decode_cmd(cmd);
470 #endif
471 
472 	if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip))
473 		return (DDI_WALK_CONTINUE);
474 
475 	switch (cmd) {
476 	case PM_START_CPUPM:
477 		if (!PM_ISCPU(dip))
478 			return (DDI_WALK_CONTINUE);
479 		mutex_enter(&pm_scan_lock);
480 		if (!PM_CPUPM_DISABLED)
481 			pm_scan_init(dip);
482 		mutex_exit(&pm_scan_lock);
483 		break;
484 	case PM_START_PM:
485 		mutex_enter(&pm_scan_lock);
486 		if (PM_ISCPU(dip) && PM_CPUPM_DISABLED) {
487 			mutex_exit(&pm_scan_lock);
488 			return (DDI_WALK_CONTINUE);
489 		}
490 		if (autopm_enabled)
491 			pm_scan_init(dip);
492 		mutex_exit(&pm_scan_lock);
493 		break;
494 	}
495 
496 	/*
497 	 * Start doing pm on the device: ensure the pm_scan data structure is
498 	 * initialized; there is no need to guarantee a successful scan run.
499 	 */
500 	PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: scan %s@%s(%s#%d)\n", cmdstr,
501 	    PM_DEVICE(dip)))
502 	pm_rescan(dip);
503 
504 	return (DDI_WALK_CONTINUE);
505 }
506 
507 /*
508  * Stop scanning, then bring devices back to full (normal) power
509  */
510 int
511 pm_stop_pm_walk(dev_info_t *dip, void *arg)
512 {
513 	pm_info_t *info = PM_GET_PM_INFO(dip);
514 	int cmd = *((int *)arg);
515 #ifdef PMDDEBUG
516 	char *cmdstr = pm_decode_cmd(cmd);
517 #endif
518 
519 	if (!info)
520 		return (DDI_WALK_CONTINUE);
521 
522 	switch (cmd) {
523 	case PM_STOP_PM:
524 		/*
525 		 * If CPU devices are being managed independently, then don't
526 		 * stop them as part of PM_STOP_PM. Only stop them as part of
527 		 * PM_STOP_CPUPM and PM_RESET_PM.
528 		 */
529 		if (PM_ISCPU(dip) && PM_CPUPM_ENABLED)
530 			return (DDI_WALK_CONTINUE);
531 		break;
532 	case PM_STOP_CPUPM:
533 		/*
534 		 * If stopping CPU devices and this device is not marked
535 		 * as a CPU device, then skip.
536 		 */
537 		if (!PM_ISCPU(dip))
538 			return (DDI_WALK_CONTINUE);
539 		break;
540 	}
541 
542 	/*
543 	 * Stop the current scan, and then bring it back to normal power.
544 	 */
545 	if (!PM_ISBC(dip)) {
546 		PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: stop scan for "
547 		    "%s@%s(%s#%d)\n", cmdstr, PM_DEVICE(dip)))
548 		pm_scan_stop(dip);
549 	}
550 
551 	if (!PM_ISBC(dip) && !PM_ISDIRECT(dip) &&
552 	    !pm_all_at_normal(dip)) {
553 		PM_LOCK_DIP(dip);
554 		if (info->pmi_dev_pm_state & PM_DETACHING) {
555 			PMD(PMD_ALLNORM, ("ioctl: %s: deferring "
556 			    "all_to_normal because %s@%s(%s#%d) is detaching\n",
557 			    cmdstr, PM_DEVICE(dip)))
558 			info->pmi_dev_pm_state |= PM_ALLNORM_DEFERRED;
559 			PM_UNLOCK_DIP(dip);
560 			return (DDI_WALK_CONTINUE);
561 		}
562 		PM_UNLOCK_DIP(dip);
563 		if (pm_all_to_normal(dip, PM_CANBLOCK_FAIL) != DDI_SUCCESS) {
564 			PMD(PMD_ERROR, ("ioctl: %s: could not bring %s@%s"
565 			    "(%s#%d) to normal\n", cmdstr, PM_DEVICE(dip)))
566 		}
567 	}
568 
569 	return (DDI_WALK_CONTINUE);
570 }
571 
572 static int
573 pm_start_idledown(dev_info_t *dip, void *arg)
574 {
575 	int		flag = (int)(intptr_t)arg;
576 	pm_scan_t	*scanp = PM_GET_PM_SCAN(dip);
577 
578 	if (!scanp)
579 		return (DDI_WALK_CONTINUE);
580 
581 	PM_LOCK_DIP(dip);
582 	scanp->ps_idle_down |= flag;
583 	PM_UNLOCK_DIP(dip);
584 	pm_rescan(dip);
585 
586 	return (DDI_WALK_CONTINUE);
587 }
588 
589 /*ARGSUSED*/
590 static int
591 pm_end_idledown(dev_info_t *dip, void *ignore)
592 {
593 	pm_scan_t	*scanp = PM_GET_PM_SCAN(dip);
594 
595 	if (!scanp)
596 		return (DDI_WALK_CONTINUE);
597 
598 	PM_LOCK_DIP(dip);
599 	/*
600 	 * The PMID_TIMERS bits are placeholders until idledown expires.
601 	 * They are also the basis for regenerating the PMID_SCANS bits.
602 	 * While it is up to the scan thread to clear the PMID_SCANS bits
603 	 * after each scan run, the PMID_TIMERS bits ensure aggressive
604 	 * scan-down behavior throughout the idledown period.
605 	 */
606 	scanp->ps_idle_down &= ~PMID_TIMERS;
607 	PM_UNLOCK_DIP(dip);
608 
609 	return (DDI_WALK_CONTINUE);
610 }
611 
612 /*ARGSUSED*/
613 static void
614 pm_end_idledown_walk(void *ignore)
615 {
616 	PMD(PMD_IDLEDOWN, ("ioctl: end_idledown: idledown_id(%lx) timer is "
617 	    "off\n", (ulong_t)pmstp->pm_idledown_id));
618 
619 	mutex_enter(&pm_scan_lock);
620 	pmstp->pm_idledown_id = 0;
621 	mutex_exit(&pm_scan_lock);
622 
623 	ddi_walk_devs(ddi_root_node(), pm_end_idledown, NULL);
624 }
625 
626 /*
627  * pm_timeout_idledown - keep idledown effect for 10 seconds.
628  *
629  * Return 0 if another competing caller scheduled the idledown timeout;
630  * otherwise, return the idledown timeout_id.
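 *
 * Caller pattern (as used by PM_SET_CURRENT_POWER below), shown here only
 * for illustration:
 *
 *	if (pm_timeout_idledown() != 0)
 *		ddi_walk_devs(ddi_root_node(), pm_start_idledown,
 *		    (void *)PMID_CFB);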
631  */
632 static timeout_id_t
633 pm_timeout_idledown(void)
634 {
635 	timeout_id_t	to_id;
636 
637 	/*
638 	 * Keep idle-down in effect for either 10 seconds
639 	 * or the length of a scan interval, whichever is greater.
640 	 */
641 	mutex_enter(&pm_scan_lock);
642 	if (pmstp->pm_idledown_id != 0) {
643 		to_id = pmstp->pm_idledown_id;
644 		pmstp->pm_idledown_id = 0;
645 		mutex_exit(&pm_scan_lock);
646 		(void) untimeout(to_id);
647 		mutex_enter(&pm_scan_lock);
648 		if (pmstp->pm_idledown_id != 0) {
649 			PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: "
650 			    "another caller got it, idledown_id(%lx)!\n",
651 			    (ulong_t)pmstp->pm_idledown_id))
652 			mutex_exit(&pm_scan_lock);
653 			return (0);
654 		}
655 	}
656 	pmstp->pm_idledown_id = timeout(pm_end_idledown_walk, NULL,
657 	    PM_IDLEDOWN_TIME * hz);
658 	PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: idledown_id(%lx)\n",
659 	    (ulong_t)pmstp->pm_idledown_id))
660 	mutex_exit(&pm_scan_lock);
661 
662 	return (pmstp->pm_idledown_id);
663 }
664 
665 static int
666 pm_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
667 	struct pollhead **phpp)
668 {
669 	extern struct pollhead pm_pollhead;	/* common/os/sunpm.c */
670 	int	clone;
671 
672 	clone = PM_MINOR_TO_CLONE(getminor(dev));
673 	PMD(PMD_IOCTL, ("ioctl: pm_chpoll: clone %d\n", clone))
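	/*
	 * pm_poll_cnt[clone] (maintained together with sunpm.c) tracks
	 * pending state-change entries for this clone; pm_free_entries()
	 * below decrements it as direct entries are freed, and a non-zero
	 * count is reported here as POLLIN | POLLRDNORM.
	 */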
674 	if ((events & (POLLIN | POLLRDNORM)) && pm_poll_cnt[clone]) {
675 		*reventsp |= (POLLIN | POLLRDNORM);
676 		PMD(PMD_IOCTL, ("ioctl: pm_chpoll: reventsp set\n"))
677 	} else {
678 		*reventsp = 0;
679 		if (!anyyet) {
680 			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: not anyyet\n"))
681 			*phpp = &pm_pollhead;
682 		}
683 #ifdef DEBUG
684 		else {
685 			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: anyyet\n"))
686 		}
687 #endif
688 	}
689 	return (0);
690 }
691 
692 /*
693  * Called by pm_discard_entries() to free up the memory.  It also
694  * decrements pm_poll_cnt if direct is non-zero.
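 *
 * The psce entries form a ring: traversal starts at psce_out, wraps from
 * psce_last back to psce_first, and a zero p->size marks a free slot, so
 * this drains every queued entry and resets the ring to empty.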
695  */
696 static void
697 pm_free_entries(psce_t *pscep, int clone, int direct)
698 {
699 	pm_state_change_t	*p;
700 
701 	if (pscep) {
702 		p = pscep->psce_out;
703 		while (p->size) {
704 			if (direct) {
705 				PMD(PMD_IOCTL, ("ioctl: discard: "
706 				    "pm_poll_cnt[%d] is %d before "
707 				    "ASSERT\n", clone,
708 				    pm_poll_cnt[clone]))
709 				ASSERT(pm_poll_cnt[clone]);
710 				pm_poll_cnt[clone]--;
711 			}
712 			kmem_free(p->physpath, p->size);
713 			p->size = 0;
714 			if (p == pscep->psce_last)
715 				p = pscep->psce_first;
716 			else
717 				p++;
718 		}
719 		pscep->psce_out = pscep->psce_first;
720 		pscep->psce_in = pscep->psce_first;
721 		mutex_exit(&pscep->psce_lock);
722 	}
723 }
724 
725 /*
726  * Discard entries for this clone. Calls pm_free_entries to free up memory.
727  */
728 static void
729 pm_discard_entries(int clone)
730 {
731 	psce_t	*pscep;
732 	psce_t			*pm_psc_clone_to_direct(int);
733 	psce_t			*pm_psc_clone_to_interest(int);
734 	int			direct = 0;
735 
736 	mutex_enter(&pm_clone_lock);
737 	if ((pscep = pm_psc_clone_to_direct(clone)) != NULL)
738 		direct = 1;
739 	pm_free_entries(pscep, clone, direct);
740 	pscep = pm_psc_clone_to_interest(clone);
741 	pm_free_entries(pscep, clone, 0);
742 	mutex_exit(&pm_clone_lock);
743 }
744 
745 
746 static void
747 pm_set_idle_threshold(dev_info_t *dip, int thresh, int flag)
748 {
749 	if (!PM_ISBC(dip) && !PM_ISDIRECT(dip)) {
750 		switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
751 		case PMC_DEF_THRESH:
752 		case PMC_CPU_THRESH:
753 			PMD(PMD_IOCTL, ("ioctl: set_idle_threshold: set "
754 			    "%s@%s(%s#%d) default thresh to 0t%d\n",
755 			    PM_DEVICE(dip), thresh))
756 			pm_set_device_threshold(dip, thresh, flag);
757 			break;
758 		default:
759 			break;
760 		}
761 	}
762 }
763 
764 static int
765 pm_set_idle_thresh_walk(dev_info_t *dip, void *arg)
766 {
767 	int cmd = *((int *)arg);
768 
769 	if (!PM_GET_PM_INFO(dip))
770 		return (DDI_WALK_CONTINUE);
771 
772 	switch (cmd) {
773 	case PM_SET_SYSTEM_THRESHOLD:
774 		if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)
775 			break;
776 		pm_set_idle_threshold(dip, pm_system_idle_threshold,
777 		    PMC_DEF_THRESH);
778 		pm_rescan(dip);
779 		break;
780 	case PM_SET_CPU_THRESHOLD:
781 		if (!PM_ISCPU(dip))
782 			break;
783 		pm_set_idle_threshold(dip, pm_cpu_idle_threshold,
784 		    PMC_CPU_THRESH);
785 		pm_rescan(dip);
786 		break;
787 	}
788 
789 	return (DDI_WALK_CONTINUE);
790 }
791 
792 /*ARGSUSED*/
793 static int
794 pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
795 {
796 	dev_t	dev;
797 	int	instance;
798 
799 	switch (infocmd) {
800 	case DDI_INFO_DEVT2DEVINFO:
801 		if (pmstp->pm_instance == -1)
802 			return (DDI_FAILURE);
803 		*result = pmstp->pm_dip;
804 		return (DDI_SUCCESS);
805 
806 	case DDI_INFO_DEVT2INSTANCE:
807 		dev = (dev_t)arg;
808 		instance = getminor(dev) >> 8;
809 		*result = (void *)(uintptr_t)instance;
810 		return (DDI_SUCCESS);
811 
812 	default:
813 		return (DDI_FAILURE);
814 	}
815 }
816 
817 
818 /*ARGSUSED1*/
819 static int
820 pm_open(dev_t *devp, int flag, int otyp, cred_t *cr)
821 {
822 	int		clone;
823 
824 	if (otyp != OTYP_CHR)
825 		return (EINVAL);
826 
827 	mutex_enter(&pm_clone_lock);
828 	for (clone = 1; clone < PM_MAX_CLONE; clone++)
829 		if (!pmstp->pm_clones[clone])
830 			break;
831 
832 	if (clone == PM_MAX_CLONE) {
833 		mutex_exit(&pm_clone_lock);
834 		return (ENXIO);
835 	}
836 	pmstp->pm_cred[clone] = cr;
837 	crhold(cr);
838 
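	/*
	 * Hand back a unique minor, (instance << 8) + clone, so that
	 * pm_close() and pm_ioctl() can recover the clone number with
	 * PM_MINOR_TO_CLONE().
	 */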
839 	*devp = makedevice(getmajor(*devp), (pmstp->pm_instance << 8) + clone);
840 	pmstp->pm_clones[clone] = 1;
841 	mutex_exit(&pm_clone_lock);
842 
843 	return (0);
844 }
845 
846 /*ARGSUSED1*/
847 static int
848 pm_close(dev_t dev, int flag, int otyp, cred_t *cr)
849 {
850 	int clone;
851 
852 	if (otyp != OTYP_CHR)
853 		return (EINVAL);
854 
855 	clone = PM_MINOR_TO_CLONE(getminor(dev));
856 	PMD(PMD_CLOSE, ("pm_close: minor %x, clone %x\n", getminor(dev),
857 	    clone))
858 
859 	/*
860 	 * Walk the entire device tree to find the corresponding
861 	 * device and operate on it.
862 	 */
863 	ddi_walk_devs(ddi_root_node(), pm_close_direct_pm_device,
864 	    (void *) &clone);
865 
866 	crfree(pmstp->pm_cred[clone]);
867 	pmstp->pm_cred[clone] = 0;
868 	pmstp->pm_clones[clone] = 0;
869 	pm_discard_entries(clone);
870 	ASSERT(pm_poll_cnt[clone] == 0);
871 	pm_deregister_watcher(clone, NULL);
872 	return (0);
873 }
874 
875 /*ARGSUSED*/
876 static int
877 pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
878 {
879 	struct pm_cmd_info *pc_info(int);
880 	struct pm_cmd_info *pcip = pc_info(cmd);
881 	pm_req_t	req;
882 	dev_info_t	*dip = NULL;
883 	pm_info_t	*info = NULL;
884 	int		clone;
885 	char		*cmdstr = pm_decode_cmd(cmd);
886 	/*
887 	 * To keep devinfo nodes from going away while we're holding a
888 	 * pointer to their dip, pm_name_to_dip() optionally holds
889 	 * the devinfo node.  If we've done that, we set dipheld
890 	 * so we know at the end of the ioctl processing to release the
891 	 * node again.
892 	 */
893 	int		dipheld = 0;
894 	int		icount = 0;
895 	int		i;
896 	int		comps;
897 	size_t		lencopied;
898 	int		ret = ENOTTY;
899 	int		curpower;
900 	char		who[MAXNAMELEN];
901 	size_t		wholen;			/* copyinstr length */
902 	size_t		deplen = MAXNAMELEN;
903 	char		*dep, i_dep_buf[MAXNAMELEN];
904 	char		*pathbuf;
905 	struct pm_component *cp;
906 #ifdef	_MULTI_DATAMODEL
907 	pm_state_change32_t		*pscp32;
908 	pm_state_change32_t		psc32;
909 	size_t				copysize32;
910 #endif
911 	pm_state_change_t		*pscp;
912 	pm_state_change_t		psc;
913 	size_t		copysize;
914 	extern void	pm_record_thresh(pm_thresh_rec_t *);
915 	psce_t		*pm_psc_clone_to_direct(int);
916 	psce_t		*pm_psc_clone_to_interest(int);
917 	extern	void	pm_register_watcher(int, dev_info_t *);
918 	extern	int	pm_get_current_power(dev_info_t *, int, int *);
919 	extern	int	pm_interest_registered(int);
920 	extern	void	pm_all_to_default_thresholds(void);
921 	extern	int	pm_current_threshold(dev_info_t *, int, int *);
922 	extern void	pm_deregister_watcher(int, dev_info_t *);
923 	extern void	pm_unrecord_threshold(char *);
924 
925 	PMD(PMD_IOCTL, ("ioctl: %s: begin\n", cmdstr))
926 
927 #ifdef DEBUG
928 	if (cmd == 666) {
929 		ddi_walk_devs(ddi_root_node(), print_info, NULL);
930 		return (0);
931 	}
932 	ret = 0x0badcafe;			/* sanity checking */
933 	pm_cmd = cmd;				/* for ASSERT debugging */
934 	pm_cmd_string = cmdstr;	/* for ASSERT debugging */
935 #endif
936 
937 
938 	if (pcip == NULL) {
939 		PMD(PMD_ERROR, ("ioctl: unknown command %d\n", cmd))
940 		return (ENOTTY);
941 	}
942 	if (pcip->supported == 0) {
943 		PMD(PMD_ERROR, ("ioctl: command %s no longer supported\n",
944 		    pcip->name))
945 		return (ENOTTY);
946 	}
947 
948 	wholen = 0;
949 	dep = i_dep_buf;
950 	i_dep_buf[0] = 0;
951 	clone = PM_MINOR_TO_CLONE(getminor(dev));
952 	if (!pm_perms(pcip->permission, pmstp->pm_cred[clone])) {
953 		ret = EPERM;
954 		return (ret);
955 	}
956 	switch (pcip->str_type) {
957 	case PM_REQ:
958 #ifdef	_MULTI_DATAMODEL
959 		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
960 			pm_req32_t	req32;
961 
962 			if (ddi_copyin((caddr_t)arg, &req32,
963 			    sizeof (req32), mode) != 0) {
964 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
965 				    "EFAULT\n\n", cmdstr))
966 				ret = EFAULT;
967 				break;
968 			}
969 			req.component = req32.component;
970 			req.value = req32.value;
971 			req.datasize = req32.datasize;
972 			if (pcip->inargs & INWHO) {
973 				ret = copyinstr((char *)(uintptr_t)
974 				    req32.physpath, who, MAXNAMELEN, &wholen);
975 				if (ret) {
976 					PMD(PMD_ERROR, ("ioctl: %s: "
977 					    "copyinstr fails returning %d\n",
978 					    cmdstr, ret))
979 					break;
980 				}
981 				req.physpath = who;
982 			}
983 			PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n", cmdstr,
984 			    req.physpath))
985 			if (pcip->inargs & INDATA) {
986 				req.data = (void *)(uintptr_t)req32.data;
987 				req.datasize = req32.datasize;
988 			} else {
989 				req.data = NULL;
990 				req.datasize = 0;
991 			}
992 			switch (pcip->diptype) {
993 			case DIP:
994 				if (!(dip =
995 				    pm_name_to_dip(req.physpath, 1))) {
996 					PMD(PMD_ERROR, ("ioctl: %s: "
997 					    "pm_name_to_dip for %s failed\n",
998 					    cmdstr, req.physpath))
999 					return (ENODEV);
1000 				}
1001 				ASSERT(!dipheld);
1002 				dipheld++;
1003 				break;
1004 			case NODIP:
1005 				break;
1006 			default:
1007 				/*
1008 				 * Internal error, invalid ioctl description
1009 				 * force debug entry even if pm_debug not set
1010 				 */
1011 #ifdef	DEBUG
1012 				pm_log("invalid diptype %d for cmd %d (%s)\n",
1013 				    pcip->diptype, cmd, pcip->name);
1014 #endif
1015 				ASSERT(0);
1016 				return (EIO);
1017 			}
1018 			if (pcip->inargs & INDATAINT) {
1019 				int32_t int32buf;
1020 				int32_t *i32p;
1021 				int *ip;
1022 				icount = req32.datasize / sizeof (int32_t);
1023 				if (icount <= 0) {
1024 					PMD(PMD_ERROR, ("ioctl: %s: datasize"
1025 					    " 0 or neg EFAULT\n\n", cmdstr))
1026 					ret = EFAULT;
1027 					break;
1028 				}
1029 				ASSERT(!(pcip->inargs & INDATASTRING));
1030 				req.datasize = icount * sizeof (int);
1031 				req.data = kmem_alloc(req.datasize, KM_SLEEP);
1032 				ip = req.data;
1033 				ret = 0;
1034 				for (i = 0,
1035 				    i32p = (int32_t *)(uintptr_t)req32.data;
1036 				    i < icount; i++, i32p++) {
1037 					if (ddi_copyin((void *)i32p, &int32buf,
1038 					    sizeof (int32_t), mode)) {
1039 						kmem_free(req.data,
1040 						    req.datasize);
1041 						PMD(PMD_ERROR, ("ioctl: %s: "
1042 						    "entry %d EFAULT\n",
1043 						    cmdstr, i))
1044 						ret = EFAULT;
1045 						break;
1046 					}
1047 					*ip++ = (int)int32buf;
1048 				}
1049 				if (ret)
1050 					break;
1051 			}
1052 			if (pcip->inargs & INDATASTRING) {
1053 				ASSERT(!(pcip->inargs & INDATAINT));
1054 				ASSERT(pcip->deptype == DEP);
1055 				if (req32.data != NULL) {
1056 					size_t dummy;
1057 					if (copyinstr((void *)(uintptr_t)
1058 					    req32.data, dep, deplen, &dummy)) {
1059 						PMD(PMD_ERROR, ("ioctl: %s: "
1060 						    "0x%p dep size %lx, EFAULT"
1061 						    "\n", cmdstr,
1062 						    (void *)req.data, deplen))
1063 						ret = EFAULT;
1064 						break;
1065 					}
1066 #ifdef DEBUG
1067 					else {
1068 						PMD(PMD_DEP, ("ioctl: %s: "
1069 						    "dep %s\n", cmdstr, dep))
1070 					}
1071 #endif
1072 				} else {
1073 					PMD(PMD_ERROR, ("ioctl: %s: no "
1074 					    "dependent\n", cmdstr))
1075 					ret = EINVAL;
1076 					break;
1077 				}
1078 			}
1079 		} else
1080 #endif /* _MULTI_DATAMODEL */
1081 		{
1082 			if (ddi_copyin((caddr_t)arg,
1083 			    &req, sizeof (req), mode) != 0) {
1084 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1085 				    "EFAULT\n\n", cmdstr))
1086 				ret = EFAULT;
1087 				break;
1088 			}
1089 			if (pcip->inargs & INWHO) {
1090 				ret = copyinstr((char *)req.physpath, who,
1091 				    MAXNAMELEN, &wholen);
1092 				if (ret) {
1093 					PMD(PMD_ERROR, ("ioctl: %s copyinstr"
1094 					    " fails returning %d\n", cmdstr,
1095 					    ret))
1096 					break;
1097 				}
1098 				req.physpath = who;
1099 			}
1100 			PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n", cmdstr,
1101 			    req.physpath))
1102 			if (!(pcip->inargs & INDATA)) {
1103 				req.data = NULL;
1104 				req.datasize = 0;
1105 			}
1106 			switch (pcip->diptype) {
1107 			case DIP:
1108 				if (!(dip =
1109 				    pm_name_to_dip(req.physpath, 1))) {
1110 					PMD(PMD_ERROR, ("ioctl: %s: "
1111 					    "pm_name_to_dip for %s failed\n",
1112 					    cmdstr, req.physpath))
1113 					return (ENODEV);
1114 				}
1115 				ASSERT(!dipheld);
1116 				dipheld++;
1117 				break;
1118 			case NODIP:
1119 				break;
1120 			default:
1121 				/*
1122 				 * Internal error, invalid ioctl description
1123 				 * force debug entry even if pm_debug not set
1124 				 */
1125 #ifdef	DEBUG
1126 				pm_log("invalid diptype %d for cmd %d (%s)\n",
1127 				    pcip->diptype, cmd, pcip->name);
1128 #endif
1129 				ASSERT(0);
1130 				return (EIO);
1131 			}
1132 			if (pcip->inargs & INDATAINT) {
1133 				int *ip;
1134 
1135 				ASSERT(!(pcip->inargs & INDATASTRING));
1136 				ip = req.data;
1137 				icount = req.datasize / sizeof (int);
1138 				if (icount <= 0) {
1139 					PMD(PMD_ERROR, ("ioctl: %s: datasize"
1140 					    " 0 or neg EFAULT\n\n", cmdstr))
1141 					ret = EFAULT;
1142 					break;
1143 				}
1144 				req.data = kmem_alloc(req.datasize, KM_SLEEP);
1145 				if (ddi_copyin((caddr_t)ip, req.data,
1146 				    req.datasize, mode) != 0) {
1147 					PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1148 					    "EFAULT\n\n", cmdstr))
1149 					ret = EFAULT;
1150 					break;
1151 				}
1152 			}
1153 			if (pcip->inargs & INDATASTRING) {
1154 				ASSERT(!(pcip->inargs & INDATAINT));
1155 				ASSERT(pcip->deptype == DEP);
1156 				if (req.data != NULL) {
1157 					size_t dummy;
1158 					if (copyinstr((caddr_t)req.data,
1159 					    dep, deplen, &dummy)) {
1160 						PMD(PMD_ERROR, ("ioctl: %s: "
1161 						    "0x%p dep size %lu, "
1162 						    "EFAULT\n", cmdstr,
1163 						    (void *)req.data, deplen))
1164 						ret = EFAULT;
1165 						break;
1166 					}
1167 #ifdef DEBUG
1168 					else {
1169 						PMD(PMD_DEP, ("ioctl: %s: "
1170 						    "dep %s\n", cmdstr, dep))
1171 					}
1172 #endif
1173 				} else {
1174 					PMD(PMD_ERROR, ("ioctl: %s: no "
1175 					    "dependent\n", cmdstr))
1176 					ret = EINVAL;
1177 					break;
1178 				}
1179 			}
1180 		}
1181 		/*
1182 		 * Now we've got all the args in for the commands that
1183 		 * use the new pm_req struct.
1184 		 */
1185 		switch (cmd) {
1186 		case PM_REPARSE_PM_PROPS:
1187 		{
1188 			struct dev_ops	*drv;
1189 			struct cb_ops	*cb;
1190 			void		*propval;
1191 			int length;
1192 			/*
1193 			 * This ioctl is provided only for the ddivs pm test.
1194 			 * We only do it to a driver which explicitly allows
1195 			 * us to do so by exporting a pm-reparse-ok property.
1196 			 * We only care whether the property exists or not.
1197 			 */
1198 			if ((drv = ddi_get_driver(dip)) == NULL) {
1199 				ret = EINVAL;
1200 				break;
1201 			}
1202 			if ((cb = drv->devo_cb_ops) != NULL) {
1203 				if ((*cb->cb_prop_op)(DDI_DEV_T_ANY, dip,
1204 				    PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1205 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1206 				    "pm-reparse-ok", (caddr_t)&propval,
1207 				    &length) != DDI_SUCCESS) {
1208 					ret = EINVAL;
1209 					break;
1210 				}
1211 			} else if (ddi_prop_op(DDI_DEV_T_ANY, dip,
1212 			    PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1213 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1214 			    "pm-reparse-ok", (caddr_t)&propval,
1215 			    &length) != DDI_SUCCESS) {
1216 				ret = EINVAL;
1217 				break;
1218 			}
1219 			kmem_free(propval, length);
1220 			ret =  e_new_pm_props(dip);
1221 			break;
1222 		}
1223 
1224 		case PM_GET_DEVICE_THRESHOLD:
1225 			PM_LOCK_DIP(dip);
1226 			if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip)) {
1227 				PM_UNLOCK_DIP(dip);
1228 				PMD(PMD_ERROR, ("ioctl: %s: ENODEV\n",
1229 				    cmdstr))
1230 				ret = ENODEV;
1231 				break;
1232 			}
1233 			*rval_p = DEVI(dip)->devi_pm_dev_thresh;
1234 			PM_UNLOCK_DIP(dip);
1235 			ret = 0;
1236 			break;
1237 
1238 		case PM_DIRECT_PM:
1239 		{
1240 			int has_dep;
1241 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1242 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1243 				    "ENODEV\n", cmdstr))
1244 				ret = ENODEV;
1245 				break;
1246 			}
1247 			/*
1248 			 * Check to see if there is a dependency on
1249 			 * this kept device; if so, return EBUSY.
1250 			 */
1251 			pathbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1252 			(void) ddi_pathname(dip, pathbuf);
1253 			pm_dispatch_to_dep_thread(PM_DEP_WK_CHECK_KEPT,
1254 			    NULL, pathbuf, PM_DEP_WAIT, &has_dep, 0);
1255 			kmem_free(pathbuf, MAXPATHLEN);
1256 			if (has_dep) {
1257 				PMD(PMD_ERROR | PMD_DPM, ("%s EBUSY\n",
1258 				    cmdstr))
1259 				ret = EBUSY;
1260 				break;
1261 			}
1262 			PM_LOCK_DIP(dip);
1263 			if (PM_ISDIRECT(dip) || (info->pmi_clone != 0)) {
1264 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1265 				    "%s@%s(%s#%d): EBUSY\n", cmdstr,
1266 				    PM_DEVICE(dip)))
1267 				PM_UNLOCK_DIP(dip);
1268 				ret = EBUSY;
1269 				break;
1270 			}
1271 			info->pmi_dev_pm_state |= PM_DIRECT;
1272 			info->pmi_clone = clone;
1273 			PM_UNLOCK_DIP(dip);
1274 			PMD(PMD_DPM, ("ioctl: %s: info %p, pmi_clone %d\n",
1275 			    cmdstr, (void *)info, clone))
1276 			mutex_enter(&pm_clone_lock);
1277 			pm_register_watcher(clone, dip);
1278 			mutex_exit(&pm_clone_lock);
1279 			ret = 0;
1280 			break;
1281 		}
1282 
1283 		case PM_RELEASE_DIRECT_PM:
1284 		{
1285 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1286 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1287 				    "ENODEV\n", cmdstr))
1288 				ret = ENODEV;
1289 				break;
1290 			}
1291 			PM_LOCK_DIP(dip);
1292 			if (info->pmi_clone != clone) {
1293 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1294 				    "%s@%s(%s#%d) EINVAL\n", cmdstr,
1295 				    PM_DEVICE(dip)))
1296 				ret = EINVAL;
1297 				PM_UNLOCK_DIP(dip);
1298 				break;
1299 			}
1300 			ASSERT(PM_ISDIRECT(dip));
1301 			info->pmi_dev_pm_state &= ~PM_DIRECT;
1302 			PM_UNLOCK_DIP(dip);
1303 			/* Bring ourselves up if there is a keeper. */
1304 			pathbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1305 			(void) ddi_pathname(dip, pathbuf);
1306 			pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF,
1307 			    NULL, pathbuf, PM_DEP_WAIT, NULL, 0);
1308 			kmem_free(pathbuf, MAXPATHLEN);
1309 			pm_discard_entries(clone);
1310 			pm_deregister_watcher(clone, dip);
1311 			/*
1312 			 * Now we can let the other threads that are
1313 			 * trying to do a DIRECT_PM through.
1314 			 */
1315 			PM_LOCK_DIP(dip);
1316 			info->pmi_clone = 0;
1317 			PM_UNLOCK_DIP(dip);
1318 			pm_proceed(dip, PMP_RELEASE, -1, -1);
1319 			PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1320 			    cmdstr))
1321 			pm_rescan(dip);
1322 			ret = 0;
1323 			break;
1324 		}
1325 
1326 		case PM_SET_CURRENT_POWER:
1327 		{
1328 			int comp = req.component;
1329 			int  value = req.value;
1330 			PMD(PMD_DPM, ("ioctl: %s: %s component %d to value "
1331 			    "%d\n", cmdstr, req.physpath, comp, value))
1332 			if (!e_pm_valid_comp(dip, comp, NULL) ||
1333 			    !e_pm_valid_power(dip, comp, value)) {
1334 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1335 				    "physpath=%s, comp=%d, level=%d, fails\n",
1336 				    cmdstr, req.physpath, comp, value))
1337 				ret = EINVAL;
1338 				break;
1339 			}
1340 
1341 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1342 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1343 				    "ENODEV\n", cmdstr))
1344 				ret = ENODEV;
1345 				break;
1346 			}
1347 			if (info->pmi_clone != clone) {
1348 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1349 				    "(not owner) %s fails; clone %d, owner %d"
1350 				    "\n", cmdstr, req.physpath, clone,
1351 				    info->pmi_clone))
1352 				ret = EINVAL;
1353 				break;
1354 			}
1355 			ASSERT(PM_ISDIRECT(dip));
1356 
1357 			if (pm_set_power(dip, comp, value, PM_LEVEL_EXACT,
1358 			    PM_CANBLOCK_BLOCK, 0, &ret) != DDI_SUCCESS) {
1359 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1360 				    "pm_set_power for %s fails, errno=%d\n",
1361 				    cmdstr, req.physpath, ret))
1362 				break;
1363 			}
1364 
1365 			pm_proceed(dip, PMP_SETPOWER, comp, value);
1366 
1367 			/*
1368 			 * Power down all idle components if console framebuffer
1369 			 * is powered off.
1370 			 */
1371 			if (PM_IS_CFB(dip) && (pm_system_idle_threshold ==
1372 			    pm_default_idle_threshold)) {
1373 				dev_info_t	*root = ddi_root_node();
1374 				if (PM_ISBC(dip)) {
1375 					if (comp == 0 && value == 0 &&
1376 					    (pm_timeout_idledown() != 0)) {
1377 						ddi_walk_devs(root,
1378 						    pm_start_idledown,
1379 						    (void *)PMID_CFB);
1380 					}
1381 				} else {
1382 					int count = 0;
1383 					for (i = 0; i < PM_NUMCMPTS(dip); i++) {
1384 						ret = pm_get_current_power(dip,
1385 						    i, &curpower);
1386 						if (ret == DDI_SUCCESS &&
1387 						    curpower == 0)
1388 							count++;
1389 					}
1390 					if ((count == PM_NUMCMPTS(dip)) &&
1391 					    (pm_timeout_idledown() != 0)) {
1392 						ddi_walk_devs(root,
1393 						    pm_start_idledown,
1394 						    (void *)PMID_CFB);
1395 					}
1396 				}
1397 			}
1398 
1399 			PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1400 			    cmdstr))
1401 			pm_rescan(dip);
1402 			*rval_p = 0;
1403 			ret = 0;
1404 			break;
1405 		}
1406 
1407 		case PM_GET_FULL_POWER:
1408 		{
1409 			int normal;
1410 			ASSERT(dip);
1411 			PMD(PMD_NORM, ("ioctl: %s: %s component %d\n",
1412 			    cmdstr, req.physpath, req.component))
1413 			normal =  pm_get_normal_power(dip, req.component);
1414 
1415 			if (normal == DDI_FAILURE) {
1416 				PMD(PMD_ERROR | PMD_NORM, ("ioctl: %s: "
1417 				    "returns EINVAL\n", cmdstr))
1418 				ret = EINVAL;
1419 				break;
1420 			}
1421 			*rval_p = normal;
1422 			PMD(PMD_NORM, ("ioctl: %s: returns %d\n",
1423 			    cmdstr, normal))
1424 			ret = 0;
1425 			break;
1426 		}
1427 
1428 		case PM_GET_CURRENT_POWER:
1429 			if (pm_get_current_power(dip, req.component,
1430 			    rval_p) != DDI_SUCCESS) {
1431 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s "
1432 				    "EINVAL\n", cmdstr))
1433 				ret = EINVAL;
1434 				break;
1435 			}
1436 			PMD(PMD_DPM, ("ioctl: %s: %s comp %d returns %d\n",
1437 			    cmdstr, req.physpath, req.component, *rval_p))
1438 			if (*rval_p == PM_LEVEL_UNKNOWN)
1439 				ret = EAGAIN;
1440 			else
1441 				ret = 0;
1442 			break;
1443 
1444 		case PM_GET_TIME_IDLE:
1445 		{
1446 			time_t timestamp;
1447 			int comp = req.component;
1448 			pm_component_t *cp;
1449 			if (!e_pm_valid_comp(dip, comp, &cp)) {
1450 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1451 				    "component %d > numcmpts - 1 %d--EINVAL\n",
1452 				    cmdstr, PM_DEVICE(dip), comp,
1453 				    PM_NUMCMPTS(dip) - 1))
1454 				ret = EINVAL;
1455 				break;
1456 			}
1457 			timestamp = cp->pmc_timestamp;
1458 			if (timestamp) {
1459 				time_t now;
1460 				(void) drv_getparm(TIME, &now);
1461 				*rval_p = (now - timestamp);
1462 			} else {
1463 				*rval_p = 0;
1464 			}
1465 			ret = 0;
1466 			break;
1467 		}
1468 
1469 		case PM_ADD_DEPENDENT:
1470 		{
1471 			dev_info_t	*kept_dip;
1472 
1473 			PMD(PMD_KEEPS, ("%s, kept %s, keeper %s\n", cmdstr,
1474 			    dep, req.physpath))
1475 
1476 			/*
1477 			 * Hold and install the kept device while processing
1478 			 * the dependency; the keeper (in .physpath) has already been held.
1479 			 */
1480 			if (dep[0] == '\0') {
1481 				PMD(PMD_ERROR, ("kept NULL or null\n"))
1482 				ret = EINVAL;
1483 				break;
1484 			} else if ((kept_dip =
1485 			    pm_name_to_dip(dep, 1)) == NULL) {
1486 				PMD(PMD_ERROR, ("no dip for kept %s\n", dep))
1487 				ret = ENODEV;
1488 				break;
1489 			} else if (kept_dip == dip) {
1490 				PMD(PMD_ERROR, ("keeper(%s, %p) - kept(%s, %p) "
1491 				    "self-dependency not allowed.\n",
1492 				    dep, (void *)kept_dip, req.physpath,
1493 				    (void *) dip))
1494 				PM_RELE(dip);	/* release "double" hold */
1495 				ret = EINVAL;
1496 				break;
1497 			}
1498 			ASSERT(!(strcmp(req.physpath, (char *)dep) == 0));
1499 
1500 			/*
1501 			 * record dependency, then walk through device tree
1502 			 * independently on behalf of kept and keeper to
1503 			 * establish newly created dependency.
1504 			 */
1505 			pm_dispatch_to_dep_thread(PM_DEP_WK_RECORD_KEEPER,
1506 			    req.physpath, dep, PM_DEP_WAIT, NULL, 0);
1507 
1508 			/*
1509 			 * release kept after establishing dependency, keeper
1510 			 * is released as part of ioctl exit processing.
1511 			 */
1512 			PM_RELE(kept_dip);
1513 			*rval_p = 0;
1514 			ret = 0;
1515 			break;
1516 		}
1517 
1518 		case PM_ADD_DEPENDENT_PROPERTY:
1519 		{
1520 			char *keeper, *kept;
1521 
1522 			if (dep[0] == '\0') {
1523 				PMD(PMD_ERROR, ("ioctl: %s: dep NULL or "
1524 				    "null\n", cmdstr))
1525 				ret = EINVAL;
1526 				break;
1527 			}
1528 			kept = dep;
1529 			keeper = req.physpath;
1530 			/*
1531 			 * Record the keeper - kept dependency, then walk through
1532 			 * the device tree to find all attached keepers, and walk
1533 			 * through again to apply the dependency to all the
1534 			 * potential kept devices.
1535 			 */
1536 			pm_dispatch_to_dep_thread(
1537 			    PM_DEP_WK_RECORD_KEEPER_PROP, keeper, kept,
1538 			    PM_DEP_WAIT, NULL, 0);
1539 
1540 			*rval_p = 0;
1541 			ret = 0;
1542 			break;
1543 		}
1544 
1545 		case PM_SET_DEVICE_THRESHOLD:
1546 		{
1547 			pm_thresh_rec_t *rp;
1548 			pm_pte_t *ep;	/* threshold header storage */
1549 			int *tp;	/* threshold storage */
1550 			size_t size;
1551 			extern int pm_thresh_specd(dev_info_t *);
1552 
1553 			/*
1554 			 * The header struct plus one entry struct plus one
1555 			 * threshold plus the length of the string
1556 			 */
1557 			size = sizeof (pm_thresh_rec_t) +
1558 			    (sizeof (pm_pte_t) * 1) +
1559 			    (1 * sizeof (int)) +
1560 			    strlen(req.physpath) + 1;
1561 
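			/*
			 * Resulting single allocation (illustrative layout):
			 * [pm_thresh_rec_t][pm_pte_t][int threshold]
			 * [physpath string + '\0'], with rp->ptr_entries and
			 * ep->pte_thresh pointing into it.
			 */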
1562 			rp = kmem_zalloc(size, KM_SLEEP);
1563 			rp->ptr_size = size;
1564 			rp->ptr_numcomps = 0;	/* means device threshold */
1565 			ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1566 			rp->ptr_entries = ep;
1567 			tp = (int *)((intptr_t)ep +
1568 			    (1 * sizeof (pm_pte_t)));
1569 			ep->pte_numthresh = 1;
1570 			ep->pte_thresh = tp;
1571 			*tp++ = req.value;
1572 			(void) strcat((char *)tp, req.physpath);
1573 			rp->ptr_physpath = (char *)tp;
1574 			ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1575 			    (intptr_t)rp + rp->ptr_size);
1576 			PMD(PMD_THRESH, ("ioctl: %s: record thresh %d for "
1577 			    "%s\n", cmdstr, req.value, req.physpath))
1578 			pm_record_thresh(rp);
1579 			/*
1580 			 * Don't free rp, pm_record_thresh() keeps it.
1581 			 * We don't try to apply it ourselves because we'd need
1582 			 * to know too much about locking.  Since we don't
1583 			 * hold a lock the entry could be removed before
1584 			 * we get here
1585 			 */
1586 			ASSERT(dip == NULL);
1587 			ret = 0;		/* can't fail now */
1588 			if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1589 				break;
1590 			}
1591 			(void) pm_thresh_specd(dip);
1592 			PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d)\n",
1593 			    cmdstr, PM_DEVICE(dip)))
1594 			PM_RELE(dip);
1595 			break;
1596 		}
1597 
1598 		case PM_RESET_DEVICE_THRESHOLD:
1599 		{
1600 			/*
1601 			 * This only applies to a currently attached and power
1602 			 * managed node; we don't do this to old-style drivers.
1603 			 */
1607 			info = PM_GET_PM_INFO(dip);
1608 			if (info == NULL) {
1609 				PMD(PMD_ERROR, ("ioctl: %s: %s not power "
1610 				    "managed\n", cmdstr, req.physpath))
1611 				ret = EINVAL;
1612 				break;
1613 			}
1614 			if (PM_ISBC(dip)) {
1615 				PMD(PMD_ERROR, ("ioctl: %s: %s is BC\n",
1616 				    cmdstr, req.physpath))
1617 				ret = EINVAL;
1618 				break;
1619 			}
1620 			pm_unrecord_threshold(req.physpath);
1621 			if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)
1622 				pm_set_device_threshold(dip,
1623 				    pm_cpu_idle_threshold, PMC_CPU_THRESH);
1624 			else
1625 				pm_set_device_threshold(dip,
1626 				    pm_system_idle_threshold, PMC_DEF_THRESH);
1627 			ret = 0;
1628 			break;
1629 		}
1630 
1631 		case PM_GET_NUM_COMPONENTS:
1632 			ret = 0;
1633 			*rval_p = PM_NUMCMPTS(dip);
1634 			break;
1635 
1636 		case PM_GET_DEVICE_TYPE:
1637 			ret = 0;
1638 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1639 				PMD(PMD_ERROR, ("ioctl: %s: "
1640 				    "PM_NO_PM_COMPONENTS\n", cmdstr))
1641 				*rval_p = PM_NO_PM_COMPONENTS;
1642 				break;
1643 			}
1644 			if (PM_ISBC(dip)) {
1645 				*rval_p = PM_CREATE_COMPONENTS;
1646 			} else {
1647 				*rval_p = PM_AUTOPM;
1648 			}
1649 			break;
1650 
1651 		case PM_SET_COMPONENT_THRESHOLDS:
1652 		{
1653 			int comps = 0;
1654 			int *end = (int *)req.data + icount;
1655 			pm_thresh_rec_t *rp;
1656 			pm_pte_t *ep;	/* threshold header storage */
1657 			int *tp;	/* threshold storage */
1658 			int *ip;
1659 			int j;
1660 			size_t size;
1661 			extern int pm_thresh_specd(dev_info_t *);
1662 			extern int pm_valid_thresh(dev_info_t *,
1663 			    pm_thresh_rec_t *);
1664 
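			/*
			 * Expected req.data layout (illustrative): for each
			 * component a threshold count followed by that many
			 * threshold values, with a terminating 0 count, e.g.
			 * a two-component device might pass
			 *
			 *	{ 2, t0a, t0b, 1, t1a, 0 }
			 *
			 * The loop below counts the components and checks
			 * that the list exactly fills the buffer.
			 */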
1665 			for (ip = req.data; *ip; ip++) {
1666 				if (ip >= end) {
1667 					ret = EFAULT;
1668 					break;
1669 				}
1670 				comps++;
1671 				/* skip over indicated number of entries */
1672 				for (j = *ip; j; j--) {
1673 					if (++ip >= end) {
1674 						ret = EFAULT;
1675 						break;
1676 					}
1677 				}
1678 				if (ret)
1679 					break;
1680 			}
1681 			if (ret)
1682 				break;
1683 			if ((intptr_t)ip != (intptr_t)end - sizeof (int)) {
1684 				/* did not exactly fill buffer */
1685 				ret = EINVAL;
1686 				break;
1687 			}
1688 			if (comps == 0) {
1689 				PMD(PMD_ERROR, ("ioctl: %s: %s 0 components"
1690 				    "--EINVAL\n", cmdstr, req.physpath))
1691 				ret = EINVAL;
1692 				break;
1693 			}
1694 			/*
1695 			 * The header struct plus one entry struct per component
1696 			 * plus the size of the lists minus the counts
1697 			 * plus the length of the string
1698 			 */
1699 			size = sizeof (pm_thresh_rec_t) +
1700 			    (sizeof (pm_pte_t) * comps) + req.datasize -
1701 			    ((comps + 1) * sizeof (int)) +
1702 			    strlen(req.physpath) + 1;
1703 
1704 			rp = kmem_zalloc(size, KM_SLEEP);
1705 			rp->ptr_size = size;
1706 			rp->ptr_numcomps = comps;
1707 			ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1708 			rp->ptr_entries = ep;
1709 			tp = (int *)((intptr_t)ep +
1710 			    (comps * sizeof (pm_pte_t)));
1711 			for (ip = req.data; *ip; ep++) {
1712 				ep->pte_numthresh = *ip;
1713 				ep->pte_thresh = tp;
1714 				for (j = *ip++; j; j--) {
1715 					*tp++ = *ip++;
1716 				}
1717 			}
1718 			(void) strcat((char *)tp, req.physpath);
1719 			rp->ptr_physpath = (char *)tp;
1720 			ASSERT((intptr_t)end == (intptr_t)ip + sizeof (int));
1721 			ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1722 			    (intptr_t)rp + rp->ptr_size);
1723 
1724 			ASSERT(dip == NULL);
1725 			/*
1726 			 * If this is not a currently power managed node,
1727 			 * then we can't check for validity of the thresholds
1728 			 */
1729 			if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1730 				/* don't free rp, pm_record_thresh uses it */
1731 				pm_record_thresh(rp);
1732 				PMD(PMD_ERROR, ("ioctl: %s: pm_name_to_dip "
1733 				    "for %s failed\n", cmdstr, req.physpath))
1734 				ret = 0;
1735 				break;
1736 			}
1737 			ASSERT(!dipheld);
1738 			dipheld++;
1739 
1740 			if (!pm_valid_thresh(dip, rp)) {
1741 				PMD(PMD_ERROR, ("ioctl: %s: invalid thresh "
1742 				    "for %s@%s(%s#%d)\n", cmdstr,
1743 				    PM_DEVICE(dip)))
1744 				kmem_free(rp, size);
1745 				ret = EINVAL;
1746 				break;
1747 			}
1748 			/*
1749 			 * We don't just apply it ourselves because we'd need
1750 			 * to know too much about locking.  Since we don't
1751 			 * hold a lock the entry could be removed before
1752 			 * we get here
1753 			 */
1754 			pm_record_thresh(rp);
1755 			(void) pm_thresh_specd(dip);
1756 			ret = 0;
1757 			break;
1758 		}
1759 
1760 		case PM_GET_COMPONENT_THRESHOLDS:
1761 		{
1762 			int musthave;
1763 			int numthresholds = 0;
1764 			int wordsize;
1765 			int numcomps;
1766 			caddr_t uaddr = req.data;	/* user address */
1767 			int val;	/* int value to be copied out */
1768 			int32_t val32;	/* int32 value to be copied out */
1769 			caddr_t vaddr;	/* address to copyout from */
1770 			int j;
1771 
1772 #ifdef	_MULTI_DATAMODEL
1773 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1774 				wordsize = sizeof (int32_t);
1775 			} else
1776 #endif /* _MULTI_DATAMODEL */
1777 			{
1778 				wordsize = sizeof (int);
1779 			}
1780 
1781 			ASSERT(dip);
1782 
1783 			numcomps = PM_NUMCMPTS(dip);
1784 			for (i = 0; i < numcomps; i++) {
1785 				cp = PM_CP(dip, i);
1786 				numthresholds += cp->pmc_comp.pmc_numlevels - 1;
1787 			}
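			/*
			 * Copyout format (the inverse of the
			 * PM_SET_COMPONENT_THRESHOLDS input): for each
			 * component a count of pmc_numlevels - 1 followed by
			 * that many threshold values, then a terminating 0
			 * count; musthave below sizes exactly that.
			 */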
1788 			musthave = (numthresholds + numcomps + 1) *  wordsize;
1789 			if (req.datasize < musthave) {
1790 				PMD(PMD_ERROR, ("ioctl: %s: size %ld, need "
1791 				    "%d--EINVAL\n", cmdstr, req.datasize,
1792 				    musthave))
1793 				ret = EINVAL;
1794 				break;
1795 			}
1796 			PM_LOCK_DIP(dip);
1797 			for (i = 0; i < numcomps; i++) {
1798 				int *thp;
1799 				cp = PM_CP(dip, i);
1800 				thp = cp->pmc_comp.pmc_thresh;
1801 				/* first copyout the count */
1802 				if (wordsize == sizeof (int32_t)) {
1803 					val32 = cp->pmc_comp.pmc_numlevels - 1;
1804 					vaddr = (caddr_t)&val32;
1805 				} else {
1806 					val = cp->pmc_comp.pmc_numlevels - 1;
1807 					vaddr = (caddr_t)&val;
1808 				}
1809 				if (ddi_copyout(vaddr, (void *)uaddr,
1810 				    wordsize, mode) != 0) {
1811 					PM_UNLOCK_DIP(dip);
1812 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1813 					    "(%s#%d) vaddr %p EFAULT\n",
1814 					    cmdstr, PM_DEVICE(dip),
1815 					    (void*)vaddr))
1816 					ret = EFAULT;
1817 					break;
1818 				}
1819 				vaddr = uaddr;
1820 				vaddr += wordsize;
1821 				uaddr = (caddr_t)vaddr;
1822 				/* then copyout each threshold value */
1823 				for (j = 0; j < cp->pmc_comp.pmc_numlevels - 1;
1824 				    j++) {
1825 					if (wordsize == sizeof (int32_t)) {
1826 						val32 = thp[j + 1];
1827 						vaddr = (caddr_t)&val32;
1828 					} else {
1829 						val = thp[j + 1];
1830 						vaddr = (caddr_t)&val;
1831 					}
1832 					if (ddi_copyout(vaddr, (void *) uaddr,
1833 					    wordsize, mode) != 0) {
1834 						PM_UNLOCK_DIP(dip);
1835 						PMD(PMD_ERROR, ("ioctl: %s: "
1836 						    "%s@%s(%s#%d) uaddr %p "
1837 						    "EFAULT\n", cmdstr,
1838 						    PM_DEVICE(dip),
1839 						    (void *)uaddr))
1840 						ret = EFAULT;
1841 						break;
1842 					}
1843 					vaddr = uaddr;
1844 					vaddr += wordsize;
1845 					uaddr = (caddr_t)vaddr;
1846 				}
1847 			}
1848 			if (ret)
1849 				break;
1850 			/* last copyout a terminating 0 count */
1851 			if (wordsize == sizeof (int32_t)) {
1852 				val32 = 0;
1853 				vaddr = (caddr_t)&val32;
1854 			} else {
1855 				ASSERT(wordsize == sizeof (int));
1856 				val = 0;
1857 				vaddr = (caddr_t)&val;
1858 			}
1859 			if (ddi_copyout(vaddr, uaddr, wordsize, mode) != 0) {
1860 				PM_UNLOCK_DIP(dip);
1861 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1862 				    "vaddr %p (0 count) EFAULT\n", cmdstr,
1863 				    PM_DEVICE(dip), (void *)vaddr))
1864 				ret = EFAULT;
1865 				break;
1866 			}
1867 			/* finished, so don't need to increment addresses */
1868 			PM_UNLOCK_DIP(dip);
1869 			ret = 0;
1870 			break;
1871 		}
1872 
1873 		case PM_GET_STATS:
1874 		{
1875 			time_t now;
1876 			time_t *timestamp;
1877 			extern int pm_cur_power(pm_component_t *cp);
1878 			int musthave;
1879 			int wordsize;
1880 
1881 #ifdef	_MULTI_DATAMODEL
1882 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1883 				wordsize = sizeof (int32_t);
1884 			} else
1885 #endif /* _MULTI_DATAMODEL */
1886 			{
1887 				wordsize = sizeof (int);
1888 			}
1889 
1890 			comps = PM_NUMCMPTS(dip);
1891 			if (comps == 0 || PM_GET_PM_INFO(dip) == NULL) {
1892 				PMD(PMD_ERROR, ("ioctl: %s: %s no components"
1893 				    " or not power managed--EINVAL\n", cmdstr,
1894 				    req.physpath))
1895 				ret = EINVAL;
1896 				break;
1897 			}
1898 			musthave = comps * 2 * wordsize;
1899 			if (req.datasize < musthave) {
1900 				PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
1901 				    "%d--EINVAL\n", cmdstr, req.datasize,
1902 				    musthave))
1903 				ret = EINVAL;
1904 				break;
1905 			}
1906 
1907 			PM_LOCK_DIP(dip);
1908 			(void) drv_getparm(TIME, &now);
1909 			timestamp = kmem_zalloc(comps * sizeof (time_t),
1910 			    KM_SLEEP);
1911 			pm_get_timestamps(dip, timestamp);
1912 			/*
1913 			 * First the current power levels
1914 			 */
1915 			for (i = 0; i < comps; i++) {
1916 				int curpwr;
1917 				int32_t curpwr32;
1918 				caddr_t cpaddr;
1919 
1920 				cp = PM_CP(dip, i);
1921 				if (wordsize == sizeof (int)) {
1922 					curpwr = pm_cur_power(cp);
1923 					cpaddr = (caddr_t)&curpwr;
1924 				} else {
1925 					ASSERT(wordsize == sizeof (int32_t));
1926 					curpwr32 = pm_cur_power(cp);
1927 					cpaddr = (caddr_t)&curpwr32;
1928 				}
1929 				if (ddi_copyout(cpaddr, (void *) req.data,
1930 				    wordsize, mode) != 0) {
1931 					PM_UNLOCK_DIP(dip);
1932 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1933 					    "(%s#%d) req.data %p EFAULT\n",
1934 					    cmdstr, PM_DEVICE(dip),
1935 					    (void *)req.data))
					/* don't leak the timestamp array */
					kmem_free(timestamp,
					    comps * sizeof (time_t));
1936 					ASSERT(!dipheld);
1937 					return (EFAULT);
1938 				}
1939 				cpaddr = (caddr_t)req.data;
1940 				cpaddr += wordsize;
1941 				req.data = cpaddr;
1942 			}
1943 			/*
1944 			 * Then the times remaining
1945 			 */
1946 			for (i = 0; i < comps; i++) {
1947 				int retval;
1948 				int32_t retval32;
1949 				caddr_t rvaddr;
1950 				int curpwr;
1951 
1952 				cp = PM_CP(dip, i);
1953 				curpwr = cp->pmc_cur_pwr;
1954 				if (curpwr == 0 || timestamp[i] == 0) {
1955 					PMD(PMD_STATS, ("ioctl: %s: "
1956 					    "cur_pwr %x, timestamp %lx\n",
1957 					    cmdstr, curpwr, timestamp[i]))
1958 					retval = INT_MAX;
1959 				} else {
1960 					int thresh;
1961 					(void) pm_current_threshold(dip, i,
1962 					    &thresh);
1963 					retval = thresh - (now - timestamp[i]);
1964 					PMD(PMD_STATS, ("ioctl: %s: current "
1965 					    "thresh %x, now %lx, timestamp %lx,"
1966 					    " retval %x\n", cmdstr, thresh, now,
1967 					    timestamp[i], retval))
1968 				}
1969 				if (wordsize == sizeof (int)) {
1970 					rvaddr = (caddr_t)&retval;
1971 				} else {
1972 					ASSERT(wordsize == sizeof (int32_t));
1973 					retval32 = retval;
1974 					rvaddr = (caddr_t)&retval32;
1975 				}
1976 				if (ddi_copyout(rvaddr, (void *) req.data,
1977 				    wordsize, mode) != 0) {
1978 					PM_UNLOCK_DIP(dip);
1979 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1980 					    "(%s#%d) req.data %p EFAULT\n",
1981 					    cmdstr, PM_DEVICE(dip),
1982 					    (void *)req.data))
					/* don't leak the timestamp array */
					kmem_free(timestamp,
					    comps * sizeof (time_t));
1983 					ASSERT(!dipheld);
1984 					return (EFAULT);
1985 				}
1986 				rvaddr = (caddr_t)req.data;
1987 				rvaddr += wordsize;
1988 				req.data = (int *)rvaddr;
1989 			}
1990 			PM_UNLOCK_DIP(dip);
1991 			*rval_p = comps;
1992 			ret = 0;
1993 			kmem_free(timestamp, comps * sizeof (time_t));
1994 			break;
1995 		}
1996 
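		/*
		 * PM_GET_COMPONENT_NAME and PM_GET_POWER_NAME below both
		 * return a string via copyoutstr() into the buffer named by
		 * req.data (req.datasize bytes long), with the length that
		 * copyoutstr() reports returned as the ioctl value.  A brief
		 * caller sketch, reusing the hypothetical fd and req from
		 * the PM_GET_STATS note above:
		 *
		 *	char name[64];
		 *	req.component = 0;
		 *	req.data = name;
		 *	req.datasize = sizeof (name);
		 *	(void) ioctl(fd, PM_GET_COMPONENT_NAME, &req);
		 *
		 * PM_GET_POWER_NAME additionally takes the power level index
		 * in req.value.
		 */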
1997 		case PM_GET_COMPONENT_NAME:
1998 			ASSERT(dip);
1999 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2000 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2001 				    "component %d > numcmpts - 1 %d--EINVAL\n",
2002 				    cmdstr, PM_DEVICE(dip), req.component,
2003 				    PM_NUMCMPTS(dip) - 1))
2004 				ret = EINVAL;
2005 				break;
2006 			}
2007 			if (ret = copyoutstr(cp->pmc_comp.pmc_name,
2008 			    (char *)req.data, req.datasize, &lencopied)) {
2009 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2010 				    "copyoutstr %p failed--EFAULT\n", cmdstr,
2011 				    PM_DEVICE(dip), (void *)req.data))
2012 				break;
2013 			}
2014 			*rval_p = lencopied;
2015 			ret = 0;
2016 			break;
2017 
2018 		case PM_GET_POWER_NAME:
2019 		{
2020 			int i;
2021 
2022 			ASSERT(dip);
2023 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2024 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2025 				    "component %d > numcmpts - 1 %d--EINVAL\n",
2026 				    cmdstr, PM_DEVICE(dip), req.component,
2027 				    PM_NUMCMPTS(dip) - 1))
2028 				ret = EINVAL;
2029 				break;
2030 			}
2031 			if ((i = req.value) < 0 ||
2032 			    i > cp->pmc_comp.pmc_numlevels - 1) {
2033 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2034 				    "value %d > num_levels - 1 %d--EINVAL\n",
2035 				    cmdstr, PM_DEVICE(dip), req.value,
2036 				    cp->pmc_comp.pmc_numlevels - 1))
2037 				ret = EINVAL;
2038 				break;
2039 			}
2040 			dep = cp->pmc_comp.pmc_lnames[req.value];
2041 			if (ret = copyoutstr(dep,
2042 			    req.data, req.datasize, &lencopied)) {
2043 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2044 				    "copyoutstr %p failed--EFAULT\n", cmdstr,
2045 				    PM_DEVICE(dip), (void *)req.data))
2046 				break;
2047 			}
2048 			*rval_p = lencopied;
2049 			ret = 0;
2050 			break;
2051 		}
2052 
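		/*
		 * A natural caller pattern for the two level queries below
		 * is to ask for the level count first and then size the
		 * buffer for the level values.  A brief sketch (fd and req
		 * as in the notes above; allocation and error handling
		 * omitted):
		 *
		 *	req.component = 0;
		 *	int n = ioctl(fd, PM_GET_NUM_POWER_LEVELS, &req);
		 *	int *levels = malloc(n * sizeof (int));
		 *	req.data = levels;
		 *	req.datasize = n * sizeof (int);
		 *	(void) ioctl(fd, PM_GET_POWER_LEVELS, &req);
		 */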
2053 		case PM_GET_POWER_LEVELS:
2054 		{
2055 			int musthave;
2056 			int numlevels;
2057 			int wordsize;
2058 
2059 #ifdef	_MULTI_DATAMODEL
2060 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2061 				wordsize = sizeof (int32_t);
2062 			} else
2063 #endif /* _MULTI_DATAMODEL */
2064 			{
2065 				wordsize = sizeof (int);
2066 			}
2067 			ASSERT(dip);
2068 
2069 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2070 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2071 				    "has %d components, component %d requested"
2072 				    "--EINVAL\n", cmdstr, PM_DEVICE(dip),
2073 				    PM_NUMCMPTS(dip), req.component))
2074 				ret = EINVAL;
2075 				break;
2076 			}
2077 			numlevels = cp->pmc_comp.pmc_numlevels;
2078 			musthave = numlevels * wordsize;
2079 			if (req.datasize < musthave) {
2080 				PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
2081 				    "%d--EINVAL\n", cmdstr, req.datasize,
2082 				    musthave))
2083 				ret = EINVAL;
2084 				break;
2085 			}
2086 			PM_LOCK_DIP(dip);
2087 			for (i = 0; i < numlevels; i++) {
2088 				int level;
2089 				int32_t level32;
2090 				caddr_t laddr;
2091 
2092 				if (wordsize == sizeof (int)) {
2093 					level = cp->pmc_comp.pmc_lvals[i];
2094 					laddr = (caddr_t)&level;
2095 				} else {
2096 					level32 = cp->pmc_comp.pmc_lvals[i];
2097 					laddr = (caddr_t)&level32;
2098 				}
2099 				if (ddi_copyout(laddr, (void *) req.data,
2100 				    wordsize, mode) != 0) {
2101 					PM_UNLOCK_DIP(dip);
2102 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
2103 					    "(%s#%d) laddr %p EFAULT\n",
2104 					    cmdstr, PM_DEVICE(dip),
2105 					    (void *)laddr))
2106 					ASSERT(!dipheld);
2107 					return (EFAULT);
2108 				}
2109 				laddr = (caddr_t)req.data;
2110 				laddr += wordsize;
2111 				req.data = (int *)laddr;
2112 			}
2113 			PM_UNLOCK_DIP(dip);
2114 			*rval_p = numlevels;
2115 			ret = 0;
2116 			break;
2117 		}
2118 
2119 
2120 		case PM_GET_NUM_POWER_LEVELS:
2121 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2122 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2123 				    "component %d > numcmpts - 1 %d--EINVAL\n",
2124 				    cmdstr, PM_DEVICE(dip), req.component,
2125 				    PM_NUMCMPTS(dip) - 1))
2126 				ret = EINVAL;
2127 				break;
2128 			}
2129 			*rval_p = cp->pmc_comp.pmc_numlevels;
2130 			ret = 0;
2131 			break;
2132 
2133 		case PM_GET_DEVICE_THRESHOLD_BASIS:
2134 			ret = 0;
2135 			PM_LOCK_DIP(dip);
2136 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
2137 				PM_UNLOCK_DIP(dip);
2138 				PMD(PMD_ERROR, ("ioctl: %s: "
2139 				    "PM_NO_PM_COMPONENTS\n", cmdstr))
2140 				*rval_p = PM_NO_PM_COMPONENTS;
2141 				break;
2142 			}
2143 			if (PM_ISDIRECT(dip)) {
2144 				PM_UNLOCK_DIP(dip);
2145 				*rval_p = PM_DIRECTLY_MANAGED;
2146 				break;
2147 			}
2148 			switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
2149 			case PMC_DEF_THRESH:
2150 			case PMC_NEXDEF_THRESH:
2151 				*rval_p = PM_DEFAULT_THRESHOLD;
2152 				break;
2153 			case PMC_DEV_THRESH:
2154 				*rval_p = PM_DEVICE_THRESHOLD;
2155 				break;
2156 			case PMC_COMP_THRESH:
2157 				*rval_p = PM_COMPONENT_THRESHOLD;
2158 				break;
2159 			case PMC_CPU_THRESH:
2160 				*rval_p = PM_CPU_THRESHOLD;
2161 				break;
2162 			default:
2163 				if (PM_ISBC(dip)) {
2164 					*rval_p = PM_OLD_THRESHOLD;
2165 					break;
2166 				}
2167 				PMD(PMD_ERROR, ("ioctl: %s: default, not "
2168 				    "BC--EINVAL", cmdstr))
2169 				ret = EINVAL;
2170 				break;
2171 			}
2172 			PM_UNLOCK_DIP(dip);
2173 			break;
2174 		}
2175 		break;
2176 
2177 	case PM_PSC:
2178 		/*
2179 		 * Commands that require pm_state_change_t as arg
2180 		 */
2181 #ifdef	_MULTI_DATAMODEL
2182 		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2183 			pscp32 = (pm_state_change32_t *)arg;
2184 			if (ddi_copyin((caddr_t)arg, &psc32,
2185 			    sizeof (psc32), mode) != 0) {
2186 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2187 				    "EFAULT\n", cmdstr))
2188 				ASSERT(!dipheld);
2189 				return (EFAULT);
2190 			}
2191 			psc.physpath = (caddr_t)(uintptr_t)psc32.physpath;
2192 			psc.size = psc32.size;
2193 		} else
2194 #endif /* _MULTI_DATAMODEL */
2195 		{
2196 			pscp = (pm_state_change_t *)arg;
2197 			if (ddi_copyin((caddr_t)arg, &psc,
2198 			    sizeof (psc), mode) != 0) {
2199 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2200 				    "EFAULT\n", cmdstr))
2201 				ASSERT(!dipheld);
2202 				return (EFAULT);
2203 			}
2204 		}
2205 		switch (cmd) {
2206 
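		/*
		 * For both state-change cases below the caller supplies a
		 * pm_state_change_t whose physpath points at a buffer of
		 * psc.size bytes; the device path is copied out there, and
		 * the event fields starting at the component member (up to,
		 * but not including, size) are copied back into the caller's
		 * structure.  A brief sketch; /dev/pm and MAXPATHLEN are
		 * assumptions here:
		 *
		 *	char path[MAXPATHLEN];
		 *	pm_state_change_t psc;
		 *	psc.physpath = path;
		 *	psc.size = sizeof (path);
		 *	if (ioctl(fd, PM_GET_STATE_CHANGE, &psc) == 0)
		 *		use psc.component, psc.old_level, psc.new_level
		 *
		 * PM_GET_STATE_CHANGE fails with EWOULDBLOCK when no event
		 * is queued; PM_GET_STATE_CHANGE_WAIT blocks instead and
		 * returns EINTR if interrupted by a signal.
		 */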
2207 		case PM_GET_STATE_CHANGE:
2208 		case PM_GET_STATE_CHANGE_WAIT:
2209 		{
2210 			psce_t			*pscep;
2211 			pm_state_change_t	*p;
2212 			caddr_t			physpath;
2213 			size_t			physlen;
2214 
2215 			/*
2216 			 * We want to know if any device has changed state.
2217 			 * We look up by clone.  In case we have another thread
2218 			 * from the same process, we loop.
2219 			 * pm_psc_clone_to_interest() returns a locked entry.
2220 			 * We make an internal copy of the event entry before
2221 			 * copying it out to user space because we don't want
2222 			 * to hold psce_lock across the copyout: the copyout
2223 			 * may take a page fault, which could bring us back
2224 			 * here requesting the same lock.
2225 			 */
2226 			mutex_enter(&pm_clone_lock);
2227 			if (!pm_interest_registered(clone))
2228 				pm_register_watcher(clone, NULL);
2229 			while ((pscep =
2230 			    pm_psc_clone_to_interest(clone)) == NULL) {
2231 				if (cmd == PM_GET_STATE_CHANGE) {
2232 					PMD(PMD_IOCTL, ("ioctl: %s: "
2233 					    "EWOULDBLOCK\n", cmdstr))
2234 					mutex_exit(&pm_clone_lock);
2235 					ASSERT(!dipheld);
2236 					return (EWOULDBLOCK);
2237 				} else {
2238 					if (cv_wait_sig(&pm_clones_cv[clone],
2239 					    &pm_clone_lock) == 0) {
2240 						mutex_exit(&pm_clone_lock);
2241 						PMD(PMD_ERROR, ("ioctl: %s "
2242 						    "EINTR\n", cmdstr))
2243 						ASSERT(!dipheld);
2244 						return (EINTR);
2245 					}
2246 				}
2247 			}
2248 			mutex_exit(&pm_clone_lock);
2249 
2250 			physlen = pscep->psce_out->size;
2251 			physpath = NULL;
2252 			/*
2253 			 * If we were unable to store the path while bringing
2254 			 * up the console fb upon entering the prom, we return
2255 			 * an empty ("") name with the overrun event set.
2256 			 */
2257 			if (physlen == (size_t)-1) {	/* kmemalloc failed */
2258 				physpath = kmem_zalloc(1, KM_SLEEP);
2259 				physlen = 1;
2260 			}
2261 			if ((psc.physpath == NULL) || (psc.size < physlen)) {
2262 				PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n", cmdstr))
2263 				mutex_exit(&pscep->psce_lock);
2264 				ret = EFAULT;
2265 				break;
2266 			}
2267 			if (physpath == NULL) {
2268 				physpath = kmem_zalloc(physlen, KM_SLEEP);
2269 				bcopy((const void *) pscep->psce_out->physpath,
2270 				    (void *) physpath, physlen);
2271 			}
2272 
2273 			p = pscep->psce_out;
2274 #ifdef	_MULTI_DATAMODEL
2275 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2276 #ifdef DEBUG
2277 				size_t usrcopysize;
2278 #endif
2279 				psc32.flags = (ushort_t)p->flags;
2280 				psc32.event = (ushort_t)p->event;
2281 				psc32.timestamp = (int32_t)p->timestamp;
2282 				psc32.component = (int32_t)p->component;
2283 				psc32.old_level = (int32_t)p->old_level;
2284 				psc32.new_level = (int32_t)p->new_level;
2285 				copysize32 = ((intptr_t)&psc32.size -
2286 				    (intptr_t)&psc32.component);
2287 #ifdef DEBUG
2288 				usrcopysize = ((intptr_t)&pscp32->size -
2289 				    (intptr_t)&pscp32->component);
2290 				ASSERT(usrcopysize == copysize32);
2291 #endif
2292 			} else
2293 #endif /* _MULTI_DATAMODEL */
2294 			{
2295 				psc.flags = p->flags;
2296 				psc.event = p->event;
2297 				psc.timestamp = p->timestamp;
2298 				psc.component = p->component;
2299 				psc.old_level = p->old_level;
2300 				psc.new_level = p->new_level;
2301 				copysize = ((long)&p->size -
2302 				    (long)&p->component);
2303 			}
2304 			if (p->size != (size_t)-1)
2305 				kmem_free(p->physpath, p->size);
2306 			p->size = 0;
2307 			p->physpath = NULL;
2308 			if (pscep->psce_out == pscep->psce_last)
2309 				p = pscep->psce_first;
2310 			else
2311 				p++;
2312 			pscep->psce_out = p;
2313 			mutex_exit(&pscep->psce_lock);
2314 
2315 			ret = copyoutstr(physpath, psc.physpath,
2316 			    physlen, &lencopied);
2317 			kmem_free(physpath, physlen);
2318 			if (ret) {
2319 				PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2320 				    "failed--EFAULT\n", cmdstr,
2321 				    (void *)psc.physpath))
2322 				break;
2323 			}
2324 
2325 #ifdef	_MULTI_DATAMODEL
2326 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2327 				if (ddi_copyout(&psc32.component,
2328 				    &pscp32->component, copysize32, mode)
2329 				    != 0) {
2330 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2331 					    "failed--EFAULT\n", cmdstr))
2332 					ret = EFAULT;
2333 					break;
2334 				}
2335 			} else
2336 #endif	/* _MULTI_DATAMODEL */
2337 			{
2338 				if (ddi_copyout(&psc.component,
2339 				    &pscp->component, copysize, mode) != 0) {
2340 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2341 					    "failed--EFAULT\n", cmdstr))
2342 					ret = EFAULT;
2343 					break;
2344 				}
2345 			}
2346 			ret = 0;
2347 			break;
2348 		}
2349 
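		/*
		 * The direct-notify cases below follow the same
		 * pm_state_change_t protocol as the state-change cases
		 * above, but report only events for devices this clone has
		 * claimed for direct power management, gated by
		 * pm_poll_cnt[clone].  A caller that prefers not to block in
		 * the ioctl can wait in poll(2) on the pm file descriptor
		 * and then issue the non-blocking form; a sketch (the POLLIN
		 * event mask is an assumption here):
		 *
		 *	struct pollfd pfd = { fd, POLLIN, 0 };
		 *	(void) poll(&pfd, 1, -1);
		 *	(void) ioctl(fd, PM_DIRECT_NOTIFY, &psc);
		 */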
2350 		case PM_DIRECT_NOTIFY:
2351 		case PM_DIRECT_NOTIFY_WAIT:
2352 		{
2353 			psce_t			*pscep;
2354 			pm_state_change_t	*p;
2355 			caddr_t			physpath;
2356 			size_t			physlen;
2357 			/*
2358 			 * We want to know if any direct device of ours has
2359 			 * something we should know about.  We look up by clone.
2360 			 * In case we have another thread from the same process,
2361 			 * we loop.
2362 			 * pm_psc_clone_to_direct() returns a locked entry.
2363 			 */
2364 			mutex_enter(&pm_clone_lock);
2365 			while (pm_poll_cnt[clone] == 0 ||
2366 			    (pscep = pm_psc_clone_to_direct(clone)) == NULL) {
2367 				if (cmd == PM_DIRECT_NOTIFY) {
2368 					PMD(PMD_IOCTL, ("ioctl: %s: "
2369 					    "EWOULDBLOCK\n", cmdstr))
2370 					mutex_exit(&pm_clone_lock);
2371 					ASSERT(!dipheld);
2372 					return (EWOULDBLOCK);
2373 				} else {
2374 					if (cv_wait_sig(&pm_clones_cv[clone],
2375 					    &pm_clone_lock) == 0) {
2376 						mutex_exit(&pm_clone_lock);
2377 						PMD(PMD_ERROR, ("ioctl: %s: "
2378 						    "EINTR\n", cmdstr))
2379 						ASSERT(!dipheld);
2380 						return (EINTR);
2381 					}
2382 				}
2383 			}
2384 			mutex_exit(&pm_clone_lock);
2385 			physlen = pscep->psce_out->size;
2386 			if ((psc.physpath == NULL) || (psc.size < physlen)) {
2387 				mutex_exit(&pscep->psce_lock);
2388 				PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n",
2389 				    cmdstr))
2390 				ret = EFAULT;
2391 				break;
2392 			}
2393 			physpath = kmem_zalloc(physlen, KM_SLEEP);
2394 			bcopy((const void *) pscep->psce_out->physpath,
2395 			    (void *) physpath, physlen);
2396 
2397 			p = pscep->psce_out;
2398 #ifdef	_MULTI_DATAMODEL
2399 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2400 #ifdef DEBUG
2401 				size_t usrcopysize;
2402 #endif
2403 				psc32.component = (int32_t)p->component;
2404 				psc32.flags = (ushort_t)p->flags;
2405 				psc32.event = (ushort_t)p->event;
2406 				psc32.timestamp = (int32_t)p->timestamp;
2407 				psc32.old_level = (int32_t)p->old_level;
2408 				psc32.new_level = (int32_t)p->new_level;
2409 				copysize32 = (intptr_t)&psc32.size -
2410 				    (intptr_t)&psc32.component;
2411 				PMD(PMD_DPM, ("ioctl: %s: PDN32 %s, comp %d "
2412 				    "%d -> %d\n", cmdstr, physpath,
2413 				    p->component, p->old_level, p->new_level))
2414 #ifdef DEBUG
2415 				usrcopysize = (intptr_t)&pscp32->size -
2416 				    (intptr_t)&pscp32->component;
2417 				ASSERT(usrcopysize == copysize32);
2418 #endif
2419 			} else
2420 #endif
2421 			{
2422 				psc.component = p->component;
2423 				psc.flags = p->flags;
2424 				psc.event = p->event;
2425 				psc.timestamp = p->timestamp;
2426 				psc.old_level = p->old_level;
2427 				psc.new_level = p->new_level;
2428 				copysize = (intptr_t)&p->size -
2429 				    (intptr_t)&p->component;
2430 				PMD(PMD_DPM, ("ioctl: %s: PDN %s, comp %d "
2431 				    "%d -> %d\n", cmdstr, physpath,
2432 				    p->component, p->old_level, p->new_level))
2433 			}
2434 			mutex_enter(&pm_clone_lock);
2435 			PMD(PMD_IOCTL, ("ioctl: %s: pm_poll_cnt[%d] is %d "
2436 			    "before decrement\n", cmdstr, clone,
2437 			    pm_poll_cnt[clone]))
2438 			pm_poll_cnt[clone]--;
2439 			mutex_exit(&pm_clone_lock);
2440 			kmem_free(p->physpath, p->size);
2441 			p->size = 0;
2442 			p->physpath = NULL;
2443 			if (pscep->psce_out == pscep->psce_last)
2444 				p = pscep->psce_first;
2445 			else
2446 				p++;
2447 			pscep->psce_out = p;
2448 			mutex_exit(&pscep->psce_lock);
2449 
2450 			ret = copyoutstr(physpath, psc.physpath,
2451 			    physlen, &lencopied);
2452 			kmem_free(physpath, physlen);
2453 			if (ret) {
2454 				PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2455 				    "failed--EFAULT\n", cmdstr,
2456 				    (void *)psc.physpath))
2457 				break;
2458 			}
2459 
2460 #ifdef	_MULTI_DATAMODEL
2461 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2462 				if (ddi_copyout(&psc32.component,
2463 				    &pscp32->component, copysize32, mode)
2464 				    != 0) {
2465 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2466 					    "failed--EFAULT\n", cmdstr))
2467 					ret = EFAULT;
2468 					break;
2469 				}
2470 			} else
2471 #endif	/* _MULTI_DATAMODEL */
2472 			{
2473 				if (ddi_copyout(&psc.component,
2474 				    &pscp->component, copysize, mode) != 0) {
2475 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2476 					    "failed--EFAULT\n", cmdstr))
2477 					ret = EFAULT;
2478 					break;
2479 				}
2480 			}
2481 			ret = 0;
2482 			break;
2483 		}
2484 		default:
2485 			ASSERT(0);
2486 		}
2487 		break;
2488 
2489 	case NOSTRUCT:
2490 		switch (cmd) {
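		/*
		 * PM_START_PM and PM_START_CPUPM take no argument structure;
		 * they enable automatic (CPU) power management and fail with
		 * EBUSY if it is already enabled, e.g.:
		 *
		 *	if (ioctl(fd, PM_START_PM, 0) != 0 && errno == EBUSY)
		 *		(already enabled)
		 */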
2491 		case PM_START_PM:
2492 		case PM_START_CPUPM:
2493 			mutex_enter(&pm_scan_lock);
2494 			if ((cmd == PM_START_PM && autopm_enabled) ||
2495 			    (cmd == PM_START_CPUPM && PM_CPUPM_ENABLED)) {
2496 				mutex_exit(&pm_scan_lock);
2497 				PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
2498 				    cmdstr))
2499 				ret = EBUSY;
2500 				break;
2501 			}
2502 			if (cmd == PM_START_PM)
2503 				autopm_enabled = 1;
2504 			else
2505 				cpupm = PM_CPUPM_ENABLE;
2506 			mutex_exit(&pm_scan_lock);
2507 			ddi_walk_devs(ddi_root_node(), pm_start_pm_walk, &cmd);
2508 			ret = 0;
2509 			break;
2510 
2511 		case PM_RESET_PM:
2512 		case PM_STOP_PM:
2513 		case PM_STOP_CPUPM:
2514 		{
2515 			extern void pm_discard_thresholds(void);
2516 
2517 			mutex_enter(&pm_scan_lock);
2518 			if ((cmd == PM_STOP_PM && !autopm_enabled) ||
2519 			    (cmd == PM_STOP_CPUPM && PM_CPUPM_DISABLED)) {
2520 				mutex_exit(&pm_scan_lock);
2521 				PMD(PMD_ERROR, ("ioctl: %s: EINVAL\n",
2522 				    cmdstr))
2523 				ret = EINVAL;
2524 				break;
2525 			}
2526 			if (cmd == PM_STOP_PM)
2527 				autopm_enabled = 0;
2528 			else if (cmd == PM_STOP_CPUPM)
2529 				cpupm = PM_CPUPM_DISABLE;
2530 			else {
2531 				autopm_enabled = 0;
2532 				cpupm = PM_CPUPM_NOTSET;
2533 			}
2534 			mutex_exit(&pm_scan_lock);
2535 
2536 			/*
2537 			 * bring devices to full power level, stop scan
2538 			 */
2539 			ddi_walk_devs(ddi_root_node(), pm_stop_pm_walk, &cmd);
2540 			ret = 0;
2541 			if (cmd == PM_STOP_PM || cmd == PM_STOP_CPUPM)
2542 				break;
2543 			/*
2544 			 * Now do only PM_RESET_PM stuff.
2545 			 */
2546 			pm_system_idle_threshold = pm_default_idle_threshold;
2547 			pm_cpu_idle_threshold = 0;
2548 			pm_discard_thresholds();
2549 			pm_all_to_default_thresholds();
2550 			pm_dispatch_to_dep_thread(PM_DEP_WK_REMOVE_DEP,
2551 			    NULL, NULL, PM_DEP_WAIT, NULL, 0);
2552 			break;
2553 		}
2554 
2555 		case PM_GET_SYSTEM_THRESHOLD:
2556 			*rval_p = pm_system_idle_threshold;
2557 			ret = 0;
2558 			break;
2559 
2560 		case PM_GET_DEFAULT_SYSTEM_THRESHOLD:
2561 			*rval_p = pm_default_idle_threshold;
2562 			ret = 0;
2563 			break;
2564 
2565 		case PM_GET_CPU_THRESHOLD:
2566 			*rval_p = pm_cpu_idle_threshold;
2567 			ret = 0;
2568 			break;
2569 
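		/*
		 * For the two threshold setters below the new threshold is
		 * passed directly as the ioctl argument (negative values are
		 * rejected), and the getters above return the current value
		 * as the ioctl return value.  A brief sketch (1800 is only
		 * an example value):
		 *
		 *	(void) ioctl(fd, PM_SET_SYSTEM_THRESHOLD, 1800);
		 *	int thresh = ioctl(fd, PM_GET_SYSTEM_THRESHOLD, 0);
		 */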
2570 		case PM_SET_SYSTEM_THRESHOLD:
2571 		case PM_SET_CPU_THRESHOLD:
2572 			if ((int)arg < 0) {
2573 				PMD(PMD_ERROR, ("ioctl: %s: arg 0x%x < 0"
2574 				    "--EINVAL\n", cmdstr, (int)arg))
2575 				ret = EINVAL;
2576 				break;
2577 			}
2578 			PMD(PMD_IOCTL, ("ioctl: %s: 0x%x 0t%d\n", cmdstr,
2579 			    (int)arg, (int)arg))
2580 			if (cmd == PM_SET_SYSTEM_THRESHOLD)
2581 				pm_system_idle_threshold = (int)arg;
2582 			else {
2583 				pm_cpu_idle_threshold = (int)arg;
2584 			}
2585 			ddi_walk_devs(ddi_root_node(), pm_set_idle_thresh_walk,
2586 			    (void *) &cmd);
2587 
2588 			ret = 0;
2589 			break;
2590 
2591 		case PM_IDLE_DOWN:
2592 			if (pm_timeout_idledown() != 0) {
2593 				ddi_walk_devs(ddi_root_node(),
2594 				    pm_start_idledown, (void *)PMID_IOC);
2595 			}
2596 			ret = 0;
2597 			break;
2598 
2599 		case PM_GET_PM_STATE:
2600 			if (autopm_enabled) {
2601 				*rval_p = PM_SYSTEM_PM_ENABLED;
2602 			} else {
2603 				*rval_p = PM_SYSTEM_PM_DISABLED;
2604 			}
2605 			ret = 0;
2606 			break;
2607 
2608 		case PM_GET_CPUPM_STATE:
2609 			if (PM_CPUPM_ENABLED)
2610 				*rval_p = PM_CPU_PM_ENABLED;
2611 			else if (PM_CPUPM_DISABLED)
2612 				*rval_p = PM_CPU_PM_DISABLED;
2613 			else
2614 				*rval_p = PM_CPU_PM_NOTSET;
2615 			ret = 0;
2616 			break;
2617 		}
2618 		break;
2619 
2620 	default:
2621 		/*
2622 		 * Internal error, invalid ioctl description
2623 		 * force debug entry even if pm_debug not set
2624 		 */
2625 #ifdef	DEBUG
2626 		pm_log("ioctl: invalid str_type %d for cmd %d (%s)\n",
2627 		    pcip->str_type, cmd, pcip->name);
2628 #endif
2629 		ASSERT(0);
2630 		return (EIO);
2631 	}
2632 	ASSERT(ret != 0x0badcafe);	/* some cmd in wrong case! */
2633 	if (dipheld) {
2634 		ASSERT(dip);
2635 		PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d) for "
2636 		    "exiting pm_ioctl\n", cmdstr, PM_DEVICE(dip)))
2637 		PM_RELE(dip);
2638 	}
2639 	PMD(PMD_IOCTL, ("ioctl: %s: end, ret=%d\n", cmdstr, ret))
2640 	return (ret);
2641 }
2642