xref: /titanic_51/usr/src/uts/common/io/pm.c (revision a12e05a04a5d5850f645c79e7e2c74f8d6b7c5ec)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * pm	This driver now only handles the ioctl interface.  The scanning
30  *	and policy stuff now lives in common/os/sunpm.c.
31  *	Not DDI compliant
32  */
33 
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/modctl.h>
37 #include <sys/conf.h>		/* driver flags and functions */
38 #include <sys/open.h>		/* OTYP_CHR definition */
39 #include <sys/stat.h>		/* S_IFCHR definition */
40 #include <sys/pathname.h>	/* name -> dev_info xlation */
41 #include <sys/kmem.h>		/* memory alloc stuff */
42 #include <sys/debug.h>
43 #include <sys/pm.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/epm.h>
47 #include <sys/vfs.h>
48 #include <sys/mode.h>
49 #include <sys/mkdev.h>
50 #include <sys/promif.h>
51 #include <sys/consdev.h>
52 #include <sys/ddi_impldefs.h>
53 #include <sys/poll.h>
54 #include <sys/note.h>
55 #include <sys/taskq.h>
56 #include <sys/policy.h>
57 
58 /*
59  * Minor number is instance<<8 + clone minor from range 1-255; (0 reserved
60  * for "original"
61  */
62 #define	PM_MINOR_TO_CLONE(minor) ((minor) & (PM_MAX_CLONE - 1))
63 
64 #define	PM_NUMCMPTS(dip) (DEVI(dip)->devi_pm_num_components)
65 #define	PM_IS_CFB(dip) (DEVI(dip)->devi_pm_flags & PMC_CONSOLE_FB)
66 #define	PM_MAJOR(dip) ddi_driver_major(dip)
67 #define	PM_RELE(dip) ddi_release_devi(dip)
68 
69 #define	PM_IDLEDOWN_TIME	10
70 
71 extern kmutex_t	pm_scan_lock;	/* protects autopm_enable, pm_scans_disabled */
72 extern kmutex_t	pm_clone_lock;	/* protects pm_clones array */
73 extern int	autopm_enabled;
74 extern pm_cpupm_t cpupm;
75 extern int	pm_default_idle_threshold;
76 extern int	pm_system_idle_threshold;
77 extern int	pm_cpu_idle_threshold;
78 extern kcondvar_t pm_clones_cv[PM_MAX_CLONE];
79 extern uint_t	pm_poll_cnt[PM_MAX_CLONE];
80 
81 /*
82  * The soft state of the power manager.  Since there will only
83  * one of these, just reference it through a static pointer.
84  */
85 static struct pmstate {
86 	dev_info_t	*pm_dip;		/* ptr to our dev_info node */
87 	int		pm_instance;		/* for ddi_get_instance() */
88 	timeout_id_t	pm_idledown_id;		/* pm idledown timeout id */
89 	uchar_t		pm_clones[PM_MAX_CLONE]; /* uniqueify multiple opens */
90 	struct cred	*pm_cred[PM_MAX_CLONE];	/* cred for each unique open */
91 } pm_state = { NULL, -1, (timeout_id_t)0 };
92 typedef struct pmstate *pm_state_t;
93 static pm_state_t pmstp = &pm_state;
94 
95 static int	pm_open(dev_t *, int, int, cred_t *);
96 static int	pm_close(dev_t, int, int, cred_t *);
97 static int	pm_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
98 static int	pm_chpoll(dev_t, short, int, short *, struct pollhead **);
99 
100 static struct cb_ops pm_cb_ops = {
101 	pm_open,	/* open */
102 	pm_close,	/* close */
103 	nodev,		/* strategy */
104 	nodev,		/* print */
105 	nodev,		/* dump */
106 	nodev,		/* read */
107 	nodev,		/* write */
108 	pm_ioctl,	/* ioctl */
109 	nodev,		/* devmap */
110 	nodev,		/* mmap */
111 	nodev,		/* segmap */
112 	pm_chpoll,	/* poll */
113 	ddi_prop_op,	/* prop_op */
114 	NULL,		/* streamtab */
115 	D_NEW | D_MP	/* driver compatibility flag */
116 };
117 
118 static int pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
119     void **result);
120 static int pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
121 static int pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
122 
123 static struct dev_ops pm_ops = {
124 	DEVO_REV,		/* devo_rev */
125 	0,			/* refcnt */
126 	pm_getinfo,		/* info */
127 	nulldev,		/* identify */
128 	nulldev,		/* probe */
129 	pm_attach,		/* attach */
130 	pm_detach,		/* detach */
131 	nodev,			/* reset */
132 	&pm_cb_ops,		/* driver operations */
133 	NULL,			/* bus operations */
134 	NULL			/* power */
135 };
136 
137 static struct modldrv modldrv = {
138 	&mod_driverops,
139 	"power management driver v%I%",
140 	&pm_ops
141 };
142 
143 static struct modlinkage modlinkage = {
144 	MODREV_1, &modldrv, 0
145 };
146 
147 /* Local functions */
148 #ifdef DEBUG
149 static int	print_info(dev_info_t *, void *);
150 
151 #endif
152 
153 int
154 _init(void)
155 {
156 	return (mod_install(&modlinkage));
157 }
158 
159 int
160 _fini(void)
161 {
162 	return (mod_remove(&modlinkage));
163 }
164 
165 int
166 _info(struct modinfo *modinfop)
167 {
168 	return (mod_info(&modlinkage, modinfop));
169 }
170 
171 static int
172 pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
173 {
174 	int		i;
175 
176 	switch (cmd) {
177 
178 	case DDI_ATTACH:
179 		if (pmstp->pm_instance != -1)	/* Only allow one instance */
180 			return (DDI_FAILURE);
181 		pmstp->pm_instance = ddi_get_instance(dip);
182 		if (ddi_create_minor_node(dip, "pm", S_IFCHR,
183 		    (pmstp->pm_instance << 8) + 0,
184 			DDI_PSEUDO, 0) != DDI_SUCCESS) {
185 			return (DDI_FAILURE);
186 		}
187 		pmstp->pm_dip = dip;	/* pm_init and getinfo depend on it */
188 
189 		for (i = 0; i < PM_MAX_CLONE; i++)
190 			cv_init(&pm_clones_cv[i], NULL, CV_DEFAULT, NULL);
191 
192 		ddi_report_dev(dip);
193 		return (DDI_SUCCESS);
194 
195 	default:
196 		return (DDI_FAILURE);
197 	}
198 }
199 
200 /* ARGSUSED */
201 static int
202 pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
203 {
204 	int i;
205 
206 	switch (cmd) {
207 	case DDI_DETACH:
208 		/*
209 		 * Don't detach while idledown timeout is pending.  Note that
210 		 * we already know we're not in pm_ioctl() due to framework
211 		 * synchronization, so this is a sufficient test
212 		 */
213 		if (pmstp->pm_idledown_id)
214 			return (DDI_FAILURE);
215 
216 		for (i = 0; i < PM_MAX_CLONE; i++)
217 			cv_destroy(&pm_clones_cv[i]);
218 
219 		ddi_remove_minor_node(dip, NULL);
220 		pmstp->pm_instance = -1;
221 		return (DDI_SUCCESS);
222 
223 	default:
224 		return (DDI_FAILURE);
225 	}
226 }
227 
228 static int
229 pm_close_direct_pm_device(dev_info_t *dip, void *arg)
230 {
231 	int clone;
232 	char *pathbuf;
233 	pm_info_t *info = PM_GET_PM_INFO(dip);
234 
235 	clone = *((int *)arg);
236 
237 	if (!info)
238 		return (DDI_WALK_CONTINUE);
239 
240 	pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
241 	PM_LOCK_DIP(dip);
242 	if (clone == info->pmi_clone) {
243 		PMD(PMD_CLOSE, ("pm_close: found %s@%s(%s#%d)\n",
244 		    PM_DEVICE(dip)))
245 		ASSERT(PM_ISDIRECT(dip));
246 		info->pmi_dev_pm_state &= ~PM_DIRECT;
247 		PM_UNLOCK_DIP(dip);
248 		pm_proceed(dip, PMP_RELEASE, -1, -1);
249 		/* Bring ourselves up if there is a keeper that is up */
250 		(void) ddi_pathname(dip, pathbuf);
251 		pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF, NULL,
252 		    pathbuf, PM_DEP_NOWAIT, NULL, 0);
253 		PM_LOCK_DIP(dip);
254 		info->pmi_clone = 0;
255 		PM_UNLOCK_DIP(dip);
256 	} else {
257 		PM_UNLOCK_DIP(dip);
258 	}
259 	kmem_free(pathbuf, MAXPATHLEN);
260 
261 	/* restart autopm on device released from direct pm */
262 	pm_rescan(dip);
263 
264 	return (DDI_WALK_CONTINUE);
265 }
266 
267 #define	PM_REQ		1
268 #define	NOSTRUCT	2
269 #define	DIP		3
270 #define	NODIP		4
271 #define	NODEP		5
272 #define	DEP		6
273 #define	PM_PSC		7
274 
275 #define	CHECKPERMS	0x001
276 #define	SU		0x002
277 #define	SG		0x004
278 #define	OWNER		0x008
279 
280 #define	INWHO		0x001
281 #define	INDATAINT	0x002
282 #define	INDATASTRING	0x004
283 #define	INDEP		0x008
284 #define	INDATAOUT	0x010
285 #define	INDATA	(INDATAOUT | INDATAINT | INDATASTRING | INDEP)
286 
287 struct pm_cmd_info {
288 	int cmd;		/* command code */
289 	char *name;		/* printable string */
290 	int supported;		/* true if still supported */
291 	int str_type;		/* PM_REQ or NOSTRUCT */
292 	int inargs;		/* INWHO, INDATAINT, INDATASTRING, INDEP, */
293 				/* INDATAOUT */
294 	int diptype;		/* DIP or NODIP */
295 	int deptype;		/* DEP or NODEP */
296 	int permission;		/* SU, GU, or CHECKPERMS */
297 };
298 
299 #ifdef DEBUG
300 char *pm_cmd_string;
301 int pm_cmd;
302 #endif
303 
304 /*
305  * Returns true if permission granted by credentials
306  */
307 static int
308 pm_perms(int perm, cred_t *cr)
309 {
310 	if (perm == 0)			/* no restrictions */
311 		return (1);
312 	if (perm == CHECKPERMS)		/* ok for now (is checked later) */
313 		return (1);
314 	if ((perm & SU) && secpolicy_power_mgmt(cr) == 0) /* privileged? */
315 		return (1);
316 	if ((perm & SG) && (crgetgid(cr) == 0))	/* group 0 is ok */
317 		return (1);
318 	return (0);
319 }
320 
321 #ifdef DEBUG
322 static int
323 print_info(dev_info_t *dip, void *arg)
324 {
325 	_NOTE(ARGUNUSED(arg))
326 	pm_info_t	*info;
327 	int		i, j;
328 	struct pm_component *cp;
329 	extern int pm_cur_power(pm_component_t *cp);
330 
331 	info = PM_GET_PM_INFO(dip);
332 	if (!info)
333 		return (DDI_WALK_CONTINUE);
334 	cmn_err(CE_CONT, "pm_info for %s\n", ddi_node_name(dip));
335 	for (i = 0; i < PM_NUMCMPTS(dip); i++) {
336 		cp = PM_CP(dip, i);
337 		cmn_err(CE_CONT, "\tThresholds[%d] =",  i);
338 		for (j = 0; j < cp->pmc_comp.pmc_numlevels; j++)
339 			cmn_err(CE_CONT, " %d", cp->pmc_comp.pmc_thresh[i]);
340 		cmn_err(CE_CONT, "\n");
341 		cmn_err(CE_CONT, "\tCurrent power[%d] = %d\n", i,
342 		    pm_cur_power(cp));
343 	}
344 	if (PM_ISDIRECT(dip))
345 		cmn_err(CE_CONT, "\tDirect power management\n");
346 	return (DDI_WALK_CONTINUE);
347 }
348 #endif
349 
350 /*
351  * command, name, supported, str_type, inargs, diptype, deptype, permission
352  */
353 static struct pm_cmd_info pmci[] = {
354 	{PM_SCHEDULE, "PM_SCHEDULE", 0},
355 	{PM_GET_IDLE_TIME, "PM_GET_IDLE_TIME", 0},
356 	{PM_GET_NUM_CMPTS, "PM_GET_NUM_CMPTS", 0},
357 	{PM_GET_THRESHOLD, "PM_GET_THRESHOLD", 0},
358 	{PM_SET_THRESHOLD, "PM_SET_THRESHOLD", 0},
359 	{PM_GET_NORM_PWR, "PM_GET_NORM_PWR", 0},
360 	{PM_SET_CUR_PWR, "PM_SET_CUR_PWR", 0},
361 	{PM_GET_CUR_PWR, "PM_GET_CUR_PWR", 0},
362 	{PM_GET_NUM_DEPS, "PM_GET_NUM_DEPS", 0},
363 	{PM_GET_DEP, "PM_GET_DEP", 0},
364 	{PM_ADD_DEP, "PM_ADD_DEP", 0},
365 	{PM_REM_DEP, "PM_REM_DEP", 0},
366 	{PM_REM_DEVICE, "PM_REM_DEVICE", 0},
367 	{PM_REM_DEVICES, "PM_REM_DEVICES", 0},
368 	{PM_REPARSE_PM_PROPS, "PM_REPARSE_PM_PROPS", 1, PM_REQ, INWHO, DIP,
369 	    NODEP},
370 	{PM_DISABLE_AUTOPM, "PM_DISABLE_AUTOPM", 0},
371 	{PM_REENABLE_AUTOPM, "PM_REENABLE_AUTOPM", 0},
372 	{PM_SET_NORM_PWR, "PM_SET_NORM_PWR", 0 },
373 	{PM_SET_DEVICE_THRESHOLD, "PM_SET_DEVICE_THRESHOLD", 1, PM_REQ,
374 	    INWHO, NODIP, NODEP, SU},
375 	{PM_GET_SYSTEM_THRESHOLD, "PM_GET_SYSTEM_THRESHOLD", 1, NOSTRUCT},
376 	{PM_GET_DEFAULT_SYSTEM_THRESHOLD, "PM_GET_DEFAULT_SYSTEM_THRESHOLD",
377 	    1, NOSTRUCT},
378 	{PM_SET_SYSTEM_THRESHOLD, "PM_SET_SYSTEM_THRESHOLD", 1, NOSTRUCT,
379 	    0, 0, 0, SU},
380 	{PM_START_PM, "PM_START_PM", 1, NOSTRUCT, 0, 0, 0, SU},
381 	{PM_STOP_PM, "PM_STOP_PM", 1, NOSTRUCT, 0, 0, 0, SU},
382 	{PM_RESET_PM, "PM_RESET_PM", 1, NOSTRUCT, 0, 0, 0, SU},
383 	{PM_GET_STATS, "PM_GET_STATS", 1, PM_REQ, INWHO | INDATAOUT,
384 	    DIP, NODEP},
385 	{PM_GET_DEVICE_THRESHOLD, "PM_GET_DEVICE_THRESHOLD", 1, PM_REQ, INWHO,
386 	    DIP, NODEP},
387 	{PM_GET_POWER_NAME, "PM_GET_POWER_NAME", 1, PM_REQ, INWHO | INDATAOUT,
388 	    DIP, NODEP},
389 	{PM_GET_POWER_LEVELS, "PM_GET_POWER_LEVELS", 1, PM_REQ,
390 	    INWHO | INDATAOUT, DIP, NODEP},
391 	{PM_GET_NUM_COMPONENTS, "PM_GET_NUM_COMPONENTS", 1, PM_REQ, INWHO,
392 	    DIP, NODEP},
393 	{PM_GET_COMPONENT_NAME, "PM_GET_COMPONENT_NAME", 1, PM_REQ,
394 	    INWHO | INDATAOUT, DIP, NODEP},
395 	{PM_GET_NUM_POWER_LEVELS, "PM_GET_NUM_POWER_LEVELS", 1, PM_REQ, INWHO,
396 	    DIP, NODEP},
397 	{PM_GET_STATE_CHANGE, "PM_GET_STATE_CHANGE", 1, PM_PSC},
398 	{PM_GET_STATE_CHANGE_WAIT, "PM_GET_STATE_CHANGE_WAIT", 1, PM_PSC},
399 	{PM_DIRECT_PM, "PM_DIRECT_PM", 1, PM_REQ, INWHO, DIP, NODEP,
400 	    (SU | SG)},
401 	{PM_RELEASE_DIRECT_PM, "PM_RELEASE_DIRECT_PM", 1, PM_REQ, INWHO,
402 	    DIP, NODEP},
403 	{PM_DIRECT_NOTIFY, "PM_DIRECT_NOTIFY", 1, PM_PSC},
404 	{PM_DIRECT_NOTIFY_WAIT, "PM_DIRECT_NOTIFY_WAIT", 1, PM_PSC},
405 	{PM_RESET_DEVICE_THRESHOLD, "PM_RESET_DEVICE_THRESHOLD", 1, PM_REQ,
406 	    INWHO, DIP, NODEP, SU},
407 	{PM_GET_PM_STATE, "PM_GET_PM_STATE", 1, NOSTRUCT},
408 	{PM_GET_DEVICE_TYPE, "PM_GET_DEVICE_TYPE", 1, PM_REQ, INWHO,
409 	    DIP, NODEP},
410 	{PM_SET_COMPONENT_THRESHOLDS, "PM_SET_COMPONENT_THRESHOLDS", 1, PM_REQ,
411 	    INWHO | INDATAINT, NODIP, NODEP, SU},
412 	{PM_GET_COMPONENT_THRESHOLDS, "PM_GET_COMPONENT_THRESHOLDS", 1, PM_REQ,
413 	    INWHO | INDATAOUT, DIP, NODEP},
414 	{PM_IDLE_DOWN, "PM_IDLE_DOWN", 1, NOSTRUCT, 0, 0, 0, SU},
415 	{PM_GET_DEVICE_THRESHOLD_BASIS, "PM_GET_DEVICE_THRESHOLD_BASIS", 1,
416 	    PM_REQ, INWHO, DIP, NODEP},
417 	{PM_SET_CURRENT_POWER, "PM_SET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
418 	    NODEP},
419 	{PM_GET_CURRENT_POWER, "PM_GET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
420 	    NODEP},
421 	{PM_GET_FULL_POWER, "PM_GET_FULL_POWER", 1, PM_REQ, INWHO, DIP,
422 	    NODEP},
423 	{PM_ADD_DEPENDENT, "PM_ADD_DEPENDENT", 1, PM_REQ, INWHO | INDATASTRING,
424 	    DIP, DEP, SU},
425 	{PM_GET_TIME_IDLE, "PM_GET_TIME_IDLE", 1, PM_REQ, INWHO, DIP, NODEP},
426 	{PM_ADD_DEPENDENT_PROPERTY, "PM_ADD_DEPENDENT_PROPERTY", 1, PM_REQ,
427 	    INWHO | INDATASTRING, NODIP, DEP, SU},
428 	{PM_START_CPUPM, "PM_START_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
429 	{PM_STOP_CPUPM, "PM_STOP_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
430 	{PM_GET_CPU_THRESHOLD, "PM_GET_CPU_THRESHOLD", 1, NOSTRUCT},
431 	{PM_SET_CPU_THRESHOLD, "PM_SET_CPU_THRESHOLD", 1, NOSTRUCT,
432 	    0, 0, 0, SU},
433 	{PM_GET_CPUPM_STATE, "PM_GET_CPUPM_STATE", 1, NOSTRUCT},
434 	{0, NULL}
435 };
436 
437 struct pm_cmd_info *
438 pc_info(int cmd)
439 {
440 	struct pm_cmd_info *pcip;
441 
442 	for (pcip = pmci; pcip->name; pcip++) {
443 		if (cmd == pcip->cmd)
444 			return (pcip);
445 	}
446 	return (NULL);
447 }
448 
449 static char *
450 pm_decode_cmd(int cmd)
451 {
452 	static char invbuf[64];
453 	struct pm_cmd_info *pcip = pc_info(cmd);
454 	if (pcip != NULL)
455 		return (pcip->name);
456 	(void) sprintf(invbuf, "ioctl: invalid command %d\n", cmd);
457 	return (invbuf);
458 }
459 
460 /*
461  * Allocate scan resource, create taskq, then dispatch scan,
462  * called only if autopm is enabled.
463  */
464 int
465 pm_start_pm_walk(dev_info_t *dip, void *arg)
466 {
467 	int cmd = *((int *)arg);
468 	char *cmdstr = pm_decode_cmd(cmd);
469 
470 	if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip))
471 		return (DDI_WALK_CONTINUE);
472 
473 	switch (cmd) {
474 	case PM_START_CPUPM:
475 		if (!PM_ISCPU(dip))
476 			return (DDI_WALK_CONTINUE);
477 		mutex_enter(&pm_scan_lock);
478 		if (!PM_CPUPM_DISABLED)
479 			pm_scan_init(dip);
480 		mutex_exit(&pm_scan_lock);
481 		break;
482 	case PM_START_PM:
483 		mutex_enter(&pm_scan_lock);
484 		if (PM_ISCPU(dip) && PM_CPUPM_DISABLED) {
485 			mutex_exit(&pm_scan_lock);
486 			return (DDI_WALK_CONTINUE);
487 		}
488 		if (autopm_enabled)
489 			pm_scan_init(dip);
490 		mutex_exit(&pm_scan_lock);
491 		break;
492 	}
493 
494 	/*
495 	 * Start doing pm on device: ensure pm_scan data structure initiated,
496 	 * no need to guarantee a successful scan run.
497 	 */
498 	PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: scan %s@%s(%s#%d)\n", cmdstr,
499 	    PM_DEVICE(dip)))
500 	pm_rescan(dip);
501 
502 	return (DDI_WALK_CONTINUE);
503 }
504 
505 /*
506  * Bring devices to full power level, then stop scan
507  */
508 int
509 pm_stop_pm_walk(dev_info_t *dip, void *arg)
510 {
511 	pm_info_t *info = PM_GET_PM_INFO(dip);
512 	int cmd = *((int *)arg);
513 	char *cmdstr = pm_decode_cmd(cmd);
514 
515 	if (!info)
516 		return (DDI_WALK_CONTINUE);
517 
518 	switch (cmd) {
519 	case PM_STOP_PM:
520 		/*
521 		 * If CPU devices are being managed independently, then don't
522 		 * stop them as part of PM_STOP_PM. Only stop them as part of
523 		 * PM_STOP_CPUPM and PM_RESET_PM.
524 		 */
525 		if (PM_ISCPU(dip) && PM_CPUPM_ENABLED)
526 			return (DDI_WALK_CONTINUE);
527 		break;
528 	case PM_STOP_CPUPM:
529 		/*
530 		 * If stopping CPU devices and this device is not marked
531 		 * as a CPU device, then skip.
532 		 */
533 		if (!PM_ISCPU(dip))
534 			return (DDI_WALK_CONTINUE);
535 		break;
536 	}
537 
538 	/*
539 	 * Stop the current scan, and then bring it back to normal power.
540 	 */
541 	if (!PM_ISBC(dip)) {
542 		PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: stop scan for "
543 		    "%s@%s(%s#%d)\n", cmdstr, PM_DEVICE(dip)))
544 		pm_scan_stop(dip);
545 	}
546 
547 	if (!PM_ISBC(dip) && !PM_ISDIRECT(dip) &&
548 	    !pm_all_at_normal(dip)) {
549 		PM_LOCK_DIP(dip);
550 		if (info->pmi_dev_pm_state & PM_DETACHING) {
551 			PMD(PMD_ALLNORM, ("ioctl: %s: deferring "
552 			    "all_to_normal because %s@%s(%s#%d) is detaching\n",
553 			    cmdstr, PM_DEVICE(dip)))
554 			info->pmi_dev_pm_state |= PM_ALLNORM_DEFERRED;
555 			PM_UNLOCK_DIP(dip);
556 			return (DDI_WALK_CONTINUE);
557 		}
558 		PM_UNLOCK_DIP(dip);
559 		if (pm_all_to_normal(dip, PM_CANBLOCK_FAIL) != DDI_SUCCESS) {
560 			PMD(PMD_ERROR, ("ioctl: %s: could not bring %s@%s"
561 			    "(%s#%d) to normal\n", cmdstr, PM_DEVICE(dip)))
562 		}
563 	}
564 
565 	return (DDI_WALK_CONTINUE);
566 }
567 
568 static int
569 pm_start_idledown(dev_info_t *dip, void *arg)
570 {
571 	int		flag = (int)(intptr_t)arg;
572 	pm_scan_t	*scanp = PM_GET_PM_SCAN(dip);
573 
574 	if (!scanp)
575 		return (DDI_WALK_CONTINUE);
576 
577 	PM_LOCK_DIP(dip);
578 	scanp->ps_idle_down |= flag;
579 	PM_UNLOCK_DIP(dip);
580 	pm_rescan(dip);
581 
582 	return (DDI_WALK_CONTINUE);
583 }
584 
585 /*ARGSUSED*/
586 static int
587 pm_end_idledown(dev_info_t *dip, void *ignore)
588 {
589 	pm_scan_t	*scanp = PM_GET_PM_SCAN(dip);
590 
591 	if (!scanp)
592 		return (DDI_WALK_CONTINUE);
593 
594 	PM_LOCK_DIP(dip);
595 	/*
596 	 * The PMID_TIMERS bits are place holder till idledown expires.
597 	 * The bits are also the base for regenerating PMID_SCANS bits.
598 	 * While it's up to scan thread to clear up the PMID_SCANS bits
599 	 * after each scan run, PMID_TIMERS ensure aggressive scan down
600 	 * performance throughout the idledown period.
601 	 */
602 	scanp->ps_idle_down &= ~PMID_TIMERS;
603 	PM_UNLOCK_DIP(dip);
604 
605 	return (DDI_WALK_CONTINUE);
606 }
607 
608 /*ARGSUSED*/
609 static void
610 pm_end_idledown_walk(void *ignore)
611 {
612 	PMD(PMD_IDLEDOWN, ("ioctl: end_idledown: idledown_id(%lx) timer is "
613 	    "off\n", (ulong_t)pmstp->pm_idledown_id));
614 
615 	mutex_enter(&pm_scan_lock);
616 	pmstp->pm_idledown_id = 0;
617 	mutex_exit(&pm_scan_lock);
618 
619 	ddi_walk_devs(ddi_root_node(), pm_end_idledown, NULL);
620 }
621 
622 /*
623  * pm_timeout_idledown - keep idledown effect for 10 seconds.
624  *
625  * Return 0 if another competing caller scheduled idledown timeout,
626  * otherwise, return idledown timeout_id.
627  */
628 static timeout_id_t
629 pm_timeout_idledown(void)
630 {
631 	timeout_id_t	to_id;
632 
633 	/*
634 	 * Keep idle-down in effect for either 10 seconds
635 	 * or length of a scan interval, which ever is greater.
636 	 */
637 	mutex_enter(&pm_scan_lock);
638 	if (pmstp->pm_idledown_id != 0) {
639 		to_id = pmstp->pm_idledown_id;
640 		pmstp->pm_idledown_id = 0;
641 		mutex_exit(&pm_scan_lock);
642 		(void) untimeout(to_id);
643 		mutex_enter(&pm_scan_lock);
644 		if (pmstp->pm_idledown_id != 0) {
645 			PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: "
646 			    "another caller got it, idledown_id(%lx)!\n",
647 			    (ulong_t)pmstp->pm_idledown_id))
648 			mutex_exit(&pm_scan_lock);
649 			return (0);
650 		}
651 	}
652 	pmstp->pm_idledown_id = timeout(pm_end_idledown_walk, NULL,
653 	    PM_IDLEDOWN_TIME * hz);
654 	PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: idledown_id(%lx)\n",
655 	    (ulong_t)pmstp->pm_idledown_id))
656 	mutex_exit(&pm_scan_lock);
657 
658 	return (pmstp->pm_idledown_id);
659 }
660 
661 static int
662 pm_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
663 	struct pollhead **phpp)
664 {
665 	extern struct pollhead pm_pollhead;	/* common/os/sunpm.c */
666 	int	clone;
667 
668 	clone = PM_MINOR_TO_CLONE(getminor(dev));
669 	PMD(PMD_IOCTL, ("ioctl: pm_chpoll: clone %d\n", clone))
670 	if ((events & (POLLIN | POLLRDNORM)) && pm_poll_cnt[clone]) {
671 		*reventsp |= (POLLIN | POLLRDNORM);
672 		PMD(PMD_IOCTL, ("ioctl: pm_chpoll: reventsp set\n"))
673 	} else {
674 		*reventsp = 0;
675 		if (!anyyet) {
676 			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: not anyyet\n"))
677 			*phpp = &pm_pollhead;
678 		}
679 #ifdef DEBUG
680 		else {
681 			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: anyyet\n"))
682 		}
683 #endif
684 	}
685 	return (0);
686 }
687 
688 /*
689  * called by pm_dicard_entries to free up the memory. It also decrements
690  * pm_poll_cnt, if direct is non zero.
691  */
692 static void
693 pm_free_entries(psce_t *pscep, int clone, int direct)
694 {
695 	pm_state_change_t	*p;
696 
697 	if (pscep) {
698 		p = pscep->psce_out;
699 		while (p->size) {
700 			if (direct) {
701 				PMD(PMD_IOCTL, ("ioctl: discard: "
702 				    "pm_poll_cnt[%d] is %d before "
703 				    "ASSERT\n", clone,
704 				    pm_poll_cnt[clone]))
705 				ASSERT(pm_poll_cnt[clone]);
706 				pm_poll_cnt[clone]--;
707 			}
708 			kmem_free(p->physpath, p->size);
709 			p->size = 0;
710 			if (p == pscep->psce_last)
711 				p = pscep->psce_first;
712 			else
713 				p++;
714 		}
715 		pscep->psce_out = pscep->psce_first;
716 		pscep->psce_in = pscep->psce_first;
717 		mutex_exit(&pscep->psce_lock);
718 	}
719 }
720 
721 /*
722  * Discard entries for this clone. Calls pm_free_entries to free up memory.
723  */
724 static void
725 pm_discard_entries(int clone)
726 {
727 	psce_t	*pscep;
728 	psce_t			*pm_psc_clone_to_direct(int);
729 	psce_t			*pm_psc_clone_to_interest(int);
730 	int			direct = 0;
731 
732 	mutex_enter(&pm_clone_lock);
733 	if ((pscep = pm_psc_clone_to_direct(clone)) != NULL)
734 		direct = 1;
735 	pm_free_entries(pscep, clone, direct);
736 	pscep = pm_psc_clone_to_interest(clone);
737 	pm_free_entries(pscep, clone, 0);
738 	mutex_exit(&pm_clone_lock);
739 }
740 
741 
742 static void
743 pm_set_idle_threshold(dev_info_t *dip, int thresh, int flag)
744 {
745 	if (!PM_ISBC(dip) && !PM_ISDIRECT(dip)) {
746 		switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
747 		case PMC_DEF_THRESH:
748 		case PMC_CPU_THRESH:
749 			PMD(PMD_IOCTL, ("ioctl: set_idle_threshold: set "
750 			    "%s@%s(%s#%d) default thresh to 0t%d\n",
751 			    PM_DEVICE(dip), thresh))
752 			pm_set_device_threshold(dip, thresh, flag);
753 			break;
754 		default:
755 			break;
756 		}
757 	}
758 }
759 
760 static int
761 pm_set_idle_thresh_walk(dev_info_t *dip, void *arg)
762 {
763 	int cmd = *((int *)arg);
764 
765 	if (!PM_GET_PM_INFO(dip))
766 		return (DDI_WALK_CONTINUE);
767 
768 	switch (cmd) {
769 	case PM_SET_SYSTEM_THRESHOLD:
770 		if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)
771 			break;
772 		pm_set_idle_threshold(dip, pm_system_idle_threshold,
773 		    PMC_DEF_THRESH);
774 		pm_rescan(dip);
775 		break;
776 	case PM_SET_CPU_THRESHOLD:
777 		if (!PM_ISCPU(dip))
778 			break;
779 		pm_set_idle_threshold(dip, pm_cpu_idle_threshold,
780 		    PMC_CPU_THRESH);
781 		pm_rescan(dip);
782 		break;
783 	}
784 
785 	return (DDI_WALK_CONTINUE);
786 }
787 
788 /*ARGSUSED*/
789 static int
790 pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
791 {
792 	dev_t	dev;
793 	int	instance;
794 
795 	switch (infocmd) {
796 	case DDI_INFO_DEVT2DEVINFO:
797 		if (pmstp->pm_instance == -1)
798 			return (DDI_FAILURE);
799 		*result = pmstp->pm_dip;
800 		return (DDI_SUCCESS);
801 
802 	case DDI_INFO_DEVT2INSTANCE:
803 		dev = (dev_t)arg;
804 		instance = getminor(dev) >> 8;
805 		*result = (void *)(uintptr_t)instance;
806 		return (DDI_SUCCESS);
807 
808 	default:
809 		return (DDI_FAILURE);
810 	}
811 }
812 
813 
814 /*ARGSUSED1*/
815 static int
816 pm_open(dev_t *devp, int flag, int otyp, cred_t *cr)
817 {
818 	int		clone;
819 
820 	if (otyp != OTYP_CHR)
821 		return (EINVAL);
822 
823 	mutex_enter(&pm_clone_lock);
824 	for (clone = 1; clone < PM_MAX_CLONE; clone++)
825 		if (!pmstp->pm_clones[clone])
826 			break;
827 
828 	if (clone == PM_MAX_CLONE) {
829 		mutex_exit(&pm_clone_lock);
830 		return (ENXIO);
831 	}
832 	pmstp->pm_cred[clone] = cr;
833 	crhold(cr);
834 
835 	*devp = makedevice(getmajor(*devp), (pmstp->pm_instance << 8) + clone);
836 	pmstp->pm_clones[clone] = 1;
837 	mutex_exit(&pm_clone_lock);
838 
839 	return (0);
840 }
841 
842 /*ARGSUSED1*/
843 static int
844 pm_close(dev_t dev, int flag, int otyp, cred_t *cr)
845 {
846 	int clone;
847 
848 	if (otyp != OTYP_CHR)
849 		return (EINVAL);
850 
851 	clone = PM_MINOR_TO_CLONE(getminor(dev));
852 	PMD(PMD_CLOSE, ("pm_close: minor %x, clone %x\n", getminor(dev),
853 	    clone))
854 
855 	/*
856 	 * Walk the entire device tree to find the corresponding
857 	 * device and operate on it.
858 	 */
859 	ddi_walk_devs(ddi_root_node(), pm_close_direct_pm_device,
860 	    (void *) &clone);
861 
862 	crfree(pmstp->pm_cred[clone]);
863 	pmstp->pm_cred[clone] = 0;
864 	pmstp->pm_clones[clone] = 0;
865 	pm_discard_entries(clone);
866 	ASSERT(pm_poll_cnt[clone] == 0);
867 	pm_deregister_watcher(clone, NULL);
868 	return (0);
869 }
870 
871 /*ARGSUSED*/
872 static int
873 pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
874 {
875 	struct pm_cmd_info *pc_info(int);
876 	struct pm_cmd_info *pcip = pc_info(cmd);
877 	pm_req_t	req;
878 	dev_info_t	*dip = NULL;
879 	pm_info_t	*info = NULL;
880 	int		clone;
881 	char		*cmdstr = pm_decode_cmd(cmd);
882 	/*
883 	 * To keep devinfo nodes from going away while we're holding a
884 	 * pointer to their dip, pm_name_to_dip() optionally holds
885 	 * the devinfo node.  If we've done that, we set dipheld
886 	 * so we know at the end of the ioctl processing to release the
887 	 * node again.
888 	 */
889 	int		dipheld = 0;
890 	int		icount = 0;
891 	int		i;
892 	int		comps;
893 	size_t		lencopied;
894 	int		ret = ENOTTY;
895 	int		curpower;
896 	char		who[MAXNAMELEN];
897 	size_t		wholen;			/* copyinstr length */
898 	size_t		deplen = MAXNAMELEN;
899 	char		*dep, i_dep_buf[MAXNAMELEN];
900 	char		*pathbuf;
901 	struct pm_component *cp;
902 #ifdef	_MULTI_DATAMODEL
903 	pm_state_change32_t		*pscp32;
904 	pm_state_change32_t		psc32;
905 	size_t				copysize32;
906 #endif
907 	pm_state_change_t		*pscp;
908 	pm_state_change_t		psc;
909 	size_t		copysize;
910 	extern void	pm_record_thresh(pm_thresh_rec_t *);
911 	psce_t		*pm_psc_clone_to_direct(int);
912 	psce_t		*pm_psc_clone_to_interest(int);
913 	extern	void	pm_register_watcher(int, dev_info_t *);
914 	extern	int	pm_get_current_power(dev_info_t *, int, int *);
915 	extern	int	pm_interest_registered(int);
916 	extern	void	pm_all_to_default_thresholds(void);
917 	extern	int	pm_current_threshold(dev_info_t *, int, int *);
918 	extern void	pm_deregister_watcher(int, dev_info_t *);
919 	extern void	pm_unrecord_threshold(char *);
920 
921 	PMD(PMD_IOCTL, ("ioctl: %s: begin\n", cmdstr))
922 
923 #ifdef DEBUG
924 	if (cmd == 666) {
925 		ddi_walk_devs(ddi_root_node(), print_info, NULL);
926 		return (0);
927 	}
928 	ret = 0x0badcafe;			/* sanity checking */
929 	pm_cmd = cmd;				/* for ASSERT debugging */
930 	pm_cmd_string = cmdstr;	/* for ASSERT debugging */
931 #endif
932 
933 
934 	if (pcip == NULL) {
935 		PMD(PMD_ERROR, ("ioctl: unknown command %d\n", cmd))
936 		return (ENOTTY);
937 	}
938 	if (pcip == NULL || pcip->supported == 0) {
939 		PMD(PMD_ERROR, ("ioctl: command %s no longer supported\n",
940 		    pcip->name))
941 		return (ENOTTY);
942 	}
943 
944 	wholen = 0;
945 	dep = i_dep_buf;
946 	i_dep_buf[0] = 0;
947 	clone = PM_MINOR_TO_CLONE(getminor(dev));
948 	if (!pm_perms(pcip->permission, pmstp->pm_cred[clone])) {
949 		ret = EPERM;
950 		return (ret);
951 	}
952 	switch (pcip->str_type) {
953 	case PM_REQ:
954 #ifdef	_MULTI_DATAMODEL
955 		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
956 			pm_req32_t	req32;
957 
958 			if (ddi_copyin((caddr_t)arg, &req32,
959 			    sizeof (req32), mode) != 0) {
960 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
961 				    "EFAULT\n\n", cmdstr))
962 				ret = EFAULT;
963 				break;
964 			}
965 			req.component = req32.component;
966 			req.value = req32.value;
967 			req.datasize = req32.datasize;
968 			if (pcip->inargs & INWHO) {
969 				ret = copyinstr((char *)(uintptr_t)
970 				    req32.physpath, who, MAXNAMELEN, &wholen);
971 				if (ret) {
972 					PMD(PMD_ERROR, ("ioctl: %s: "
973 					    "copyinstr fails returning %d\n",
974 					    cmdstr, ret))
975 					break;
976 				}
977 				req.physpath = who;
978 			}
979 			PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n", cmdstr,
980 			    req.physpath))
981 			if (pcip->inargs & INDATA) {
982 				req.data = (void *)(uintptr_t)req32.data;
983 				req.datasize = req32.datasize;
984 			} else {
985 				req.data = NULL;
986 				req.datasize = 0;
987 			}
988 			switch (pcip->diptype) {
989 			case DIP:
990 				if (!(dip =
991 				    pm_name_to_dip(req.physpath, 1))) {
992 					PMD(PMD_ERROR, ("ioctl: %s: "
993 					    "pm_name_to_dip for %s failed\n",
994 					    cmdstr, req.physpath))
995 					return (ENODEV);
996 				}
997 				ASSERT(!dipheld);
998 				dipheld++;
999 				break;
1000 			case NODIP:
1001 				break;
1002 			default:
1003 				/*
1004 				 * Internal error, invalid ioctl description
1005 				 * force debug entry even if pm_debug not set
1006 				 */
1007 #ifdef	DEBUG
1008 				pm_log("invalid diptype %d for cmd %d (%s)\n",
1009 				    pcip->diptype, cmd, pcip->name);
1010 #endif
1011 				ASSERT(0);
1012 				return (EIO);
1013 			}
1014 			if (pcip->inargs & INDATAINT) {
1015 				int32_t int32buf;
1016 				int32_t *i32p;
1017 				int *ip;
1018 				icount = req32.datasize / sizeof (int32_t);
1019 				if (icount <= 0) {
1020 					PMD(PMD_ERROR, ("ioctl: %s: datasize"
1021 					    " 0 or neg EFAULT\n\n", cmdstr))
1022 					ret = EFAULT;
1023 					break;
1024 				}
1025 				ASSERT(!(pcip->inargs & INDATASTRING));
1026 				req.datasize = icount * sizeof (int);
1027 				req.data = kmem_alloc(req.datasize, KM_SLEEP);
1028 				ip = req.data;
1029 				ret = 0;
1030 				for (i = 0,
1031 				    i32p = (int32_t *)(uintptr_t)req32.data;
1032 				    i < icount; i++, i32p++) {
1033 					if (ddi_copyin((void *)i32p, &int32buf,
1034 					    sizeof (int32_t), mode)) {
1035 						kmem_free(req.data,
1036 						    req.datasize);
1037 						PMD(PMD_ERROR, ("ioctl: %s: "
1038 						    "entry %d EFAULT\n",
1039 						    cmdstr, i))
1040 						ret = EFAULT;
1041 						break;
1042 					}
1043 					*ip++ = (int)int32buf;
1044 				}
1045 				if (ret)
1046 					break;
1047 			}
1048 			if (pcip->inargs & INDATASTRING) {
1049 				ASSERT(!(pcip->inargs & INDATAINT));
1050 				ASSERT(pcip->deptype == DEP);
1051 				if (req32.data != NULL) {
1052 					size_t dummy;
1053 					if (copyinstr((void *)(uintptr_t)
1054 					    req32.data, dep, deplen, &dummy)) {
1055 						PMD(PMD_ERROR, ("ioctl: %s: "
1056 						    "0x%p dep size %lx, EFAULT"
1057 						    "\n", cmdstr,
1058 						    (void *)req.data, deplen))
1059 						ret = EFAULT;
1060 						break;
1061 					}
1062 #ifdef DEBUG
1063 					else {
1064 						PMD(PMD_DEP, ("ioctl: %s: "
1065 						    "dep %s\n", cmdstr, dep))
1066 					}
1067 #endif
1068 				} else {
1069 					PMD(PMD_ERROR, ("ioctl: %s: no "
1070 					    "dependent\n", cmdstr))
1071 					ret = EINVAL;
1072 					break;
1073 				}
1074 			}
1075 		} else
1076 #endif /* _MULTI_DATAMODEL */
1077 		{
1078 			if (ddi_copyin((caddr_t)arg,
1079 			    &req, sizeof (req), mode) != 0) {
1080 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1081 				    "EFAULT\n\n", cmdstr))
1082 				ret = EFAULT;
1083 				break;
1084 			}
1085 			if (pcip->inargs & INWHO) {
1086 				ret = copyinstr((char *)req.physpath, who,
1087 				    MAXNAMELEN, &wholen);
1088 				if (ret) {
1089 					PMD(PMD_ERROR, ("ioctl: %s copyinstr"
1090 					    " fails returning %d\n", cmdstr,
1091 					    ret))
1092 					break;
1093 				}
1094 				req.physpath = who;
1095 			}
1096 			PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n", cmdstr,
1097 			    req.physpath))
1098 			if (!(pcip->inargs & INDATA)) {
1099 				req.data = NULL;
1100 				req.datasize = 0;
1101 			}
1102 			switch (pcip->diptype) {
1103 			case DIP:
1104 				if (!(dip =
1105 				    pm_name_to_dip(req.physpath, 1))) {
1106 					PMD(PMD_ERROR, ("ioctl: %s: "
1107 					    "pm_name_to_dip for %s failed\n",
1108 					    cmdstr, req.physpath))
1109 					return (ENODEV);
1110 				}
1111 				ASSERT(!dipheld);
1112 				dipheld++;
1113 				break;
1114 			case NODIP:
1115 				break;
1116 			default:
1117 				/*
1118 				 * Internal error, invalid ioctl description
1119 				 * force debug entry even if pm_debug not set
1120 				 */
1121 #ifdef	DEBUG
1122 				pm_log("invalid diptype %d for cmd %d (%s)\n",
1123 				    pcip->diptype, cmd, pcip->name);
1124 #endif
1125 				ASSERT(0);
1126 				return (EIO);
1127 			}
1128 			if (pcip->inargs & INDATAINT) {
1129 				int *ip;
1130 
1131 				ASSERT(!(pcip->inargs & INDATASTRING));
1132 				ip = req.data;
1133 				icount = req.datasize / sizeof (int);
1134 				if (icount <= 0) {
1135 					PMD(PMD_ERROR, ("ioctl: %s: datasize"
1136 					    " 0 or neg EFAULT\n\n", cmdstr))
1137 					ret = EFAULT;
1138 					break;
1139 				}
1140 				req.data = kmem_alloc(req.datasize, KM_SLEEP);
1141 				if (ddi_copyin((caddr_t)ip, req.data,
1142 				    req.datasize, mode) != 0) {
1143 					PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1144 					    "EFAULT\n\n", cmdstr))
1145 					ret = EFAULT;
1146 					break;
1147 				}
1148 			}
1149 			if (pcip->inargs & INDATASTRING) {
1150 				ASSERT(!(pcip->inargs & INDATAINT));
1151 				ASSERT(pcip->deptype == DEP);
1152 				if (req.data != NULL) {
1153 					size_t dummy;
1154 					if (copyinstr((caddr_t)req.data,
1155 					    dep, deplen, &dummy)) {
1156 						PMD(PMD_ERROR, ("ioctl: %s: "
1157 						    "0x%p dep size %lu, "
1158 						    "EFAULT\n", cmdstr,
1159 						    (void *)req.data, deplen))
1160 						ret = EFAULT;
1161 						break;
1162 					}
1163 #ifdef DEBUG
1164 					else {
1165 						PMD(PMD_DEP, ("ioctl: %s: "
1166 						    "dep %s\n", cmdstr, dep))
1167 					}
1168 #endif
1169 				} else {
1170 					PMD(PMD_ERROR, ("ioctl: %s: no "
1171 					    "dependent\n", cmdstr))
1172 					ret = EINVAL;
1173 					break;
1174 				}
1175 			}
1176 		}
1177 		/*
1178 		 * Now we've got all the args in for the commands that
1179 		 * use the new pm_req struct.
1180 		 */
1181 		switch (cmd) {
1182 		case PM_REPARSE_PM_PROPS:
1183 		{
1184 			struct dev_ops	*drv;
1185 			struct cb_ops	*cb;
1186 			void		*propval;
1187 			int length;
1188 			/*
1189 			 * This ioctl is provided only for the ddivs pm test.
1190 			 * We only do it to a driver which explicitly allows
1191 			 * us to do so by exporting a pm-reparse-ok property.
1192 			 * We only care whether the property exists or not.
1193 			 */
1194 			if ((drv = ddi_get_driver(dip)) == NULL) {
1195 				ret = EINVAL;
1196 				break;
1197 			}
1198 			if ((cb = drv->devo_cb_ops) != NULL) {
1199 				if ((*cb->cb_prop_op)(DDI_DEV_T_ANY, dip,
1200 				    PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1201 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1202 				    "pm-reparse-ok", (caddr_t)&propval,
1203 				    &length) != DDI_SUCCESS) {
1204 					ret = EINVAL;
1205 					break;
1206 				}
1207 			} else if (ddi_prop_op(DDI_DEV_T_ANY, dip,
1208 			    PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1209 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1210 			    "pm-reparse-ok", (caddr_t)&propval,
1211 			    &length) != DDI_SUCCESS) {
1212 				ret = EINVAL;
1213 				break;
1214 			}
1215 			kmem_free(propval, length);
1216 			ret =  e_new_pm_props(dip);
1217 			break;
1218 		}
1219 
1220 		case PM_GET_DEVICE_THRESHOLD:
1221 			PM_LOCK_DIP(dip);
1222 			if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip)) {
1223 				PM_UNLOCK_DIP(dip);
1224 				PMD(PMD_ERROR, ("ioctl: %s: ENODEV\n",
1225 				    cmdstr))
1226 				ret = ENODEV;
1227 				break;
1228 			}
1229 			*rval_p = DEVI(dip)->devi_pm_dev_thresh;
1230 			PM_UNLOCK_DIP(dip);
1231 			ret = 0;
1232 			break;
1233 
1234 		case PM_DIRECT_PM:
1235 		{
1236 			int has_dep;
1237 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1238 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1239 				    "ENODEV\n", cmdstr))
1240 				ret = ENODEV;
1241 				break;
1242 			}
1243 			/*
1244 			 * Check to see if we are there is a dependency on
1245 			 * this kept device, if so, return EBUSY.
1246 			 */
1247 			pathbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1248 			(void) ddi_pathname(dip, pathbuf);
1249 			pm_dispatch_to_dep_thread(PM_DEP_WK_CHECK_KEPT,
1250 			    NULL, pathbuf, PM_DEP_WAIT, &has_dep, 0);
1251 			kmem_free(pathbuf, MAXPATHLEN);
1252 			if (has_dep) {
1253 				PMD(PMD_ERROR | PMD_DPM, ("%s EBUSY\n",
1254 				    cmdstr))
1255 				ret = EBUSY;
1256 				break;
1257 			}
1258 			PM_LOCK_DIP(dip);
1259 			if (PM_ISDIRECT(dip) || (info->pmi_clone != 0)) {
1260 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1261 				    "%s@%s(%s#%d): EBUSY\n", cmdstr,
1262 				    PM_DEVICE(dip)))
1263 				PM_UNLOCK_DIP(dip);
1264 				ret = EBUSY;
1265 				break;
1266 			}
1267 			info->pmi_dev_pm_state |= PM_DIRECT;
1268 			info->pmi_clone = clone;
1269 			PM_UNLOCK_DIP(dip);
1270 			PMD(PMD_DPM, ("ioctl: %s: info %p, pmi_clone %d\n",
1271 			    cmdstr, (void *)info, clone))
1272 			mutex_enter(&pm_clone_lock);
1273 			pm_register_watcher(clone, dip);
1274 			mutex_exit(&pm_clone_lock);
1275 			ret = 0;
1276 			break;
1277 		}
1278 
1279 		case PM_RELEASE_DIRECT_PM:
1280 		{
1281 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1282 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1283 				    "ENODEV\n", cmdstr))
1284 				ret = ENODEV;
1285 				break;
1286 			}
1287 			PM_LOCK_DIP(dip);
1288 			if (info->pmi_clone != clone) {
1289 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1290 				    "%s@%s(%s#%d) EINVAL\n", cmdstr,
1291 				    PM_DEVICE(dip)))
1292 				ret = EINVAL;
1293 				PM_UNLOCK_DIP(dip);
1294 				break;
1295 			}
1296 			ASSERT(PM_ISDIRECT(dip));
1297 			info->pmi_dev_pm_state &= ~PM_DIRECT;
1298 			PM_UNLOCK_DIP(dip);
1299 			/* Bring ourselves up if there is a keeper. */
1300 			pathbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
1301 			(void) ddi_pathname(dip, pathbuf);
1302 			pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF,
1303 			    NULL, pathbuf, PM_DEP_WAIT, NULL, 0);
1304 			kmem_free(pathbuf, MAXPATHLEN);
1305 			pm_discard_entries(clone);
1306 			pm_deregister_watcher(clone, dip);
1307 			/*
1308 			 * Now we could let the other threads that are
1309 			 * trying to do a DIRECT_PM thru
1310 			 */
1311 			PM_LOCK_DIP(dip);
1312 			info->pmi_clone = 0;
1313 			PM_UNLOCK_DIP(dip);
1314 			pm_proceed(dip, PMP_RELEASE, -1, -1);
1315 			PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1316 			    cmdstr))
1317 			pm_rescan(dip);
1318 			ret = 0;
1319 			break;
1320 		}
1321 
1322 		case PM_SET_CURRENT_POWER:
1323 		{
1324 			int comp = req.component;
1325 			int  value = req.value;
1326 			PMD(PMD_DPM, ("ioctl: %s: %s component %d to value "
1327 			    "%d\n", cmdstr, req.physpath, comp, value))
1328 			if (!e_pm_valid_comp(dip, comp, NULL) ||
1329 			    !e_pm_valid_power(dip, comp, value)) {
1330 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1331 				    "physpath=%s, comp=%d, level=%d, fails\n",
1332 				    cmdstr, req.physpath, comp, value))
1333 				ret = EINVAL;
1334 				break;
1335 			}
1336 
1337 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1338 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1339 				    "ENODEV\n", cmdstr))
1340 				ret = ENODEV;
1341 				break;
1342 			}
1343 			if (info->pmi_clone != clone) {
1344 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1345 				    "(not owner) %s fails; clone %d, owner %d"
1346 				    "\n", cmdstr, req.physpath, clone,
1347 				    info->pmi_clone))
1348 				ret = EINVAL;
1349 				break;
1350 			}
1351 			ASSERT(PM_ISDIRECT(dip));
1352 
1353 			if (pm_set_power(dip, comp, value, PM_LEVEL_EXACT,
1354 			    PM_CANBLOCK_BLOCK, 0, &ret) != DDI_SUCCESS) {
1355 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1356 				    "pm_set_power for %s fails, errno=%d\n",
1357 				    cmdstr, req.physpath, ret))
1358 				break;
1359 			}
1360 
1361 			pm_proceed(dip, PMP_SETPOWER, comp, value);
1362 
1363 			/*
1364 			 * Power down all idle components if console framebuffer
1365 			 * is powered off.
1366 			 */
1367 			if (PM_IS_CFB(dip) && (pm_system_idle_threshold ==
1368 			    pm_default_idle_threshold)) {
1369 				dev_info_t	*root = ddi_root_node();
1370 				if (PM_ISBC(dip)) {
1371 					if (comp == 0 && value == 0 &&
1372 					    (pm_timeout_idledown() != 0)) {
1373 						ddi_walk_devs(root,
1374 						    pm_start_idledown,
1375 						    (void *)PMID_CFB);
1376 					}
1377 				} else {
1378 					int count = 0;
1379 					for (i = 0; i < PM_NUMCMPTS(dip); i++) {
1380 						ret = pm_get_current_power(dip,
1381 						    i, &curpower);
1382 						if (ret == DDI_SUCCESS &&
1383 						    curpower == 0)
1384 							count++;
1385 					}
1386 					if ((count == PM_NUMCMPTS(dip)) &&
1387 					    (pm_timeout_idledown() != 0)) {
1388 						ddi_walk_devs(root,
1389 						    pm_start_idledown,
1390 						    (void *)PMID_CFB);
1391 					}
1392 				}
1393 			}
1394 
1395 			PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1396 			    cmdstr))
1397 			pm_rescan(dip);
1398 			*rval_p = 0;
1399 			ret = 0;
1400 			break;
1401 		}
1402 
1403 		case PM_GET_FULL_POWER:
1404 		{
1405 			int normal;
1406 			ASSERT(dip);
1407 			PMD(PMD_NORM, ("ioctl: %s: %s component %d\n",
1408 			    cmdstr, req.physpath, req.component))
1409 			normal =  pm_get_normal_power(dip, req.component);
1410 
1411 			if (normal == DDI_FAILURE) {
1412 				PMD(PMD_ERROR | PMD_NORM, ("ioctl: %s: "
1413 				    "returns EINVAL\n", cmdstr))
1414 				ret = EINVAL;
1415 				break;
1416 			}
1417 			*rval_p = normal;
1418 			PMD(PMD_NORM, ("ioctl: %s: returns %d\n",
1419 			    cmdstr, normal))
1420 			ret = 0;
1421 			break;
1422 		}
1423 
1424 		case PM_GET_CURRENT_POWER:
1425 			if (pm_get_current_power(dip, req.component,
1426 			    rval_p) != DDI_SUCCESS) {
1427 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s "
1428 				    "EINVAL\n", cmdstr))
1429 				ret = EINVAL;
1430 				break;
1431 			}
1432 			PMD(PMD_DPM, ("ioctl: %s: %s comp %d returns %d\n",
1433 			    cmdstr, req.physpath, req.component, *rval_p))
1434 			if (*rval_p == PM_LEVEL_UNKNOWN)
1435 				ret = EAGAIN;
1436 			else
1437 				ret = 0;
1438 			break;
1439 
1440 		case PM_GET_TIME_IDLE:
1441 		{
1442 			time_t timestamp;
1443 			int comp = req.component;
1444 			pm_component_t *cp;
1445 			if (!e_pm_valid_comp(dip, comp, &cp)) {
1446 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1447 				    "component %d > numcmpts - 1 %d--EINVAL\n",
1448 				    cmdstr, PM_DEVICE(dip), comp,
1449 				    PM_NUMCMPTS(dip) - 1))
1450 				ret = EINVAL;
1451 				break;
1452 			}
1453 			timestamp = cp->pmc_timestamp;
1454 			if (timestamp) {
1455 				time_t now;
1456 				(void) drv_getparm(TIME, &now);
1457 				*rval_p = (now - timestamp);
1458 			} else {
1459 				*rval_p = 0;
1460 			}
1461 			ret = 0;
1462 			break;
1463 		}
1464 
1465 		case PM_ADD_DEPENDENT:
1466 		{
1467 			dev_info_t	*kept_dip;
1468 
1469 			PMD(PMD_KEEPS, ("%s, kept %s, keeper %s\n", cmdstr,
1470 			    dep, req.physpath))
1471 
1472 			/*
1473 			 * hold and install kept while processing dependency
1474 			 * keeper (in .physpath) has already been held.
1475 			 */
1476 			if (dep[0] == '\0') {
1477 				PMD(PMD_ERROR, ("kept NULL or null\n"))
1478 				ret = EINVAL;
1479 				break;
1480 			} else if ((kept_dip =
1481 			    pm_name_to_dip(dep, 1)) == NULL) {
1482 				PMD(PMD_ERROR, ("no dip for kept %s\n", dep))
1483 				ret = ENODEV;
1484 				break;
1485 			} else if (kept_dip == dip) {
1486 				PMD(PMD_ERROR, ("keeper(%s, %p) - kept(%s, %p) "
1487 				    "self-dependency not allowed.\n",
1488 				    dep, (void *)kept_dip, req.physpath,
1489 				    (void *) dip))
1490 				PM_RELE(dip);	/* release "double" hold */
1491 				ret = EINVAL;
1492 				break;
1493 			}
1494 			ASSERT(!(strcmp(req.physpath, (char *)dep) == 0));
1495 
1496 			/*
1497 			 * record dependency, then walk through device tree
1498 			 * independently on behalf of kept and keeper to
1499 			 * establish newly created dependency.
1500 			 */
1501 			pm_dispatch_to_dep_thread(PM_DEP_WK_RECORD_KEEPER,
1502 			    req.physpath, dep, PM_DEP_WAIT, NULL, 0);
1503 
1504 			/*
1505 			 * release kept after establishing dependency, keeper
1506 			 * is released as part of ioctl exit processing.
1507 			 */
1508 			PM_RELE(kept_dip);
1509 			*rval_p = 0;
1510 			ret = 0;
1511 			break;
1512 		}
1513 
1514 		case PM_ADD_DEPENDENT_PROPERTY:
1515 		{
1516 			char *keeper, *kept;
1517 
1518 			if (dep[0] == '\0') {
1519 				PMD(PMD_ERROR, ("ioctl: %s: dep NULL or "
1520 				    "null\n", cmdstr))
1521 				ret = EINVAL;
1522 				break;
1523 			}
1524 			kept = dep;
1525 			keeper = req.physpath;
1526 			/*
1527 			 * record keeper - kept dependency, then walk through
1528 			 * device tree to find out all attached keeper, walk
1529 			 * through again to apply dependency to all the
1530 			 * potential kept.
1531 			 */
1532 			pm_dispatch_to_dep_thread(
1533 			    PM_DEP_WK_RECORD_KEEPER_PROP, keeper, kept,
1534 			    PM_DEP_WAIT, NULL, 0);
1535 
1536 			*rval_p = 0;
1537 			ret = 0;
1538 			break;
1539 		}
1540 
1541 		case PM_SET_DEVICE_THRESHOLD:
1542 		{
1543 			pm_thresh_rec_t *rp;
1544 			pm_pte_t *ep;	/* threshold header storage */
1545 			int *tp;	/* threshold storage */
1546 			size_t size;
1547 			extern int pm_thresh_specd(dev_info_t *);
1548 
1549 			/*
1550 			 * The header struct plus one entry struct plus one
1551 			 * threshold plus the length of the string
1552 			 */
1553 			size = sizeof (pm_thresh_rec_t) +
1554 			    (sizeof (pm_pte_t) * 1) +
1555 			    (1 * sizeof (int)) +
1556 			    strlen(req.physpath) + 1;
1557 
1558 			rp = kmem_zalloc(size, KM_SLEEP);
1559 			rp->ptr_size = size;
1560 			rp->ptr_numcomps = 0;	/* means device threshold */
1561 			ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1562 			rp->ptr_entries = ep;
1563 			tp = (int *)((intptr_t)ep +
1564 			    (1 * sizeof (pm_pte_t)));
1565 			ep->pte_numthresh = 1;
1566 			ep->pte_thresh = tp;
1567 			*tp++ = req.value;
1568 			(void) strcat((char *)tp, req.physpath);
1569 			rp->ptr_physpath = (char *)tp;
1570 			ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1571 			    (intptr_t)rp + rp->ptr_size);
1572 			PMD(PMD_THRESH, ("ioctl: %s: record thresh %d for "
1573 			    "%s\n", cmdstr, req.value, req.physpath))
1574 			pm_record_thresh(rp);
1575 			/*
1576 			 * Don't free rp, pm_record_thresh() keeps it.
1577 			 * We don't try to apply it ourselves because we'd need
1578 			 * to know too much about locking.  Since we don't
1579 			 * hold a lock the entry could be removed before
1580 			 * we get here
1581 			 */
1582 			ASSERT(dip == NULL);
1583 			ret = 0;		/* can't fail now */
1584 			if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1585 				break;
1586 			}
1587 			(void) pm_thresh_specd(dip);
1588 			PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d)\n",
1589 			    cmdstr, PM_DEVICE(dip)))
1590 			PM_RELE(dip);
1591 			break;
1592 		}
1593 
1594 		case PM_RESET_DEVICE_THRESHOLD:
1595 		{
1596 			/*
1597 			 * This only applies to a currently attached and power
1598 			 * managed node
1599 			 */
1600 			/*
1601 			 * We don't do this to old-style drivers
1602 			 */
1603 			info = PM_GET_PM_INFO(dip);
1604 			if (info == NULL) {
1605 				PMD(PMD_ERROR, ("ioctl: %s: %s not power "
1606 				    "managed\n", cmdstr, req.physpath))
1607 				ret = EINVAL;
1608 				break;
1609 			}
1610 			if (PM_ISBC(dip)) {
1611 				PMD(PMD_ERROR, ("ioctl: %s: %s is BC\n",
1612 				    cmdstr, req.physpath))
1613 				ret = EINVAL;
1614 				break;
1615 			}
1616 			pm_unrecord_threshold(req.physpath);
1617 			if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)
1618 				pm_set_device_threshold(dip,
1619 				    pm_cpu_idle_threshold, PMC_CPU_THRESH);
1620 			else
1621 				pm_set_device_threshold(dip,
1622 				    pm_system_idle_threshold, PMC_DEF_THRESH);
1623 			ret = 0;
1624 			break;
1625 		}
1626 
1627 		case PM_GET_NUM_COMPONENTS:
1628 			ret = 0;
1629 			*rval_p = PM_NUMCMPTS(dip);
1630 			break;
1631 
1632 		case PM_GET_DEVICE_TYPE:
1633 			ret = 0;
1634 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1635 				PMD(PMD_ERROR, ("ioctl: %s: "
1636 				    "PM_NO_PM_COMPONENTS\n", cmdstr))
1637 				*rval_p = PM_NO_PM_COMPONENTS;
1638 				break;
1639 			}
1640 			if (PM_ISBC(dip)) {
1641 				*rval_p = PM_CREATE_COMPONENTS;
1642 			} else {
1643 				*rval_p = PM_AUTOPM;
1644 			}
1645 			break;
1646 
1647 		case PM_SET_COMPONENT_THRESHOLDS:
1648 		{
1649 			int comps = 0;
1650 			int *end = (int *)req.data + icount;
1651 			pm_thresh_rec_t *rp;
1652 			pm_pte_t *ep;	/* threshold header storage */
1653 			int *tp;	/* threshold storage */
1654 			int *ip;
1655 			int j;
1656 			size_t size;
1657 			extern int pm_thresh_specd(dev_info_t *);
1658 			extern int pm_valid_thresh(dev_info_t *,
1659 			    pm_thresh_rec_t *);
1660 
1661 			for (ip = req.data; *ip; ip++) {
1662 				if (ip >= end) {
1663 					ret = EFAULT;
1664 					break;
1665 				}
1666 				comps++;
1667 				/* skip over indicated number of entries */
1668 				for (j = *ip; j; j--) {
1669 					if (++ip >= end) {
1670 						ret = EFAULT;
1671 						break;
1672 					}
1673 				}
1674 				if (ret)
1675 					break;
1676 			}
1677 			if (ret)
1678 				break;
1679 			if ((intptr_t)ip != (intptr_t)end - sizeof (int)) {
1680 				/* did not exactly fill buffer */
1681 				ret = EINVAL;
1682 				break;
1683 			}
1684 			if (comps == 0) {
1685 				PMD(PMD_ERROR, ("ioctl: %s: %s 0 components"
1686 				    "--EINVAL\n", cmdstr, req.physpath))
1687 				ret = EINVAL;
1688 				break;
1689 			}
1690 			/*
1691 			 * The header struct plus one entry struct per component
1692 			 * plus the size of the lists minus the counts
1693 			 * plus the length of the string
1694 			 */
1695 			size = sizeof (pm_thresh_rec_t) +
1696 			    (sizeof (pm_pte_t) * comps) + req.datasize -
1697 			    ((comps + 1) * sizeof (int)) +
1698 			    strlen(req.physpath) + 1;
1699 
1700 			rp = kmem_zalloc(size, KM_SLEEP);
1701 			rp->ptr_size = size;
1702 			rp->ptr_numcomps = comps;
1703 			ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1704 			rp->ptr_entries = ep;
1705 			tp = (int *)((intptr_t)ep +
1706 			    (comps * sizeof (pm_pte_t)));
1707 			for (ip = req.data; *ip; ep++) {
1708 				ep->pte_numthresh = *ip;
1709 				ep->pte_thresh = tp;
1710 				for (j = *ip++; j; j--) {
1711 					*tp++ = *ip++;
1712 				}
1713 			}
1714 			(void) strcat((char *)tp, req.physpath);
1715 			rp->ptr_physpath = (char *)tp;
1716 			ASSERT((intptr_t)end == (intptr_t)ip + sizeof (int));
1717 			ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1718 			    (intptr_t)rp + rp->ptr_size);
1719 
1720 			ASSERT(dip == NULL);
1721 			/*
1722 			 * If this is not a currently power managed node,
1723 			 * then we can't check for validity of the thresholds
1724 			 */
1725 			if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1726 				/* don't free rp, pm_record_thresh uses it */
1727 				pm_record_thresh(rp);
1728 				PMD(PMD_ERROR, ("ioctl: %s: pm_name_to_dip "
1729 				    "for %s failed\n", cmdstr, req.physpath))
1730 				ret = 0;
1731 				break;
1732 			}
1733 			ASSERT(!dipheld);
1734 			dipheld++;
1735 
1736 			if (!pm_valid_thresh(dip, rp)) {
1737 				PMD(PMD_ERROR, ("ioctl: %s: invalid thresh "
1738 				    "for %s@%s(%s#%d)\n", cmdstr,
1739 				    PM_DEVICE(dip)))
1740 				kmem_free(rp, size);
1741 				ret = EINVAL;
1742 				break;
1743 			}
1744 			/*
1745 			 * We don't just apply it ourselves because we'd need
1746 			 * to know too much about locking.  Since we don't
1747 			 * hold a lock the entry could be removed before
1748 			 * we get here
1749 			 */
1750 			pm_record_thresh(rp);
1751 			(void) pm_thresh_specd(dip);
1752 			ret = 0;
1753 			break;
1754 		}
1755 
1756 		case PM_GET_COMPONENT_THRESHOLDS:
1757 		{
1758 			int musthave;
1759 			int numthresholds = 0;
1760 			int wordsize;
1761 			int numcomps;
1762 			caddr_t uaddr = req.data;	/* user address */
1763 			int val;	/* int value to be copied out */
1764 			int32_t val32;	/* int32 value to be copied out */
1765 			caddr_t vaddr;	/* address to copyout from */
1766 			int j;
1767 
1768 #ifdef	_MULTI_DATAMODEL
1769 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1770 				wordsize = sizeof (int32_t);
1771 			} else
1772 #endif /* _MULTI_DATAMODEL */
1773 			{
1774 				wordsize = sizeof (int);
1775 			}
1776 
1777 			ASSERT(dip);
1778 
1779 			numcomps = PM_NUMCMPTS(dip);
1780 			for (i = 0; i < numcomps; i++) {
1781 				cp = PM_CP(dip, i);
1782 				numthresholds += cp->pmc_comp.pmc_numlevels - 1;
1783 			}
1784 			musthave = (numthresholds + numcomps + 1) *  wordsize;
1785 			if (req.datasize < musthave) {
1786 				PMD(PMD_ERROR, ("ioctl: %s: size %ld, need "
1787 				    "%d--EINVAL\n", cmdstr, req.datasize,
1788 				    musthave))
1789 				ret = EINVAL;
1790 				break;
1791 			}
1792 			PM_LOCK_DIP(dip);
1793 			for (i = 0; i < numcomps; i++) {
1794 				int *thp;
1795 				cp = PM_CP(dip, i);
1796 				thp = cp->pmc_comp.pmc_thresh;
1797 				/* first copyout the count */
1798 				if (wordsize == sizeof (int32_t)) {
1799 					val32 = cp->pmc_comp.pmc_numlevels - 1;
1800 					vaddr = (caddr_t)&val32;
1801 				} else {
1802 					val = cp->pmc_comp.pmc_numlevels - 1;
1803 					vaddr = (caddr_t)&val;
1804 				}
1805 				if (ddi_copyout(vaddr, (void *)uaddr,
1806 				    wordsize, mode) != 0) {
1807 					PM_UNLOCK_DIP(dip);
1808 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1809 					    "(%s#%d) vaddr %p EFAULT\n",
1810 					    cmdstr, PM_DEVICE(dip),
1811 					    (void*)vaddr))
1812 					ret = EFAULT;
1813 					break;
1814 				}
1815 				vaddr = uaddr;
1816 				vaddr += wordsize;
1817 				uaddr = (caddr_t)vaddr;
1818 				/* then copyout each threshold value */
1819 				for (j = 0; j < cp->pmc_comp.pmc_numlevels - 1;
1820 				    j++) {
1821 					if (wordsize == sizeof (int32_t)) {
1822 						val32 = thp[j + 1];
1823 						vaddr = (caddr_t)&val32;
1824 					} else {
1825 						val = thp[i + 1];
1826 						vaddr = (caddr_t)&val;
1827 					}
1828 					if (ddi_copyout(vaddr, (void *) uaddr,
1829 					    wordsize, mode) != 0) {
1830 						PM_UNLOCK_DIP(dip);
1831 						PMD(PMD_ERROR, ("ioctl: %s: "
1832 						    "%s@%s(%s#%d) uaddr %p "
1833 						    "EFAULT\n", cmdstr,
1834 						    PM_DEVICE(dip),
1835 						    (void *)uaddr))
1836 						ret = EFAULT;
1837 						break;
1838 					}
1839 					vaddr = uaddr;
1840 					vaddr += wordsize;
1841 					uaddr = (caddr_t)vaddr;
1842 				}
1843 			}
1844 			if (ret)
1845 				break;
1846 			/* last copyout a terminating 0 count */
1847 			if (wordsize == sizeof (int32_t)) {
1848 				val32 = 0;
1849 				vaddr = (caddr_t)&val32;
1850 			} else {
1851 				ASSERT(wordsize == sizeof (int));
1852 				val = 0;
1853 				vaddr = (caddr_t)&val;
1854 			}
1855 			if (ddi_copyout(vaddr, uaddr, wordsize, mode) != 0) {
1856 				PM_UNLOCK_DIP(dip);
1857 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1858 				    "vaddr %p (0 count) EFAULT\n", cmdstr,
1859 				    PM_DEVICE(dip), (void *)vaddr))
1860 				ret = EFAULT;
1861 				break;
1862 			}
1863 			/* finished, so don't need to increment addresses */
1864 			PM_UNLOCK_DIP(dip);
1865 			ret = 0;
1866 			break;
1867 		}
1868 
1869 		case PM_GET_STATS:
1870 		{
1871 			time_t now;
1872 			time_t *timestamp;
1873 			extern int pm_cur_power(pm_component_t *cp);
1874 			int musthave;
1875 			int wordsize;
1876 
1877 #ifdef	_MULTI_DATAMODEL
1878 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1879 				wordsize = sizeof (int32_t);
1880 			} else
1881 #endif /* _MULTI_DATAMODEL */
1882 			{
1883 				wordsize = sizeof (int);
1884 			}
1885 
1886 			comps = PM_NUMCMPTS(dip);
1887 			if (comps == 0 || PM_GET_PM_INFO(dip) == NULL) {
1888 				PMD(PMD_ERROR, ("ioctl: %s: %s no components"
1889 				    " or not power managed--EINVAL\n", cmdstr,
1890 				    req.physpath))
1891 				ret = EINVAL;
1892 				break;
1893 			}
1894 			musthave = comps * 2 * wordsize;
1895 			if (req.datasize < musthave) {
1896 				PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
1897 				    "%d--EINVAL\n", cmdstr, req.datasize,
1898 				    musthave))
1899 				ret = EINVAL;
1900 				break;
1901 			}
1902 
1903 			PM_LOCK_DIP(dip);
1904 			(void) drv_getparm(TIME, &now);
1905 			timestamp = kmem_zalloc(comps * sizeof (time_t),
1906 			    KM_SLEEP);
1907 			pm_get_timestamps(dip, timestamp);
1908 			/*
1909 			 * First the current power levels
1910 			 */
1911 			for (i = 0; i < comps; i++) {
1912 				int curpwr;
1913 				int32_t curpwr32;
1914 				caddr_t cpaddr;
1915 
1916 				cp = PM_CP(dip, i);
1917 				if (wordsize == sizeof (int)) {
1918 					curpwr = pm_cur_power(cp);
1919 					cpaddr = (caddr_t)&curpwr;
1920 				} else {
1921 					ASSERT(wordsize == sizeof (int32_t));
1922 					curpwr32 = pm_cur_power(cp);
1923 					cpaddr = (caddr_t)&curpwr32;
1924 				}
1925 				if (ddi_copyout(cpaddr, (void *) req.data,
1926 				    wordsize, mode) != 0) {
1927 					PM_UNLOCK_DIP(dip);
1928 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1929 					    "(%s#%d) req.data %p EFAULT\n",
1930 					    cmdstr, PM_DEVICE(dip),
1931 					    (void *)req.data))
1932 					ASSERT(!dipheld);
1933 					return (EFAULT);
1934 				}
1935 				cpaddr = (caddr_t)req.data;
1936 				cpaddr += wordsize;
1937 				req.data = cpaddr;
1938 			}
1939 			/*
1940 			 * Then the times remaining
1941 			 */
1942 			for (i = 0; i < comps; i++) {
1943 				int retval;
1944 				int32_t retval32;
1945 				caddr_t rvaddr;
1946 				int curpwr;
1947 
1948 				cp = PM_CP(dip, i);
1949 				curpwr = cp->pmc_cur_pwr;
1950 				if (curpwr == 0 || timestamp[i] == 0) {
1951 					PMD(PMD_STATS, ("ioctl: %s: "
1952 					    "cur_pwer %x, timestamp %lx\n",
1953 					    cmdstr, curpwr, timestamp[i]))
1954 					retval = INT_MAX;
1955 				} else {
1956 					int thresh;
1957 					(void) pm_current_threshold(dip, i,
1958 					    &thresh);
1959 					retval = thresh - (now - timestamp[i]);
1960 					PMD(PMD_STATS, ("ioctl: %s: current "
1961 					    "thresh %x, now %lx, timestamp %lx,"
1962 					    " retval %x\n", cmdstr, thresh, now,
1963 					    timestamp[i], retval))
1964 				}
1965 				if (wordsize == sizeof (int)) {
1966 					rvaddr = (caddr_t)&retval;
1967 				} else {
1968 					ASSERT(wordsize == sizeof (int32_t));
1969 					retval32 = retval;
1970 					rvaddr = (caddr_t)&retval32;
1971 				}
1972 				if (ddi_copyout(rvaddr, (void *) req.data,
1973 				    wordsize, mode) != 0) {
1974 					PM_UNLOCK_DIP(dip);
1975 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1976 					    "(%s#%d) req.data %p EFAULT\n",
1977 					    cmdstr, PM_DEVICE(dip),
1978 					    (void *)req.data))
1979 					ASSERT(!dipheld);
1980 					return (EFAULT);
1981 				}
1982 				rvaddr = (caddr_t)req.data;
1983 				rvaddr += wordsize;
1984 				req.data = (int *)rvaddr;
1985 			}
1986 			PM_UNLOCK_DIP(dip);
1987 			*rval_p = comps;
1988 			ret = 0;
1989 			kmem_free(timestamp, comps * sizeof (time_t));
1990 			break;
1991 		}
1992 
1993 		case PM_GET_COMPONENT_NAME:
1994 			ASSERT(dip);
1995 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
1996 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1997 				    "component %d > numcmpts - 1 %d--EINVAL\n",
1998 				    cmdstr, PM_DEVICE(dip), req.component,
1999 				    PM_NUMCMPTS(dip) - 1))
2000 				ret = EINVAL;
2001 				break;
2002 			}
2003 			if (ret = copyoutstr(cp->pmc_comp.pmc_name,
2004 			    (char *)req.data, req.datasize, &lencopied)) {
2005 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2006 				    "copyoutstr %p failed--EFAULT\n", cmdstr,
2007 				    PM_DEVICE(dip), (void *)req.data))
2008 				break;
2009 			}
2010 			*rval_p = lencopied;
2011 			ret = 0;
2012 			break;
2013 
2014 		case PM_GET_POWER_NAME:
2015 		{
2016 			int i;
2017 
2018 			ASSERT(dip);
2019 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2020 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2021 				    "component %d > numcmpts - 1 %d--EINVAL\n",
2022 				    cmdstr, PM_DEVICE(dip), req.component,
2023 				    PM_NUMCMPTS(dip) - 1))
2024 				ret = EINVAL;
2025 				break;
2026 			}
2027 			if ((i = req.value) < 0 ||
2028 			    i > cp->pmc_comp.pmc_numlevels - 1) {
2029 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2030 				    "value %d > num_levels - 1 %d--EINVAL\n",
2031 				    cmdstr, PM_DEVICE(dip), req.value,
2032 				    cp->pmc_comp.pmc_numlevels - 1))
2033 				ret = EINVAL;
2034 				break;
2035 			}
2036 			dep = cp->pmc_comp.pmc_lnames[req.value];
2037 			if (ret = copyoutstr(dep,
2038 			    req.data, req.datasize, &lencopied)) {
2039 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2040 				    "copyoutstr %p failed--EFAULT\n", cmdstr,
2041 				    PM_DEVICE(dip), (void *)req.data))
2042 				break;
2043 			}
2044 			*rval_p = lencopied;
2045 			ret = 0;
2046 			break;
2047 		}
2048 
2049 		case PM_GET_POWER_LEVELS:
2050 		{
2051 			int musthave;
2052 			int numlevels;
2053 			int wordsize;
2054 
2055 #ifdef	_MULTI_DATAMODEL
2056 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2057 				wordsize = sizeof (int32_t);
2058 			} else
2059 #endif /* _MULTI_DATAMODEL */
2060 			{
2061 				wordsize = sizeof (int);
2062 			}
2063 			ASSERT(dip);
2064 
2065 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2066 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2067 				    "has %d components, component %d requested"
2068 				    "--EINVAL\n", cmdstr, PM_DEVICE(dip),
2069 				    PM_NUMCMPTS(dip), req.component))
2070 				ret = EINVAL;
2071 				break;
2072 			}
2073 			numlevels = cp->pmc_comp.pmc_numlevels;
2074 			musthave = numlevels *  wordsize;
2075 			if (req.datasize < musthave) {
2076 				PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
2077 				    "%d--EINVAL\n", cmdstr, req.datasize,
2078 				    musthave))
2079 				ret = EINVAL;
2080 				break;
2081 			}
2082 			PM_LOCK_DIP(dip);
2083 			for (i = 0; i < numlevels; i++) {
2084 				int level;
2085 				int32_t level32;
2086 				caddr_t laddr;
2087 
2088 				if (wordsize == sizeof (int)) {
2089 					level = cp->pmc_comp.pmc_lvals[i];
2090 					laddr = (caddr_t)&level;
2091 				} else {
2092 					level32 = cp->pmc_comp.pmc_lvals[i];
2093 					laddr = (caddr_t)&level32;
2094 				}
2095 				if (ddi_copyout(laddr, (void *) req.data,
2096 				    wordsize, mode) != 0) {
2097 					PM_UNLOCK_DIP(dip);
2098 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
2099 					    "(%s#%d) laddr %p EFAULT\n",
2100 					    cmdstr, PM_DEVICE(dip),
2101 					    (void *)laddr))
2102 					ASSERT(!dipheld);
2103 					return (EFAULT);
2104 				}
2105 				laddr = (caddr_t)req.data;
2106 				laddr += wordsize;
2107 				req.data = (int *)laddr;
2108 			}
2109 			PM_UNLOCK_DIP(dip);
2110 			*rval_p = numlevels;
2111 			ret = 0;
2112 			break;
2113 		}
2114 
2115 
2116 		case PM_GET_NUM_POWER_LEVELS:
2117 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2118 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2119 				    "component %d > numcmpts - 1 %d--EINVAL\n",
2120 				    cmdstr, PM_DEVICE(dip), req.component,
2121 				    PM_NUMCMPTS(dip) - 1))
2122 				ret = EINVAL;
2123 				break;
2124 			}
2125 			*rval_p = cp->pmc_comp.pmc_numlevels;
2126 			ret = 0;
2127 			break;
2128 
2129 		case PM_GET_DEVICE_THRESHOLD_BASIS:
2130 			ret = 0;
2131 			PM_LOCK_DIP(dip);
2132 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
2133 				PM_UNLOCK_DIP(dip);
2134 				PMD(PMD_ERROR, ("ioctl: %s: "
2135 				    "PM_NO_PM_COMPONENTS\n", cmdstr))
2136 				*rval_p = PM_NO_PM_COMPONENTS;
2137 				break;
2138 			}
2139 			if (PM_ISDIRECT(dip)) {
2140 				PM_UNLOCK_DIP(dip);
2141 				*rval_p = PM_DIRECTLY_MANAGED;
2142 				break;
2143 			}
2144 			switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
2145 			case PMC_DEF_THRESH:
2146 			case PMC_NEXDEF_THRESH:
2147 				*rval_p = PM_DEFAULT_THRESHOLD;
2148 				break;
2149 			case PMC_DEV_THRESH:
2150 				*rval_p = PM_DEVICE_THRESHOLD;
2151 				break;
2152 			case PMC_COMP_THRESH:
2153 				*rval_p = PM_COMPONENT_THRESHOLD;
2154 				break;
2155 			case PMC_CPU_THRESH:
2156 				*rval_p = PM_CPU_THRESHOLD;
2157 				break;
2158 			default:
2159 				if (PM_ISBC(dip)) {
2160 					*rval_p = PM_OLD_THRESHOLD;
2161 					break;
2162 				}
2163 				PMD(PMD_ERROR, ("ioctl: %s: default, not "
2164 				    "BC--EINVAL", cmdstr))
2165 				ret = EINVAL;
2166 				break;
2167 			}
2168 			PM_UNLOCK_DIP(dip);
2169 			break;
2170 		}
2171 		break;
2172 
2173 	case PM_PSC:
2174 		/*
2175 		 * Commands that require pm_state_change_t as arg
2176 		 */
2177 #ifdef	_MULTI_DATAMODEL
2178 		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2179 			pscp32 = (pm_state_change32_t *)arg;
2180 			if (ddi_copyin((caddr_t)arg, &psc32,
2181 			    sizeof (psc32), mode) != 0) {
2182 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2183 				    "EFAULT\n\n", cmdstr))
2184 				ASSERT(!dipheld);
2185 				return (EFAULT);
2186 			}
2187 			psc.physpath = (caddr_t)(uintptr_t)psc32.physpath;
2188 			psc.size = psc32.size;
2189 		} else
2190 #endif /* _MULTI_DATAMODEL */
2191 		{
2192 			pscp = (pm_state_change_t *)arg;
2193 			if (ddi_copyin((caddr_t)arg, &psc,
2194 			    sizeof (psc), mode) != 0) {
2195 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2196 				    "EFAULT\n\n", cmdstr))
2197 				ASSERT(!dipheld);
2198 				return (EFAULT);
2199 			}
2200 		}
2201 		switch (cmd) {
2202 
2203 		case PM_GET_STATE_CHANGE:
2204 		case PM_GET_STATE_CHANGE_WAIT:
2205 		{
2206 			psce_t			*pscep;
2207 			pm_state_change_t	*p;
2208 			caddr_t			physpath;
2209 			size_t			physlen;
2210 
2211 			/*
2212 			 * We want to know if any device has changed state.
2213 			 * We look up by clone.  In case we have another thread
2214 			 * from the same process, we loop.
2215 			 * pm_psc_clone_to_interest() returns a locked entry.
2216 			 * We create an internal copy of the event entry prior
2217 			 * to copyout to user space because we don't want to
2218 			 * hold the psce_lock while doing copyout as we might
2219 			 * hit page fault  which eventually brings us back
2220 			 * here requesting the same lock.
2221 			 */
2222 			mutex_enter(&pm_clone_lock);
2223 			if (!pm_interest_registered(clone))
2224 				pm_register_watcher(clone, NULL);
2225 			while ((pscep =
2226 			    pm_psc_clone_to_interest(clone)) == NULL) {
2227 				if (cmd == PM_GET_STATE_CHANGE) {
2228 					PMD(PMD_IOCTL, ("ioctl: %s: "
2229 					    "EWOULDBLOCK\n", cmdstr))
2230 					mutex_exit(&pm_clone_lock);
2231 					ASSERT(!dipheld);
2232 					return (EWOULDBLOCK);
2233 				} else {
2234 					if (cv_wait_sig(&pm_clones_cv[clone],
2235 					    &pm_clone_lock) == 0) {
2236 						mutex_exit(&pm_clone_lock);
2237 						PMD(PMD_ERROR, ("ioctl: %s "
2238 						    "EINTR\n", cmdstr))
2239 						ASSERT(!dipheld);
2240 						return (EINTR);
2241 					}
2242 				}
2243 			}
2244 			mutex_exit(&pm_clone_lock);
2245 
2246 			physlen = pscep->psce_out->size;
2247 			physpath = NULL;
2248 			/*
2249 			 * If we were unable to store the path while bringing
2250 			 * up the console fb upon entering the prom, we give
2251 			 * a "" name with the overrun event set
2252 			 */
2253 			if (physlen == (size_t)-1) {	/* kmemalloc failed */
2254 				physpath = kmem_zalloc(1, KM_SLEEP);
2255 				physlen = 1;
2256 			}
2257 			if ((psc.physpath == NULL) || (psc.size < physlen)) {
2258 				PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n", cmdstr))
2259 				mutex_exit(&pscep->psce_lock);
2260 				ret = EFAULT;
2261 				break;
2262 			}
2263 			if (physpath == NULL) {
2264 				physpath = kmem_zalloc(physlen, KM_SLEEP);
2265 				bcopy((const void *) pscep->psce_out->physpath,
2266 				    (void *) physpath, physlen);
2267 			}
2268 
2269 			p = pscep->psce_out;
2270 #ifdef	_MULTI_DATAMODEL
2271 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2272 #ifdef DEBUG
2273 				size_t usrcopysize;
2274 #endif
2275 				psc32.flags = (ushort_t)p->flags;
2276 				psc32.event = (ushort_t)p->event;
2277 				psc32.timestamp = (int32_t)p->timestamp;
2278 				psc32.component = (int32_t)p->component;
2279 				psc32.old_level = (int32_t)p->old_level;
2280 				psc32.new_level = (int32_t)p->new_level;
2281 				copysize32 = ((intptr_t)&psc32.size -
2282 				    (intptr_t)&psc32.component);
2283 #ifdef DEBUG
2284 				usrcopysize = ((intptr_t)&pscp32->size -
2285 				    (intptr_t)&pscp32->component);
2286 				ASSERT(usrcopysize == copysize32);
2287 #endif
2288 			} else
2289 #endif /* _MULTI_DATAMODEL */
2290 			{
2291 				psc.flags = p->flags;
2292 				psc.event = p->event;
2293 				psc.timestamp = p->timestamp;
2294 				psc.component = p->component;
2295 				psc.old_level = p->old_level;
2296 				psc.new_level = p->new_level;
2297 				copysize = ((long)&p->size -
2298 				    (long)&p->component);
2299 			}
2300 			if (p->size != (size_t)-1)
2301 				kmem_free(p->physpath, p->size);
2302 			p->size = 0;
2303 			p->physpath = NULL;
2304 			if (pscep->psce_out == pscep->psce_last)
2305 				p = pscep->psce_first;
2306 			else
2307 				p++;
2308 			pscep->psce_out = p;
2309 			mutex_exit(&pscep->psce_lock);
2310 
2311 			ret = copyoutstr(physpath, psc.physpath,
2312 			    physlen, &lencopied);
2313 			kmem_free(physpath, physlen);
2314 			if (ret) {
2315 				PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2316 				    "failed--EFAULT\n", cmdstr,
2317 				    (void *)psc.physpath))
2318 				break;
2319 			}
2320 
2321 #ifdef	_MULTI_DATAMODEL
2322 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2323 				if (ddi_copyout(&psc32.component,
2324 				    &pscp32->component, copysize32, mode)
2325 				    != 0) {
2326 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2327 					    "failed--EFAULT\n", cmdstr))
2328 					ret = EFAULT;
2329 					break;
2330 				}
2331 			} else
2332 #endif	/* _MULTI_DATAMODEL */
2333 			{
2334 				if (ddi_copyout(&psc.component,
2335 				    &pscp->component, copysize, mode) != 0) {
2336 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2337 					    "failed--EFAULT\n", cmdstr))
2338 					ret = EFAULT;
2339 					break;
2340 				}
2341 			}
2342 			ret = 0;
2343 			break;
2344 		}
2345 
2346 		case PM_DIRECT_NOTIFY:
2347 		case PM_DIRECT_NOTIFY_WAIT:
2348 		{
2349 			psce_t			*pscep;
2350 			pm_state_change_t	*p;
2351 			caddr_t			physpath;
2352 			size_t			physlen;
2353 			/*
2354 			 * We want to know if any direct device of ours has
2355 			 * something we should know about.  We look up by clone.
2356 			 * In case we have another thread from the same process,
2357 			 * we loop.
2358 			 * pm_psc_clone_to_direct() returns a locked entry.
2359 			 */
2360 			mutex_enter(&pm_clone_lock);
2361 			while (pm_poll_cnt[clone] == 0 ||
2362 			    (pscep = pm_psc_clone_to_direct(clone)) == NULL) {
2363 				if (cmd == PM_DIRECT_NOTIFY) {
2364 					PMD(PMD_IOCTL, ("ioctl: %s: "
2365 					    "EWOULDBLOCK\n", cmdstr))
2366 					mutex_exit(&pm_clone_lock);
2367 					ASSERT(!dipheld);
2368 					return (EWOULDBLOCK);
2369 				} else {
2370 					if (cv_wait_sig(&pm_clones_cv[clone],
2371 					    &pm_clone_lock) == 0) {
2372 						mutex_exit(&pm_clone_lock);
2373 						PMD(PMD_ERROR, ("ioctl: %s: "
2374 						    "EINTR\n", cmdstr))
2375 						ASSERT(!dipheld);
2376 						return (EINTR);
2377 					}
2378 				}
2379 			}
2380 			mutex_exit(&pm_clone_lock);
2381 			physlen = pscep->psce_out->size;
2382 			if ((psc.physpath == NULL) || (psc.size < physlen)) {
2383 				mutex_exit(&pscep->psce_lock);
2384 				PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n",
2385 				    cmdstr))
2386 				ret = EFAULT;
2387 				break;
2388 			}
2389 			physpath = kmem_zalloc(physlen, KM_SLEEP);
2390 			bcopy((const void *) pscep->psce_out->physpath,
2391 			    (void *) physpath, physlen);
2392 
2393 			p = pscep->psce_out;
2394 #ifdef	_MULTI_DATAMODEL
2395 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2396 #ifdef DEBUG
2397 				size_t usrcopysize;
2398 #endif
2399 				psc32.component = (int32_t)p->component;
2400 				psc32.flags = (ushort_t)p->flags;
2401 				psc32.event = (ushort_t)p->event;
2402 				psc32.timestamp = (int32_t)p->timestamp;
2403 				psc32.old_level = (int32_t)p->old_level;
2404 				psc32.new_level = (int32_t)p->new_level;
2405 				copysize32 = (intptr_t)&psc32.size -
2406 				    (intptr_t)&psc32.component;
2407 				PMD(PMD_DPM, ("ioctl: %s: PDN32 %s, comp %d "
2408 				    "%d -> %d\n", cmdstr, physpath,
2409 				    p->component, p->old_level, p->new_level))
2410 #ifdef DEBUG
2411 				usrcopysize = (intptr_t)&pscp32->size -
2412 				    (intptr_t)&pscp32->component;
2413 				ASSERT(usrcopysize == copysize32);
2414 #endif
2415 			} else
2416 #endif
2417 			{
2418 				psc.component = p->component;
2419 				psc.flags = p->flags;
2420 				psc.event = p->event;
2421 				psc.timestamp = p->timestamp;
2422 				psc.old_level = p->old_level;
2423 				psc.new_level = p->new_level;
2424 				copysize = (intptr_t)&p->size -
2425 				    (intptr_t)&p->component;
2426 				PMD(PMD_DPM, ("ioctl: %s: PDN %s, comp %d "
2427 				    "%d -> %d\n", cmdstr, physpath,
2428 				    p->component, p->old_level, p->new_level))
2429 			}
2430 			mutex_enter(&pm_clone_lock);
2431 			PMD(PMD_IOCTL, ("ioctl: %s: pm_poll_cnt[%d] is %d "
2432 			    "before decrement\n", cmdstr, clone,
2433 			    pm_poll_cnt[clone]))
2434 			pm_poll_cnt[clone]--;
2435 			mutex_exit(&pm_clone_lock);
2436 			kmem_free(p->physpath, p->size);
2437 			p->size = 0;
2438 			p->physpath = NULL;
2439 			if (pscep->psce_out == pscep->psce_last)
2440 				p = pscep->psce_first;
2441 			else
2442 				p++;
2443 			pscep->psce_out = p;
2444 			mutex_exit(&pscep->psce_lock);
2445 
2446 			ret = copyoutstr(physpath, psc.physpath,
2447 			    physlen, &lencopied);
2448 			kmem_free(physpath, physlen);
2449 			if (ret) {
2450 				PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2451 				    "failed--EFAULT\n", cmdstr,
2452 				    (void *)psc.physpath))
2453 				break;
2454 			}
2455 
2456 #ifdef	_MULTI_DATAMODEL
2457 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2458 				if (ddi_copyout(&psc32.component,
2459 				    &pscp32->component, copysize32, mode)
2460 					!= 0) {
2461 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2462 					    "failed--EFAULT\n", cmdstr))
2463 					ret = EFAULT;
2464 					break;
2465 				}
2466 			} else
2467 #endif	/* _MULTI_DATAMODEL */
2468 			{
2469 				if (ddi_copyout(&psc.component,
2470 				    &pscp->component, copysize, mode) != 0) {
2471 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2472 					    "failed--EFAULT\n", cmdstr))
2473 					ret = EFAULT;
2474 					break;
2475 				}
2476 			}
2477 			ret = 0;
2478 			break;
2479 		}
2480 		default:
2481 			ASSERT(0);
2482 		}
2483 		break;
2484 
2485 	case NOSTRUCT:
2486 		switch (cmd) {
2487 		case PM_START_PM:
2488 		case PM_START_CPUPM:
2489 			mutex_enter(&pm_scan_lock);
2490 			if ((cmd == PM_START_PM && autopm_enabled) ||
2491 			    (cmd == PM_START_CPUPM && PM_CPUPM_ENABLED)) {
2492 				mutex_exit(&pm_scan_lock);
2493 				PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
2494 				    cmdstr))
2495 				ret = EBUSY;
2496 				break;
2497 			}
2498 			if (cmd == PM_START_PM)
2499 			    autopm_enabled = 1;
2500 			else
2501 			    cpupm = PM_CPUPM_ENABLE;
2502 			mutex_exit(&pm_scan_lock);
2503 			ddi_walk_devs(ddi_root_node(), pm_start_pm_walk, &cmd);
2504 			ret = 0;
2505 			break;
2506 
2507 		case PM_RESET_PM:
2508 		case PM_STOP_PM:
2509 		case PM_STOP_CPUPM:
2510 		{
2511 			extern void pm_discard_thresholds(void);
2512 
2513 			mutex_enter(&pm_scan_lock);
2514 			if ((cmd == PM_STOP_PM && !autopm_enabled) ||
2515 			    (cmd == PM_STOP_CPUPM && PM_CPUPM_DISABLED)) {
2516 				mutex_exit(&pm_scan_lock);
2517 				PMD(PMD_ERROR, ("ioctl: %s: EINVAL\n",
2518 				    cmdstr))
2519 				ret = EINVAL;
2520 				break;
2521 			}
2522 			if (cmd == PM_STOP_PM)
2523 			    autopm_enabled = 0;
2524 			else if (cmd == PM_STOP_CPUPM)
2525 			    cpupm = PM_CPUPM_DISABLE;
2526 			else {
2527 			    autopm_enabled = 0;
2528 			    cpupm = PM_CPUPM_NOTSET;
2529 			}
2530 			mutex_exit(&pm_scan_lock);
2531 
2532 			/*
2533 			 * bring devices to full power level, stop scan
2534 			 */
2535 			ddi_walk_devs(ddi_root_node(), pm_stop_pm_walk, &cmd);
2536 			ret = 0;
2537 			if (cmd == PM_STOP_PM || cmd == PM_STOP_CPUPM)
2538 				break;
2539 			/*
2540 			 * Now do only PM_RESET_PM stuff.
2541 			 */
2542 			pm_system_idle_threshold = pm_default_idle_threshold;
2543 			pm_cpu_idle_threshold = 0;
2544 			pm_discard_thresholds();
2545 			pm_all_to_default_thresholds();
2546 			pm_dispatch_to_dep_thread(PM_DEP_WK_REMOVE_DEP,
2547 			    NULL, NULL, PM_DEP_WAIT, NULL, 0);
2548 			break;
2549 		}
2550 
2551 		case PM_GET_SYSTEM_THRESHOLD:
2552 			*rval_p = pm_system_idle_threshold;
2553 			ret = 0;
2554 			break;
2555 
2556 		case PM_GET_DEFAULT_SYSTEM_THRESHOLD:
2557 			*rval_p = pm_default_idle_threshold;
2558 			ret = 0;
2559 			break;
2560 
2561 		case PM_GET_CPU_THRESHOLD:
2562 			*rval_p = pm_cpu_idle_threshold;
2563 			ret = 0;
2564 			break;
2565 
2566 		case PM_SET_SYSTEM_THRESHOLD:
2567 		case PM_SET_CPU_THRESHOLD:
2568 			if ((int)arg < 0) {
2569 				PMD(PMD_ERROR, ("ioctl: %s: arg 0x%x < 0"
2570 				    "--EINVAL\n", cmdstr, (int)arg))
2571 				ret = EINVAL;
2572 				break;
2573 			}
2574 			PMD(PMD_IOCTL, ("ioctl: %s: 0x%x 0t%d\n", cmdstr,
2575 			    (int)arg, (int)arg))
2576 			if (cmd == PM_SET_SYSTEM_THRESHOLD)
2577 				pm_system_idle_threshold = (int)arg;
2578 			else {
2579 				pm_cpu_idle_threshold = (int)arg;
2580 			}
2581 			ddi_walk_devs(ddi_root_node(), pm_set_idle_thresh_walk,
2582 				    (void *) &cmd);
2583 
2584 			ret = 0;
2585 			break;
2586 
2587 		case PM_IDLE_DOWN:
2588 			if (pm_timeout_idledown() != 0) {
2589 				ddi_walk_devs(ddi_root_node(),
2590 				    pm_start_idledown, (void *)PMID_IOC);
2591 			}
2592 			ret = 0;
2593 			break;
2594 
2595 		case PM_GET_PM_STATE:
2596 			if (autopm_enabled) {
2597 				*rval_p = PM_SYSTEM_PM_ENABLED;
2598 			} else {
2599 				*rval_p = PM_SYSTEM_PM_DISABLED;
2600 			}
2601 			ret = 0;
2602 			break;
2603 
2604 		case PM_GET_CPUPM_STATE:
2605 			if (PM_CPUPM_ENABLED)
2606 				*rval_p = PM_CPU_PM_ENABLED;
2607 			else if (PM_CPUPM_DISABLED)
2608 				*rval_p = PM_CPU_PM_DISABLED;
2609 			else
2610 				*rval_p = PM_CPU_PM_NOTSET;
2611 			ret = 0;
2612 			break;
2613 		}
2614 		break;
2615 
2616 	default:
2617 		/*
2618 		 * Internal error, invalid ioctl description
2619 		 * force debug entry even if pm_debug not set
2620 		 */
2621 #ifdef	DEBUG
2622 		pm_log("ioctl: invalid str_type %d for cmd %d (%s)\n",
2623 		    pcip->str_type, cmd, pcip->name);
2624 #endif
2625 		ASSERT(0);
2626 		return (EIO);
2627 	}
2628 	ASSERT(ret != 0x0badcafe);	/* some cmd in wrong case! */
2629 	if (dipheld) {
2630 		ASSERT(dip);
2631 		PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d) for "
2632 		    "exiting pm_ioctl\n", cmdstr, PM_DEVICE(dip)))
2633 		PM_RELE(dip);
2634 	}
2635 	PMD(PMD_IOCTL, ("ioctl: %s: end, ret=%d\n", cmdstr, ret))
2636 	return (ret);
2637 }
2638