xref: /titanic_41/usr/src/uts/common/io/pm.c (revision 909c1a3310e6a348a85950c7179fefda50d0e37d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * pm	This driver now only handles the ioctl interface.  The scanning
30  *	and policy stuff now lives in common/os/sunpm.c.
31  *	Not DDI compliant
32  */
33 
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/modctl.h>
37 #include <sys/conf.h>		/* driver flags and functions */
38 #include <sys/open.h>		/* OTYP_CHR definition */
39 #include <sys/stat.h>		/* S_IFCHR definition */
40 #include <sys/pathname.h>	/* name -> dev_info xlation */
41 #include <sys/kmem.h>		/* memory alloc stuff */
42 #include <sys/debug.h>
43 #include <sys/pm.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/epm.h>
47 #include <sys/vfs.h>
48 #include <sys/mode.h>
49 #include <sys/mkdev.h>
50 #include <sys/promif.h>
51 #include <sys/consdev.h>
52 #include <sys/ddi_impldefs.h>
53 #include <sys/poll.h>
54 #include <sys/note.h>
55 #include <sys/taskq.h>
56 #include <sys/policy.h>
57 
58 /*
59  * Minor number is (instance << 8) + clone minor, where clone is in the
60  * range 1-254 (0 is reserved for the "original" minor).
61  */
62 #define	PM_MINOR_TO_CLONE(minor) ((minor) & (PM_MAX_CLONE -1))
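/*
 * Illustrative sketch (editor's note, not part of the original source):
 * assuming PM_MAX_CLONE is a power of two, so the mask above extracts the
 * clone bits, a minor number built in pm_open() as (instance << 8) + clone
 * decomposes as follows for, say, instance 1 and clone 5:
 *
 *	minor == (1 << 8) + 5 == 0x105
 *	PM_MINOR_TO_CLONE(minor) == 5
 *	getminor(dev) >> 8 == 1	(instance, as used by pm_getinfo())
 */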
63 
64 #define	PM_NUMCMPTS(dip) (DEVI(dip)->devi_pm_num_components)
65 #define	PM_IS_CFB(dip) (DEVI(dip)->devi_pm_flags & PMC_CONSOLE_FB)
66 #define	PM_MAJOR(dip) ddi_driver_major(dip)
67 #define	PM_RELE(dip) ddi_release_devi(dip)
68 
69 #define	PM_IDLEDOWN_TIME	10
70 #define	MAXSMBIOSSTRLEN 64	/* from SMBIOS spec */
71 #define	MAXCOPYBUF 	(MAXSMBIOSSTRLEN + 1)
72 
73 extern kmutex_t	pm_scan_lock;	/* protects autopm_enable, pm_scans_disabled */
74 extern kmutex_t	pm_clone_lock;	/* protects pm_clones array */
75 extern int	autopm_enabled;
76 extern pm_cpupm_t cpupm;
77 extern int	pm_default_idle_threshold;
78 extern int	pm_system_idle_threshold;
79 extern int	pm_cpu_idle_threshold;
80 extern kcondvar_t pm_clones_cv[PM_MAX_CLONE];
81 extern uint_t	pm_poll_cnt[PM_MAX_CLONE];
82 extern int	autoS3_enabled;
83 extern void	pm_record_thresh(pm_thresh_rec_t *);
84 extern void	pm_register_watcher(int, dev_info_t *);
85 extern int	pm_get_current_power(dev_info_t *, int, int *);
86 extern int	pm_interest_registered(int);
87 extern void	pm_all_to_default_thresholds(void);
88 extern int	pm_current_threshold(dev_info_t *, int, int *);
89 extern void	pm_deregister_watcher(int, dev_info_t *);
90 extern void	pm_unrecord_threshold(char *);
91 extern int	pm_S3_enabled;
92 extern int	pm_ppm_searchlist(pm_searchargs_t *);
93 extern psce_t	*pm_psc_clone_to_direct(int);
94 extern psce_t	*pm_psc_clone_to_interest(int);
95 
96 /*
97  * The soft state of the power manager.  Since there will only be
98  * one of these, just reference it through a static pointer.
99  */
100 static struct pmstate {
101 	dev_info_t	*pm_dip;		/* ptr to our dev_info node */
102 	int		pm_instance;		/* for ddi_get_instance() */
103 	timeout_id_t	pm_idledown_id;		/* pm idledown timeout id */
104 	uchar_t		pm_clones[PM_MAX_CLONE]; /* uniqueify multiple opens */
105 	struct cred	*pm_cred[PM_MAX_CLONE];	/* cred for each unique open */
106 } pm_state = { NULL, -1, (timeout_id_t)0 };
107 typedef struct pmstate *pm_state_t;
108 static pm_state_t pmstp = &pm_state;
109 
110 static int	pm_open(dev_t *, int, int, cred_t *);
111 static int	pm_close(dev_t, int, int, cred_t *);
112 static int	pm_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
113 static int	pm_chpoll(dev_t, short, int, short *, struct pollhead **);
114 
115 static struct cb_ops pm_cb_ops = {
116 	pm_open,	/* open */
117 	pm_close,	/* close */
118 	nodev,		/* strategy */
119 	nodev,		/* print */
120 	nodev,		/* dump */
121 	nodev,		/* read */
122 	nodev,		/* write */
123 	pm_ioctl,	/* ioctl */
124 	nodev,		/* devmap */
125 	nodev,		/* mmap */
126 	nodev,		/* segmap */
127 	pm_chpoll,	/* poll */
128 	ddi_prop_op,	/* prop_op */
129 	NULL,		/* streamtab */
130 	D_NEW | D_MP	/* driver compatibility flag */
131 };
132 
133 static int pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
134     void **result);
135 static int pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
136 static int pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
137 
138 static struct dev_ops pm_ops = {
139 	DEVO_REV,		/* devo_rev */
140 	0,			/* refcnt */
141 	pm_getinfo,		/* info */
142 	nulldev,		/* identify */
143 	nulldev,		/* probe */
144 	pm_attach,		/* attach */
145 	pm_detach,		/* detach */
146 	nodev,			/* reset */
147 	&pm_cb_ops,		/* driver operations */
148 	NULL,			/* bus operations */
149 	NULL			/* power */
150 };
151 
152 static struct modldrv modldrv = {
153 	&mod_driverops,
154 	"power management driver v%I%",
155 	&pm_ops
156 };
157 
158 static struct modlinkage modlinkage = {
159 	MODREV_1, &modldrv, 0
160 };
161 
162 /* Local functions */
163 #ifdef DEBUG
164 static int	print_info(dev_info_t *, void *);
165 
166 #endif
167 
168 int
169 _init(void)
170 {
171 	return (mod_install(&modlinkage));
172 }
173 
174 int
175 _fini(void)
176 {
177 	return (mod_remove(&modlinkage));
178 }
179 
180 int
181 _info(struct modinfo *modinfop)
182 {
183 	return (mod_info(&modlinkage, modinfop));
184 }
185 
186 static int
187 pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
188 {
189 	int		i;
190 
191 	switch (cmd) {
192 
193 	case DDI_ATTACH:
194 		if (pmstp->pm_instance != -1)	/* Only allow one instance */
195 			return (DDI_FAILURE);
196 		pmstp->pm_instance = ddi_get_instance(dip);
197 		if (ddi_create_minor_node(dip, "pm", S_IFCHR,
198 		    (pmstp->pm_instance << 8) + 0,
199 		    DDI_PSEUDO, 0) != DDI_SUCCESS) {
200 			return (DDI_FAILURE);
201 		}
202 		pmstp->pm_dip = dip;	/* pm_init and getinfo depend on it */
203 
204 		for (i = 0; i < PM_MAX_CLONE; i++)
205 			cv_init(&pm_clones_cv[i], NULL, CV_DEFAULT, NULL);
206 
207 		ddi_report_dev(dip);
208 		return (DDI_SUCCESS);
209 
210 	default:
211 		return (DDI_FAILURE);
212 	}
213 }
214 
215 /* ARGSUSED */
216 static int
217 pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
218 {
219 	int i;
220 
221 	switch (cmd) {
222 	case DDI_DETACH:
223 		/*
224 		 * Don't detach while idledown timeout is pending.  Note that
225 		 * we already know we're not in pm_ioctl() due to framework
226 		 * synchronization, so this is a sufficient test
227 		 */
228 		if (pmstp->pm_idledown_id)
229 			return (DDI_FAILURE);
230 
231 		for (i = 0; i < PM_MAX_CLONE; i++)
232 			cv_destroy(&pm_clones_cv[i]);
233 
234 		ddi_remove_minor_node(dip, NULL);
235 		pmstp->pm_instance = -1;
236 		return (DDI_SUCCESS);
237 
238 	default:
239 		return (DDI_FAILURE);
240 	}
241 }
242 
243 static int
244 pm_close_direct_pm_device(dev_info_t *dip, void *arg)
245 {
246 	int clone;
247 	char *pathbuf;
248 	pm_info_t *info = PM_GET_PM_INFO(dip);
249 
250 	clone = *((int *)arg);
251 
252 	if (!info)
253 		return (DDI_WALK_CONTINUE);
254 
255 	pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
256 	PM_LOCK_DIP(dip);
257 	if (clone == info->pmi_clone) {
258 		PMD(PMD_CLOSE, ("pm_close: found %s@%s(%s#%d)\n",
259 		    PM_DEVICE(dip)))
260 		ASSERT(PM_ISDIRECT(dip));
261 		info->pmi_dev_pm_state &= ~PM_DIRECT;
262 		PM_UNLOCK_DIP(dip);
263 		pm_proceed(dip, PMP_RELEASE, -1, -1);
264 		/* Bring ourselves up if there is a keeper that is up */
265 		(void) ddi_pathname(dip, pathbuf);
266 		pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF, NULL,
267 		    pathbuf, PM_DEP_NOWAIT, NULL, 0);
268 		PM_LOCK_DIP(dip);
269 		info->pmi_clone = 0;
270 		PM_UNLOCK_DIP(dip);
271 	} else {
272 		PM_UNLOCK_DIP(dip);
273 	}
274 	kmem_free(pathbuf, MAXPATHLEN);
275 
276 	/* restart autopm on device released from direct pm */
277 	pm_rescan(dip);
278 
279 	return (DDI_WALK_CONTINUE);
280 }
281 
282 #define	PM_REQ		1
283 #define	NOSTRUCT	2
284 #define	DIP		3
285 #define	NODIP		4
286 #define	NODEP		5
287 #define	DEP		6
288 #define	PM_PSC		7
289 #define	PM_SRCH		8
290 
291 #define	CHECKPERMS	0x001
292 #define	SU		0x002
293 #define	SG		0x004
294 #define	OWNER		0x008
295 
296 #define	INWHO		0x001
297 #define	INDATAINT	0x002
298 #define	INDATASTRING	0x004
299 #define	INDEP		0x008
300 #define	INDATAOUT	0x010
301 #define	INDATA	(INDATAOUT | INDATAINT | INDATASTRING | INDEP)
302 
303 struct pm_cmd_info {
304 	int cmd;		/* command code */
305 	char *name;		/* printable string */
306 	int supported;		/* true if still supported */
307 	int str_type;		/* PM_REQ or NOSTRUCT */
308 	int inargs;		/* INWHO, INDATAINT, INDATASTRING, INDEP, */
309 				/* INDATAOUT */
310 	int diptype;		/* DIP or NODIP */
311 	int deptype;		/* DEP or NODEP */
312 	int permission;		/* SU, SG, or CHECKPERMS */
313 };
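/*
 * Example of how to read an entry of the pmci[] table below (an editor's
 * illustration, not used by the code): the PM_DIRECT_PM entry,
 *
 *	{PM_DIRECT_PM, "PM_DIRECT_PM", 1, PM_REQ, INWHO, DIP, NODEP, (SU | SG)}
 *
 * says the command is still supported, its argument is a pm_req_t (PM_REQ),
 * the physpath must be copied in (INWHO), the path must resolve to a held
 * dev_info node (DIP), no dependent string is expected (NODEP), and the
 * caller needs power management privilege or group 0 (SU | SG).
 */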
314 
315 #ifdef DEBUG
316 char *pm_cmd_string;
317 int pm_cmd;
318 #endif
319 
320 /*
321  * Returns true if permission granted by credentials
322  */
323 static int
324 pm_perms(int perm, cred_t *cr)
325 {
326 	if (perm == 0)			/* no restrictions */
327 		return (1);
328 	if (perm == CHECKPERMS)		/* ok for now (is checked later) */
329 		return (1);
330 	if ((perm & SU) && secpolicy_power_mgmt(cr) == 0) /* privileged? */
331 		return (1);
332 	if ((perm & SG) && (crgetgid(cr) == 0))	/* group 0 is ok */
333 		return (1);
334 	return (0);
335 }
336 
337 #ifdef DEBUG
338 static int
339 print_info(dev_info_t *dip, void *arg)
340 {
341 	_NOTE(ARGUNUSED(arg))
342 	pm_info_t	*info;
343 	int		i, j;
344 	struct pm_component *cp;
345 	extern int pm_cur_power(pm_component_t *cp);
346 
347 	info = PM_GET_PM_INFO(dip);
348 	if (!info)
349 		return (DDI_WALK_CONTINUE);
350 	cmn_err(CE_CONT, "pm_info for %s\n", ddi_node_name(dip));
351 	for (i = 0; i < PM_NUMCMPTS(dip); i++) {
352 		cp = PM_CP(dip, i);
353 		cmn_err(CE_CONT, "\tThresholds[%d] =",  i);
354 		for (j = 0; j < cp->pmc_comp.pmc_numlevels; j++)
355 			cmn_err(CE_CONT, " %d", cp->pmc_comp.pmc_thresh[j]);
356 		cmn_err(CE_CONT, "\n");
357 		cmn_err(CE_CONT, "\tCurrent power[%d] = %d\n", i,
358 		    pm_cur_power(cp));
359 	}
360 	if (PM_ISDIRECT(dip))
361 		cmn_err(CE_CONT, "\tDirect power management\n");
362 	return (DDI_WALK_CONTINUE);
363 }
364 #endif
365 
366 /*
367  * command, name, supported, str_type, inargs, diptype, deptype, permission
368  */
369 static struct pm_cmd_info pmci[] = {
370 	{PM_SCHEDULE, "PM_SCHEDULE", 0},
371 	{PM_GET_IDLE_TIME, "PM_GET_IDLE_TIME", 0},
372 	{PM_GET_NUM_CMPTS, "PM_GET_NUM_CMPTS", 0},
373 	{PM_GET_THRESHOLD, "PM_GET_THRESHOLD", 0},
374 	{PM_SET_THRESHOLD, "PM_SET_THRESHOLD", 0},
375 	{PM_GET_NORM_PWR, "PM_GET_NORM_PWR", 0},
376 	{PM_SET_CUR_PWR, "PM_SET_CUR_PWR", 0},
377 	{PM_GET_CUR_PWR, "PM_GET_CUR_PWR", 0},
378 	{PM_GET_NUM_DEPS, "PM_GET_NUM_DEPS", 0},
379 	{PM_GET_DEP, "PM_GET_DEP", 0},
380 	{PM_ADD_DEP, "PM_ADD_DEP", 0},
381 	{PM_REM_DEP, "PM_REM_DEP", 0},
382 	{PM_REM_DEVICE, "PM_REM_DEVICE", 0},
383 	{PM_REM_DEVICES, "PM_REM_DEVICES", 0},
384 	{PM_REPARSE_PM_PROPS, "PM_REPARSE_PM_PROPS", 1, PM_REQ, INWHO, DIP,
385 	    NODEP},
386 	{PM_DISABLE_AUTOPM, "PM_DISABLE_AUTOPM", 0},
387 	{PM_REENABLE_AUTOPM, "PM_REENABLE_AUTOPM", 0},
388 	{PM_SET_NORM_PWR, "PM_SET_NORM_PWR", 0 },
389 	{PM_SET_DEVICE_THRESHOLD, "PM_SET_DEVICE_THRESHOLD", 1, PM_REQ,
390 	    INWHO, NODIP, NODEP, SU},
391 	{PM_GET_SYSTEM_THRESHOLD, "PM_GET_SYSTEM_THRESHOLD", 1, NOSTRUCT},
392 	{PM_GET_DEFAULT_SYSTEM_THRESHOLD, "PM_GET_DEFAULT_SYSTEM_THRESHOLD",
393 	    1, NOSTRUCT},
394 	{PM_SET_SYSTEM_THRESHOLD, "PM_SET_SYSTEM_THRESHOLD", 1, NOSTRUCT,
395 	    0, 0, 0, SU},
396 	{PM_START_PM, "PM_START_PM", 1, NOSTRUCT, 0, 0, 0, SU},
397 	{PM_STOP_PM, "PM_STOP_PM", 1, NOSTRUCT, 0, 0, 0, SU},
398 	{PM_RESET_PM, "PM_RESET_PM", 1, NOSTRUCT, 0, 0, 0, SU},
399 	{PM_GET_STATS, "PM_GET_STATS", 1, PM_REQ, INWHO | INDATAOUT,
400 	    DIP, NODEP},
401 	{PM_GET_DEVICE_THRESHOLD, "PM_GET_DEVICE_THRESHOLD", 1, PM_REQ, INWHO,
402 	    DIP, NODEP},
403 	{PM_GET_POWER_NAME, "PM_GET_POWER_NAME", 1, PM_REQ, INWHO | INDATAOUT,
404 	    DIP, NODEP},
405 	{PM_GET_POWER_LEVELS, "PM_GET_POWER_LEVELS", 1, PM_REQ,
406 	    INWHO | INDATAOUT, DIP, NODEP},
407 	{PM_GET_NUM_COMPONENTS, "PM_GET_NUM_COMPONENTS", 1, PM_REQ, INWHO,
408 	    DIP, NODEP},
409 	{PM_GET_COMPONENT_NAME, "PM_GET_COMPONENT_NAME", 1, PM_REQ,
410 	    INWHO | INDATAOUT, DIP, NODEP},
411 	{PM_GET_NUM_POWER_LEVELS, "PM_GET_NUM_POWER_LEVELS", 1, PM_REQ, INWHO,
412 	    DIP, NODEP},
413 	{PM_GET_STATE_CHANGE, "PM_GET_STATE_CHANGE", 1, PM_PSC},
414 	{PM_GET_STATE_CHANGE_WAIT, "PM_GET_STATE_CHANGE_WAIT", 1, PM_PSC},
415 	{PM_DIRECT_PM, "PM_DIRECT_PM", 1, PM_REQ, INWHO, DIP, NODEP,
416 	    (SU | SG)},
417 	{PM_RELEASE_DIRECT_PM, "PM_RELEASE_DIRECT_PM", 1, PM_REQ, INWHO,
418 	    DIP, NODEP},
419 	{PM_DIRECT_NOTIFY, "PM_DIRECT_NOTIFY", 1, PM_PSC},
420 	{PM_DIRECT_NOTIFY_WAIT, "PM_DIRECT_NOTIFY_WAIT", 1, PM_PSC},
421 	{PM_RESET_DEVICE_THRESHOLD, "PM_RESET_DEVICE_THRESHOLD", 1, PM_REQ,
422 	    INWHO, DIP, NODEP, SU},
423 	{PM_GET_PM_STATE, "PM_GET_PM_STATE", 1, NOSTRUCT},
424 	{PM_GET_AUTOS3_STATE, "PM_GET_AUTOS3_STATE", 1, NOSTRUCT},
425 	{PM_GET_S3_SUPPORT_STATE, "PM_GET_S3_SUPPORT_STATE", 1, NOSTRUCT},
426 	{PM_GET_DEVICE_TYPE, "PM_GET_DEVICE_TYPE", 1, PM_REQ, INWHO,
427 	    DIP, NODEP},
428 	{PM_SET_COMPONENT_THRESHOLDS, "PM_SET_COMPONENT_THRESHOLDS", 1, PM_REQ,
429 	    INWHO | INDATAINT, NODIP, NODEP, SU},
430 	{PM_GET_COMPONENT_THRESHOLDS, "PM_GET_COMPONENT_THRESHOLDS", 1, PM_REQ,
431 	    INWHO | INDATAOUT, DIP, NODEP},
432 	{PM_IDLE_DOWN, "PM_IDLE_DOWN", 1, NOSTRUCT, 0, 0, 0, SU},
433 	{PM_GET_DEVICE_THRESHOLD_BASIS, "PM_GET_DEVICE_THRESHOLD_BASIS", 1,
434 	    PM_REQ, INWHO, DIP, NODEP},
435 	{PM_SET_CURRENT_POWER, "PM_SET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
436 	    NODEP},
437 	{PM_GET_CURRENT_POWER, "PM_GET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
438 	    NODEP},
439 	{PM_GET_FULL_POWER, "PM_GET_FULL_POWER", 1, PM_REQ, INWHO, DIP,
440 	    NODEP},
441 	{PM_ADD_DEPENDENT, "PM_ADD_DEPENDENT", 1, PM_REQ, INWHO | INDATASTRING,
442 	    DIP, DEP, SU},
443 	{PM_GET_TIME_IDLE, "PM_GET_TIME_IDLE", 1, PM_REQ, INWHO, DIP, NODEP},
444 	{PM_ADD_DEPENDENT_PROPERTY, "PM_ADD_DEPENDENT_PROPERTY", 1, PM_REQ,
445 	    INWHO | INDATASTRING, NODIP, DEP, SU},
446 	{PM_START_CPUPM, "PM_START_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
447 	{PM_STOP_CPUPM, "PM_STOP_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
448 	{PM_GET_CPU_THRESHOLD, "PM_GET_CPU_THRESHOLD", 1, NOSTRUCT},
449 	{PM_SET_CPU_THRESHOLD, "PM_SET_CPU_THRESHOLD", 1, NOSTRUCT,
450 	    0, 0, 0, SU},
451 	{PM_GET_CPUPM_STATE, "PM_GET_CPUPM_STATE", 1, NOSTRUCT},
452 	{PM_START_AUTOS3, "PM_START_AUTOS3", 1, NOSTRUCT, 0, 0, 0, SU},
453 	{PM_STOP_AUTOS3, "PM_STOP_AUTOS3", 1, NOSTRUCT, 0, 0, 0, SU},
454 	{PM_ENABLE_S3, "PM_ENABLE_S3", 1, NOSTRUCT, 0, 0, 0, SU},
455 	{PM_DISABLE_S3, "PM_DISABLE_S3", 1, NOSTRUCT, 0, 0, 0, SU},
456 	{PM_ENTER_S3, "PM_ENTER_S3", 1, NOSTRUCT, 0, 0, 0, SU},
457 	{PM_SEARCH_LIST, "PM_SEARCH_LIST", 1, PM_SRCH, 0, 0, 0, SU},
458 	{PM_GET_CMD_NAME, "PM_GET_CMD_NAME", 1, PM_REQ, INDATAOUT, NODIP,
459 	    NODEP, 0},
460 	{0, NULL}
461 };
462 
463 struct pm_cmd_info *
464 pc_info(int cmd)
465 {
466 	struct pm_cmd_info *pcip;
467 
468 	for (pcip = pmci; pcip->name; pcip++) {
469 		if (cmd == pcip->cmd)
470 			return (pcip);
471 	}
472 	return (NULL);
473 }
474 
475 static char *
476 pm_decode_cmd(int cmd)
477 {
478 	static char invbuf[64];
479 	struct pm_cmd_info *pcip = pc_info(cmd);
480 	if (pcip != NULL)
481 		return (pcip->name);
482 	(void) sprintf(invbuf, "ioctl: invalid command %d\n", cmd);
483 	return (invbuf);
484 }
485 
486 /*
487  * Allocate scan resource, create taskq, then dispatch scan,
488  * called only if autopm is enabled.
489  */
490 int
491 pm_start_pm_walk(dev_info_t *dip, void *arg)
492 {
493 	int cmd = *((int *)arg);
494 #ifdef PMDDEBUG
495 	char *cmdstr = pm_decode_cmd(cmd);
496 #endif
497 
498 	if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip))
499 		return (DDI_WALK_CONTINUE);
500 
501 	switch (cmd) {
502 	case PM_START_CPUPM:
503 		if (!PM_ISCPU(dip))
504 			return (DDI_WALK_CONTINUE);
505 		mutex_enter(&pm_scan_lock);
506 		if (!PM_CPUPM_DISABLED)
507 			pm_scan_init(dip);
508 		mutex_exit(&pm_scan_lock);
509 		break;
510 	case PM_START_PM:
511 		mutex_enter(&pm_scan_lock);
512 		if (PM_ISCPU(dip) && PM_CPUPM_DISABLED) {
513 			mutex_exit(&pm_scan_lock);
514 			return (DDI_WALK_CONTINUE);
515 		}
516 		if (autopm_enabled)
517 			pm_scan_init(dip);
518 		mutex_exit(&pm_scan_lock);
519 		break;
520 	}
521 
522 	/*
523 	 * Start doing pm on the device: ensure the pm_scan data structure is
524 	 * initialized; there is no need to guarantee a successful scan run.
525 	 */
526 	PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: scan %s@%s(%s#%d)\n", cmdstr,
527 	    PM_DEVICE(dip)))
528 	pm_rescan(dip);
529 
530 	return (DDI_WALK_CONTINUE);
531 }
532 
533 /*
534  * Bring devices to full power level, then stop scan
535  */
536 int
537 pm_stop_pm_walk(dev_info_t *dip, void *arg)
538 {
539 	pm_info_t *info = PM_GET_PM_INFO(dip);
540 	int cmd = *((int *)arg);
541 #ifdef PMDDEBUG
542 	char *cmdstr = pm_decode_cmd(cmd);
543 #endif
544 
545 	if (!info)
546 		return (DDI_WALK_CONTINUE);
547 
548 	switch (cmd) {
549 	case PM_STOP_PM:
550 		/*
551 		 * If CPU devices are being managed independently, then don't
552 		 * stop them as part of PM_STOP_PM. Only stop them as part of
553 		 * PM_STOP_CPUPM and PM_RESET_PM.
554 		 */
555 		if (PM_ISCPU(dip) && PM_CPUPM_ENABLED)
556 			return (DDI_WALK_CONTINUE);
557 		break;
558 	case PM_STOP_CPUPM:
559 		/*
560 		 * If stopping CPU devices and this device is not marked
561 		 * as a CPU device, then skip.
562 		 */
563 		if (!PM_ISCPU(dip))
564 			return (DDI_WALK_CONTINUE);
565 		break;
566 	}
567 
568 	/*
569 	 * Stop the current scan, and then bring the device back to normal power.
570 	 */
571 	if (!PM_ISBC(dip)) {
572 		PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: stop scan for "
573 		    "%s@%s(%s#%d)\n", cmdstr, PM_DEVICE(dip)))
574 		pm_scan_stop(dip);
575 	}
576 
577 	if (!PM_ISBC(dip) && !PM_ISDIRECT(dip) &&
578 	    !pm_all_at_normal(dip)) {
579 		PM_LOCK_DIP(dip);
580 		if (info->pmi_dev_pm_state & PM_DETACHING) {
581 			PMD(PMD_ALLNORM, ("ioctl: %s: deferring "
582 			    "all_to_normal because %s@%s(%s#%d) is detaching\n",
583 			    cmdstr, PM_DEVICE(dip)))
584 			info->pmi_dev_pm_state |= PM_ALLNORM_DEFERRED;
585 			PM_UNLOCK_DIP(dip);
586 			return (DDI_WALK_CONTINUE);
587 		}
588 		PM_UNLOCK_DIP(dip);
589 		if (pm_all_to_normal(dip, PM_CANBLOCK_FAIL) != DDI_SUCCESS) {
590 			PMD(PMD_ERROR, ("ioctl: %s: could not bring %s@%s"
591 			    "(%s#%d) to normal\n", cmdstr, PM_DEVICE(dip)))
592 		}
593 	}
594 
595 	return (DDI_WALK_CONTINUE);
596 }
597 
598 static int
599 pm_start_idledown(dev_info_t *dip, void *arg)
600 {
601 	int		flag = (int)(intptr_t)arg;
602 	pm_scan_t	*scanp = PM_GET_PM_SCAN(dip);
603 
604 	if (!scanp)
605 		return (DDI_WALK_CONTINUE);
606 
607 	PM_LOCK_DIP(dip);
608 	scanp->ps_idle_down |= flag;
609 	PM_UNLOCK_DIP(dip);
610 	pm_rescan(dip);
611 
612 	return (DDI_WALK_CONTINUE);
613 }
614 
615 /*ARGSUSED*/
616 static int
617 pm_end_idledown(dev_info_t *dip, void *ignore)
618 {
619 	pm_scan_t	*scanp = PM_GET_PM_SCAN(dip);
620 
621 	if (!scanp)
622 		return (DDI_WALK_CONTINUE);
623 
624 	PM_LOCK_DIP(dip);
625 	/*
626 	 * The PMID_TIMERS bits are placeholders until idledown expires.
627 	 * They are also the base for regenerating the PMID_SCANS bits.
628 	 * While it's up to the scan thread to clear the PMID_SCANS bits
629 	 * after each scan run, PMID_TIMERS ensures aggressive scan-down
630 	 * behavior throughout the idledown period.
631 	 */
632 	scanp->ps_idle_down &= ~PMID_TIMERS;
633 	PM_UNLOCK_DIP(dip);
634 
635 	return (DDI_WALK_CONTINUE);
636 }
637 
638 /*ARGSUSED*/
639 static void
640 pm_end_idledown_walk(void *ignore)
641 {
642 	PMD(PMD_IDLEDOWN, ("ioctl: end_idledown: idledown_id(%lx) timer is "
643 	    "off\n", (ulong_t)pmstp->pm_idledown_id));
644 
645 	mutex_enter(&pm_scan_lock);
646 	pmstp->pm_idledown_id = 0;
647 	mutex_exit(&pm_scan_lock);
648 
649 	ddi_walk_devs(ddi_root_node(), pm_end_idledown, NULL);
650 }
651 
652 /*
653  * pm_timeout_idledown - keep idledown effect for 10 seconds.
654  *
655  * Return 0 if another competing caller scheduled the idledown timeout;
656  * otherwise, return the idledown timeout_id.
657  */
658 static timeout_id_t
659 pm_timeout_idledown(void)
660 {
661 	timeout_id_t	to_id;
662 
663 	/*
664 	 * Keep idle-down in effect for either 10 seconds
665 	 * or the length of a scan interval, whichever is greater.
666 	 */
667 	mutex_enter(&pm_scan_lock);
668 	if (pmstp->pm_idledown_id != 0) {
669 		to_id = pmstp->pm_idledown_id;
670 		pmstp->pm_idledown_id = 0;
671 		mutex_exit(&pm_scan_lock);
672 		(void) untimeout(to_id);
673 		mutex_enter(&pm_scan_lock);
674 		if (pmstp->pm_idledown_id != 0) {
675 			PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: "
676 			    "another caller got it, idledown_id(%lx)!\n",
677 			    (ulong_t)pmstp->pm_idledown_id))
678 			mutex_exit(&pm_scan_lock);
679 			return (0);
680 		}
681 	}
682 	pmstp->pm_idledown_id = timeout(pm_end_idledown_walk, NULL,
683 	    PM_IDLEDOWN_TIME * hz);
684 	PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: idledown_id(%lx)\n",
685 	    (ulong_t)pmstp->pm_idledown_id))
686 	mutex_exit(&pm_scan_lock);
687 
688 	return (pmstp->pm_idledown_id);
689 }
690 
691 static int
692 pm_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
693 	struct pollhead **phpp)
694 {
695 	extern struct pollhead pm_pollhead;	/* common/os/sunpm.c */
696 	int	clone;
697 
698 	clone = PM_MINOR_TO_CLONE(getminor(dev));
699 	PMD(PMD_IOCTL, ("ioctl: pm_chpoll: clone %d\n", clone))
700 	if ((events & (POLLIN | POLLRDNORM)) && pm_poll_cnt[clone]) {
701 		*reventsp |= (POLLIN | POLLRDNORM);
702 		PMD(PMD_IOCTL, ("ioctl: pm_chpoll: reventsp set\n"))
703 	} else {
704 		*reventsp = 0;
705 		if (!anyyet) {
706 			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: not anyyet\n"))
707 			*phpp = &pm_pollhead;
708 		}
709 #ifdef DEBUG
710 		else {
711 			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: anyyet\n"))
712 		}
713 #endif
714 	}
715 	return (0);
716 }
717 
718 /*
719  * Called by pm_discard_entries to free up the memory. It also decrements
720  * pm_poll_cnt if direct is non-zero.
721  */
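/*
 * Reading of the loop below (editor's sketch, not authoritative): the psce_t
 * entry array is treated as a ring bounded by psce_first/psce_last.  Entries
 * are drained starting at psce_out, wrapping from psce_last back to
 * psce_first, until an entry with size == 0 is reached; psce_in and psce_out
 * are then both reset to psce_first.
 */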
722 static void
723 pm_free_entries(psce_t *pscep, int clone, int direct)
724 {
725 	pm_state_change_t	*p;
726 
727 	if (pscep) {
728 		p = pscep->psce_out;
729 		while (p->size) {
730 			if (direct) {
731 				PMD(PMD_IOCTL, ("ioctl: discard: "
732 				    "pm_poll_cnt[%d] is %d before "
733 				    "ASSERT\n", clone,
734 				    pm_poll_cnt[clone]))
735 				ASSERT(pm_poll_cnt[clone]);
736 				pm_poll_cnt[clone]--;
737 			}
738 			kmem_free(p->physpath, p->size);
739 			p->size = 0;
740 			if (p == pscep->psce_last)
741 				p = pscep->psce_first;
742 			else
743 				p++;
744 		}
745 		pscep->psce_out = pscep->psce_first;
746 		pscep->psce_in = pscep->psce_first;
747 		mutex_exit(&pscep->psce_lock);
748 	}
749 }
750 
751 /*
752  * Discard entries for this clone. Calls pm_free_entries to free up memory.
753  */
754 static void
755 pm_discard_entries(int clone)
756 {
757 	psce_t	*pscep;
758 	int			direct = 0;
759 
760 	mutex_enter(&pm_clone_lock);
761 	if ((pscep = pm_psc_clone_to_direct(clone)) != NULL)
762 		direct = 1;
763 	pm_free_entries(pscep, clone, direct);
764 	pscep = pm_psc_clone_to_interest(clone);
765 	pm_free_entries(pscep, clone, 0);
766 	mutex_exit(&pm_clone_lock);
767 }
768 
769 
770 static void
771 pm_set_idle_threshold(dev_info_t *dip, int thresh, int flag)
772 {
773 	if (!PM_ISBC(dip) && !PM_ISDIRECT(dip)) {
774 		switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
775 		case PMC_DEF_THRESH:
776 		case PMC_CPU_THRESH:
777 			PMD(PMD_IOCTL, ("ioctl: set_idle_threshold: set "
778 			    "%s@%s(%s#%d) default thresh to 0t%d\n",
779 			    PM_DEVICE(dip), thresh))
780 			pm_set_device_threshold(dip, thresh, flag);
781 			break;
782 		default:
783 			break;
784 		}
785 	}
786 }
787 
788 static int
789 pm_set_idle_thresh_walk(dev_info_t *dip, void *arg)
790 {
791 	int cmd = *((int *)arg);
792 
793 	if (!PM_GET_PM_INFO(dip))
794 		return (DDI_WALK_CONTINUE);
795 
796 	switch (cmd) {
797 	case PM_SET_SYSTEM_THRESHOLD:
798 		if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)
799 			break;
800 		pm_set_idle_threshold(dip, pm_system_idle_threshold,
801 		    PMC_DEF_THRESH);
802 		pm_rescan(dip);
803 		break;
804 	case PM_SET_CPU_THRESHOLD:
805 		if (!PM_ISCPU(dip))
806 			break;
807 		pm_set_idle_threshold(dip, pm_cpu_idle_threshold,
808 		    PMC_CPU_THRESH);
809 		pm_rescan(dip);
810 		break;
811 	}
812 
813 	return (DDI_WALK_CONTINUE);
814 }
815 
816 /*ARGSUSED*/
817 static int
818 pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
819 {
820 	dev_t	dev;
821 	int	instance;
822 
823 	switch (infocmd) {
824 	case DDI_INFO_DEVT2DEVINFO:
825 		if (pmstp->pm_instance == -1)
826 			return (DDI_FAILURE);
827 		*result = pmstp->pm_dip;
828 		return (DDI_SUCCESS);
829 
830 	case DDI_INFO_DEVT2INSTANCE:
831 		dev = (dev_t)arg;
832 		instance = getminor(dev) >> 8;
833 		*result = (void *)(uintptr_t)instance;
834 		return (DDI_SUCCESS);
835 
836 	default:
837 		return (DDI_FAILURE);
838 	}
839 }
840 
841 
842 /*ARGSUSED1*/
843 static int
844 pm_open(dev_t *devp, int flag, int otyp, cred_t *cr)
845 {
846 	int		clone;
847 
848 	if (otyp != OTYP_CHR)
849 		return (EINVAL);
850 
851 	mutex_enter(&pm_clone_lock);
852 	for (clone = 1; clone < PM_MAX_CLONE; clone++)
853 		if (!pmstp->pm_clones[clone])
854 			break;
855 
856 	if (clone == PM_MAX_CLONE) {
857 		mutex_exit(&pm_clone_lock);
858 		return (ENXIO);
859 	}
860 	pmstp->pm_cred[clone] = cr;
861 	crhold(cr);
862 
863 	*devp = makedevice(getmajor(*devp), (pmstp->pm_instance << 8) + clone);
864 	pmstp->pm_clones[clone] = 1;
865 	mutex_exit(&pm_clone_lock);
866 
867 	return (0);
868 }
869 
870 /*ARGSUSED1*/
871 static int
872 pm_close(dev_t dev, int flag, int otyp, cred_t *cr)
873 {
874 	int clone;
875 
876 	if (otyp != OTYP_CHR)
877 		return (EINVAL);
878 
879 	clone = PM_MINOR_TO_CLONE(getminor(dev));
880 	PMD(PMD_CLOSE, ("pm_close: minor %x, clone %x\n", getminor(dev),
881 	    clone))
882 
883 	/*
884 	 * Walk the entire device tree to find the corresponding
885 	 * device and operate on it.
886 	 */
887 	ddi_walk_devs(ddi_root_node(), pm_close_direct_pm_device,
888 	    (void *) &clone);
889 
890 	crfree(pmstp->pm_cred[clone]);
891 	pmstp->pm_cred[clone] = 0;
892 	pmstp->pm_clones[clone] = 0;
893 	pm_discard_entries(clone);
894 	ASSERT(pm_poll_cnt[clone] == 0);
895 	pm_deregister_watcher(clone, NULL);
896 	return (0);
897 }
898 
899 /*ARGSUSED*/
900 static int
901 pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
902 {
903 	struct pm_cmd_info *pc_info(int);
904 	struct pm_cmd_info *pcip = pc_info(cmd);
905 	pm_req_t	req;
906 	dev_info_t	*dip = NULL;
907 	pm_info_t	*info = NULL;
908 	int		clone;
909 	char		*cmdstr = pm_decode_cmd(cmd);
910 	/*
911 	 * To keep devinfo nodes from going away while we're holding a
912 	 * pointer to their dip, pm_name_to_dip() optionally holds
913 	 * the devinfo node.  If we've done that, we set dipheld
914 	 * so we know at the end of the ioctl processing to release the
915 	 * node again.
916 	 */
917 	int		dipheld = 0;
918 	int		icount = 0;
919 	int		i;
920 	int		comps;
921 	size_t		lencopied;
922 	int		ret = ENOTTY;
923 	int		curpower;
924 	char		who[MAXNAMELEN];
925 	size_t		wholen;			/* copyinstr length */
926 	size_t		deplen = MAXNAMELEN;
927 	char		*dep, i_dep_buf[MAXNAMELEN];
928 	char		pathbuf[MAXNAMELEN];
929 	struct pm_component *cp;
930 #ifdef	_MULTI_DATAMODEL
931 	pm_state_change32_t		*pscp32;
932 	pm_state_change32_t		psc32;
933 	pm_searchargs32_t		psa32;
934 	size_t				copysize32;
935 #endif
936 	pm_state_change_t		*pscp;
937 	pm_state_change_t		psc;
938 	pm_searchargs_t		psa;
939 	char		listname[MAXCOPYBUF];
940 	char		manufacturer[MAXCOPYBUF];
941 	char		product[MAXCOPYBUF];
942 	size_t		copysize;
943 
944 	PMD(PMD_IOCTL, ("ioctl: %s: begin\n", cmdstr))
945 
946 #ifdef DEBUG
947 	if (cmd == 666) {
948 		ddi_walk_devs(ddi_root_node(), print_info, NULL);
949 		return (0);
950 	}
951 	ret = 0x0badcafe;			/* sanity checking */
952 	pm_cmd = cmd;				/* for ASSERT debugging */
953 	pm_cmd_string = cmdstr;	/* for ASSERT debugging */
954 #endif
955 
956 
957 	if (pcip == NULL) {
958 		PMD(PMD_ERROR, ("ioctl: unknown command %d\n", cmd))
959 		return (ENOTTY);
960 	}
961 	if (pcip->supported == 0) {
962 		PMD(PMD_ERROR, ("ioctl: command %s no longer supported\n",
963 		    pcip->name))
964 		return (ENOTTY);
965 	}
966 
967 	wholen = 0;
968 	dep = i_dep_buf;
969 	i_dep_buf[0] = 0;
970 	clone = PM_MINOR_TO_CLONE(getminor(dev));
971 	if (!pm_perms(pcip->permission, pmstp->pm_cred[clone])) {
972 		ret = EPERM;
973 		return (ret);
974 	}
975 	switch (pcip->str_type) {
976 	case PM_REQ:
977 	{
978 #ifdef	_MULTI_DATAMODEL
979 		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
980 			pm_req32_t	req32;
981 
982 			if (ddi_copyin((caddr_t)arg, &req32,
983 			    sizeof (req32), mode) != 0) {
984 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
985 				    "EFAULT\n\n", cmdstr))
986 				ret = EFAULT;
987 				break;
988 			}
989 			req.component = req32.component;
990 			req.value = req32.value;
991 			req.datasize = req32.datasize;
992 			if (pcip->inargs & INWHO) {
993 				ret = copyinstr((char *)(uintptr_t)
994 				    req32.physpath, who, MAXNAMELEN, &wholen);
995 				if (ret) {
996 					PMD(PMD_ERROR, ("ioctl: %s: "
997 					    "copyinstr fails returning %d\n",
998 					    cmdstr, ret))
999 					break;
1000 				}
1001 				req.physpath = who;
1002 				PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n",
1003 				    cmdstr, req.physpath))
1004 			}
1005 			if (pcip->inargs & INDATA) {
1006 				req.data = (void *)(uintptr_t)req32.data;
1007 				req.datasize = req32.datasize;
1008 			} else {
1009 				req.data = NULL;
1010 				req.datasize = 0;
1011 			}
1012 			switch (pcip->diptype) {
1013 			case DIP:
1014 				if (!(dip =
1015 				    pm_name_to_dip(req.physpath, 1))) {
1016 					PMD(PMD_ERROR, ("ioctl: %s: "
1017 					    "pm_name_to_dip for %s failed\n",
1018 					    cmdstr, req.physpath))
1019 					return (ENODEV);
1020 				}
1021 				ASSERT(!dipheld);
1022 				dipheld++;
1023 				break;
1024 			case NODIP:
1025 				break;
1026 			default:
1027 				/*
1028 				 * Internal error, invalid ioctl description
1029 				 * force debug entry even if pm_debug not set
1030 				 */
1031 #ifdef	DEBUG
1032 				pm_log("invalid diptype %d for cmd %d (%s)\n",
1033 				    pcip->diptype, cmd, pcip->name);
1034 #endif
1035 				ASSERT(0);
1036 				return (EIO);
1037 			}
1038 			if (pcip->inargs & INDATAINT) {
1039 				int32_t int32buf;
1040 				int32_t *i32p;
1041 				int *ip;
1042 				icount = req32.datasize / sizeof (int32_t);
1043 				if (icount <= 0) {
1044 					PMD(PMD_ERROR, ("ioctl: %s: datasize"
1045 					    " 0 or neg EFAULT\n\n", cmdstr))
1046 					ret = EFAULT;
1047 					break;
1048 				}
1049 				ASSERT(!(pcip->inargs & INDATASTRING));
1050 				req.datasize = icount * sizeof (int);
1051 				req.data = kmem_alloc(req.datasize, KM_SLEEP);
1052 				ip = req.data;
1053 				ret = 0;
1054 				for (i = 0,
1055 				    i32p = (int32_t *)(uintptr_t)req32.data;
1056 				    i < icount; i++, i32p++) {
1057 					if (ddi_copyin((void *)i32p, &int32buf,
1058 					    sizeof (int32_t), mode)) {
1059 						kmem_free(req.data,
1060 						    req.datasize);
1061 						PMD(PMD_ERROR, ("ioctl: %s: "
1062 						    "entry %d EFAULT\n",
1063 						    cmdstr, i))
1064 						ret = EFAULT;
1065 						break;
1066 					}
1067 					*ip++ = (int)int32buf;
1068 				}
1069 				if (ret)
1070 					break;
1071 			}
1072 			if (pcip->inargs & INDATASTRING) {
1073 				ASSERT(!(pcip->inargs & INDATAINT));
1074 				ASSERT(pcip->deptype == DEP);
1075 				if (req32.data != NULL) {
1076 					if (copyinstr((void *)(uintptr_t)
1077 					    req32.data, dep, deplen, NULL)) {
1078 						PMD(PMD_ERROR, ("ioctl: %s: "
1079 						    "0x%p dep size %lx, EFAULT"
1080 						    "\n", cmdstr,
1081 						    (void *)req.data, deplen))
1082 						ret = EFAULT;
1083 						break;
1084 					}
1085 #ifdef DEBUG
1086 					else {
1087 						PMD(PMD_DEP, ("ioctl: %s: "
1088 						    "dep %s\n", cmdstr, dep))
1089 					}
1090 #endif
1091 				} else {
1092 					PMD(PMD_ERROR, ("ioctl: %s: no "
1093 					    "dependent\n", cmdstr))
1094 					ret = EINVAL;
1095 					break;
1096 				}
1097 			}
1098 		} else
1099 #endif /* _MULTI_DATAMODEL */
1100 		{
1101 			if (ddi_copyin((caddr_t)arg,
1102 			    &req, sizeof (req), mode) != 0) {
1103 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1104 				    "EFAULT\n\n", cmdstr))
1105 				ret = EFAULT;
1106 				break;
1107 			}
1108 			if (pcip->inargs & INWHO) {
1109 				ret = copyinstr((char *)req.physpath, who,
1110 				    MAXNAMELEN, &wholen);
1111 				if (ret) {
1112 					PMD(PMD_ERROR, ("ioctl: %s copyinstr"
1113 					    " fails returning %d\n", cmdstr,
1114 					    ret))
1115 					break;
1116 				}
1117 				req.physpath = who;
1118 				PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n",
1119 				    cmdstr, req.physpath))
1120 			}
1121 			if (!(pcip->inargs & INDATA)) {
1122 				req.data = NULL;
1123 				req.datasize = 0;
1124 			}
1125 			switch (pcip->diptype) {
1126 			case DIP:
1127 				if (!(dip =
1128 				    pm_name_to_dip(req.physpath, 1))) {
1129 					PMD(PMD_ERROR, ("ioctl: %s: "
1130 					    "pm_name_to_dip for %s failed\n",
1131 					    cmdstr, req.physpath))
1132 					return (ENODEV);
1133 				}
1134 				ASSERT(!dipheld);
1135 				dipheld++;
1136 				break;
1137 			case NODIP:
1138 				break;
1139 			default:
1140 				/*
1141 				 * Internal error, invalid ioctl description
1142 				 * force debug entry even if pm_debug not set
1143 				 */
1144 #ifdef	DEBUG
1145 				pm_log("invalid diptype %d for cmd %d (%s)\n",
1146 				    pcip->diptype, cmd, pcip->name);
1147 #endif
1148 				ASSERT(0);
1149 				return (EIO);
1150 			}
1151 			if (pcip->inargs & INDATAINT) {
1152 				int *ip;
1153 
1154 				ASSERT(!(pcip->inargs & INDATASTRING));
1155 				ip = req.data;
1156 				icount = req.datasize / sizeof (int);
1157 				if (icount <= 0) {
1158 					PMD(PMD_ERROR, ("ioctl: %s: datasize"
1159 					    " 0 or neg EFAULT\n\n", cmdstr))
1160 					ret = EFAULT;
1161 					break;
1162 				}
1163 				req.data = kmem_alloc(req.datasize, KM_SLEEP);
1164 				if (ddi_copyin((caddr_t)ip, req.data,
1165 				    req.datasize, mode) != 0) {
1166 					PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1167 					    "EFAULT\n\n", cmdstr))
1168 					ret = EFAULT;
1169 					break;
1170 				}
1171 			}
1172 			if (pcip->inargs & INDATASTRING) {
1173 				ASSERT(!(pcip->inargs & INDATAINT));
1174 				ASSERT(pcip->deptype == DEP);
1175 				if (req.data != NULL) {
1176 					if (copyinstr((caddr_t)req.data,
1177 					    dep, deplen, NULL)) {
1178 						PMD(PMD_ERROR, ("ioctl: %s: "
1179 						    "0x%p dep size %lu, "
1180 						    "EFAULT\n", cmdstr,
1181 						    (void *)req.data, deplen))
1182 						ret = EFAULT;
1183 						break;
1184 					}
1185 #ifdef DEBUG
1186 					else {
1187 						PMD(PMD_DEP, ("ioctl: %s: "
1188 						    "dep %s\n", cmdstr, dep))
1189 					}
1190 #endif
1191 				} else {
1192 					PMD(PMD_ERROR, ("ioctl: %s: no "
1193 					    "dependent\n", cmdstr))
1194 					ret = EINVAL;
1195 					break;
1196 				}
1197 			}
1198 		}
1199 		/*
1200 		 * Now we've got all the args in for the commands that
1201 		 * use the new pm_req struct.
1202 		 */
1203 		switch (cmd) {
1204 		case PM_REPARSE_PM_PROPS:
1205 		{
1206 			struct dev_ops	*drv;
1207 			struct cb_ops	*cb;
1208 			void		*propval;
1209 			int length;
1210 			/*
1211 			 * This ioctl is provided only for the ddivs pm test.
1212 			 * We only do it to a driver which explicitly allows
1213 			 * us to do so by exporting a pm-reparse-ok property.
1214 			 * We only care whether the property exists or not.
1215 			 */
1216 			if ((drv = ddi_get_driver(dip)) == NULL) {
1217 				ret = EINVAL;
1218 				break;
1219 			}
1220 			if ((cb = drv->devo_cb_ops) != NULL) {
1221 				if ((*cb->cb_prop_op)(DDI_DEV_T_ANY, dip,
1222 				    PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1223 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1224 				    "pm-reparse-ok", (caddr_t)&propval,
1225 				    &length) != DDI_SUCCESS) {
1226 					ret = EINVAL;
1227 					break;
1228 				}
1229 			} else if (ddi_prop_op(DDI_DEV_T_ANY, dip,
1230 			    PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1231 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1232 			    "pm-reparse-ok", (caddr_t)&propval,
1233 			    &length) != DDI_SUCCESS) {
1234 				ret = EINVAL;
1235 				break;
1236 			}
1237 			kmem_free(propval, length);
1238 			ret =  e_new_pm_props(dip);
1239 			break;
1240 		}
1241 
1242 		case PM_GET_DEVICE_THRESHOLD:
1243 		{
1244 			PM_LOCK_DIP(dip);
1245 			if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip)) {
1246 				PM_UNLOCK_DIP(dip);
1247 				PMD(PMD_ERROR, ("ioctl: %s: ENODEV\n",
1248 				    cmdstr))
1249 				ret = ENODEV;
1250 				break;
1251 			}
1252 			*rval_p = DEVI(dip)->devi_pm_dev_thresh;
1253 			PM_UNLOCK_DIP(dip);
1254 			ret = 0;
1255 			break;
1256 		}
1257 
1258 		case PM_DIRECT_PM:
1259 		{
1260 			int has_dep;
1261 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1262 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1263 				    "ENODEV\n", cmdstr))
1264 				ret = ENODEV;
1265 				break;
1266 			}
1267 			/*
1268 			 * Check to see if there is a dependency on this
1269 			 * kept device; if so, return EBUSY.
1270 			 */
1271 			(void) ddi_pathname(dip, pathbuf);
1272 			pm_dispatch_to_dep_thread(PM_DEP_WK_CHECK_KEPT,
1273 			    NULL, pathbuf, PM_DEP_WAIT, &has_dep, 0);
1274 			if (has_dep) {
1275 				PMD(PMD_ERROR | PMD_DPM, ("%s EBUSY\n",
1276 				    cmdstr))
1277 				ret = EBUSY;
1278 				break;
1279 			}
1280 			PM_LOCK_DIP(dip);
1281 			if (PM_ISDIRECT(dip) || (info->pmi_clone != 0)) {
1282 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1283 				    "%s@%s(%s#%d): EBUSY\n", cmdstr,
1284 				    PM_DEVICE(dip)))
1285 				PM_UNLOCK_DIP(dip);
1286 				ret = EBUSY;
1287 				break;
1288 			}
1289 			info->pmi_dev_pm_state |= PM_DIRECT;
1290 			info->pmi_clone = clone;
1291 			PM_UNLOCK_DIP(dip);
1292 			PMD(PMD_DPM, ("ioctl: %s: info %p, pmi_clone %d\n",
1293 			    cmdstr, (void *)info, clone))
1294 			mutex_enter(&pm_clone_lock);
1295 			pm_register_watcher(clone, dip);
1296 			mutex_exit(&pm_clone_lock);
1297 			ret = 0;
1298 			break;
1299 		}
1300 
1301 		case PM_RELEASE_DIRECT_PM:
1302 		{
1303 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1304 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1305 				    "ENODEV\n", cmdstr))
1306 				ret = ENODEV;
1307 				break;
1308 			}
1309 			PM_LOCK_DIP(dip);
1310 			if (info->pmi_clone != clone) {
1311 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1312 				    "%s@%s(%s#%d) EINVAL\n", cmdstr,
1313 				    PM_DEVICE(dip)))
1314 				ret = EINVAL;
1315 				PM_UNLOCK_DIP(dip);
1316 				break;
1317 			}
1318 			ASSERT(PM_ISDIRECT(dip));
1319 			info->pmi_dev_pm_state &= ~PM_DIRECT;
1320 			PM_UNLOCK_DIP(dip);
1321 			/* Bring ourselves up if there is a keeper. */
1322 			(void) ddi_pathname(dip, pathbuf);
1323 			pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF,
1324 			    NULL, pathbuf, PM_DEP_WAIT, NULL, 0);
1325 			pm_discard_entries(clone);
1326 			pm_deregister_watcher(clone, dip);
1327 			/*
1328 			 * Now we can let through the other threads that
1329 			 * are trying to do a DIRECT_PM.
1330 			 */
1331 			PM_LOCK_DIP(dip);
1332 			info->pmi_clone = 0;
1333 			PM_UNLOCK_DIP(dip);
1334 			pm_proceed(dip, PMP_RELEASE, -1, -1);
1335 			PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1336 			    cmdstr))
1337 			pm_rescan(dip);
1338 			ret = 0;
1339 			break;
1340 		}
1341 
1342 		case PM_SET_CURRENT_POWER:
1343 		{
1344 			int comp = req.component;
1345 			int  value = req.value;
1346 			PMD(PMD_DPM, ("ioctl: %s: %s component %d to value "
1347 			    "%d\n", cmdstr, req.physpath, comp, value))
1348 			if (!e_pm_valid_comp(dip, comp, NULL) ||
1349 			    !e_pm_valid_power(dip, comp, value)) {
1350 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1351 				    "physpath=%s, comp=%d, level=%d, fails\n",
1352 				    cmdstr, req.physpath, comp, value))
1353 				ret = EINVAL;
1354 				break;
1355 			}
1356 
1357 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1358 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1359 				    "ENODEV\n", cmdstr))
1360 				ret = ENODEV;
1361 				break;
1362 			}
1363 			if (info->pmi_clone != clone) {
1364 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1365 				    "(not owner) %s fails; clone %d, owner %d"
1366 				    "\n", cmdstr, req.physpath, clone,
1367 				    info->pmi_clone))
1368 				ret = EINVAL;
1369 				break;
1370 			}
1371 			ASSERT(PM_ISDIRECT(dip));
1372 
1373 			if (pm_set_power(dip, comp, value, PM_LEVEL_EXACT,
1374 			    PM_CANBLOCK_BLOCK, 0, &ret) != DDI_SUCCESS) {
1375 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1376 				    "pm_set_power for %s fails, errno=%d\n",
1377 				    cmdstr, req.physpath, ret))
1378 				break;
1379 			}
1380 
1381 			pm_proceed(dip, PMP_SETPOWER, comp, value);
1382 
1383 			/*
1384 			 * Power down all idle components if console framebuffer
1385 			 * is powered off.
1386 			 */
1387 			if (PM_IS_CFB(dip) && (pm_system_idle_threshold ==
1388 			    pm_default_idle_threshold)) {
1389 				dev_info_t	*root = ddi_root_node();
1390 				if (PM_ISBC(dip)) {
1391 					if (comp == 0 && value == 0 &&
1392 					    (pm_timeout_idledown() != 0)) {
1393 						ddi_walk_devs(root,
1394 						    pm_start_idledown,
1395 						    (void *)PMID_CFB);
1396 					}
1397 				} else {
1398 					int count = 0;
1399 					for (i = 0; i < PM_NUMCMPTS(dip); i++) {
1400 						ret = pm_get_current_power(dip,
1401 						    i, &curpower);
1402 						if (ret == DDI_SUCCESS &&
1403 						    curpower == 0)
1404 							count++;
1405 					}
1406 					if ((count == PM_NUMCMPTS(dip)) &&
1407 					    (pm_timeout_idledown() != 0)) {
1408 						ddi_walk_devs(root,
1409 						    pm_start_idledown,
1410 						    (void *)PMID_CFB);
1411 					}
1412 				}
1413 			}
1414 
1415 			PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1416 			    cmdstr))
1417 			pm_rescan(dip);
1418 			*rval_p = 0;
1419 			ret = 0;
1420 			break;
1421 		}
1422 
1423 		case PM_GET_FULL_POWER:
1424 		{
1425 			int normal;
1426 			ASSERT(dip);
1427 			PMD(PMD_NORM, ("ioctl: %s: %s component %d\n",
1428 			    cmdstr, req.physpath, req.component))
1429 			normal =  pm_get_normal_power(dip, req.component);
1430 
1431 			if (normal == DDI_FAILURE) {
1432 				PMD(PMD_ERROR | PMD_NORM, ("ioctl: %s: "
1433 				    "returns EINVAL\n", cmdstr))
1434 				ret = EINVAL;
1435 				break;
1436 			}
1437 			*rval_p = normal;
1438 			PMD(PMD_NORM, ("ioctl: %s: returns %d\n",
1439 			    cmdstr, normal))
1440 			ret = 0;
1441 			break;
1442 		}
1443 
1444 		case PM_GET_CURRENT_POWER:
1445 		{
1446 			if (pm_get_current_power(dip, req.component,
1447 			    rval_p) != DDI_SUCCESS) {
1448 				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s "
1449 				    "EINVAL\n", cmdstr))
1450 				ret = EINVAL;
1451 				break;
1452 			}
1453 			PMD(PMD_DPM, ("ioctl: %s: %s comp %d returns %d\n",
1454 			    cmdstr, req.physpath, req.component, *rval_p))
1455 			if (*rval_p == PM_LEVEL_UNKNOWN)
1456 				ret = EAGAIN;
1457 			else
1458 				ret = 0;
1459 			break;
1460 		}
1461 
1462 		case PM_GET_TIME_IDLE:
1463 		{
1464 			time_t timestamp;
1465 			int comp = req.component;
1466 			pm_component_t *cp;
1467 			if (!e_pm_valid_comp(dip, comp, &cp)) {
1468 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1469 				    "component %d > numcmpts - 1 %d--EINVAL\n",
1470 				    cmdstr, PM_DEVICE(dip), comp,
1471 				    PM_NUMCMPTS(dip) - 1))
1472 				ret = EINVAL;
1473 				break;
1474 			}
1475 			timestamp = cp->pmc_timestamp;
1476 			if (timestamp) {
1477 				time_t now;
1478 				(void) drv_getparm(TIME, &now);
1479 				*rval_p = (now - timestamp);
1480 			} else {
1481 				*rval_p = 0;
1482 			}
1483 			ret = 0;
1484 			break;
1485 		}
1486 
1487 		case PM_ADD_DEPENDENT:
1488 		{
1489 			dev_info_t	*kept_dip;
1490 
1491 			PMD(PMD_KEEPS, ("%s, kept %s, keeper %s\n", cmdstr,
1492 			    dep, req.physpath))
1493 
1494 			/*
1495 			 * hold and install the kept device while processing the
1496 			 * dependency; the keeper (in req.physpath) has already been held.
1497 			 */
1498 			if (dep[0] == '\0') {
1499 				PMD(PMD_ERROR, ("kept NULL or null\n"))
1500 				ret = EINVAL;
1501 				break;
1502 			} else if ((kept_dip =
1503 			    pm_name_to_dip(dep, 1)) == NULL) {
1504 				PMD(PMD_ERROR, ("no dip for kept %s\n", dep))
1505 				ret = ENODEV;
1506 				break;
1507 			} else if (kept_dip == dip) {
1508 				PMD(PMD_ERROR, ("keeper(%s, %p) - kept(%s, %p) "
1509 				    "self-dependency not allowed.\n",
1510 				    dep, (void *)kept_dip, req.physpath,
1511 				    (void *) dip))
1512 				PM_RELE(dip);	/* release "double" hold */
1513 				ret = EINVAL;
1514 				break;
1515 			}
1516 			ASSERT(!(strcmp(req.physpath, (char *)dep) == 0));
1517 
1518 			/*
1519 			 * record dependency, then walk through device tree
1520 			 * independently on behalf of kept and keeper to
1521 			 * establish newly created dependency.
1522 			 */
1523 			pm_dispatch_to_dep_thread(PM_DEP_WK_RECORD_KEEPER,
1524 			    req.physpath, dep, PM_DEP_WAIT, NULL, 0);
1525 
1526 			/*
1527 			 * release kept after establishing dependency, keeper
1528 			 * is released as part of ioctl exit processing.
1529 			 */
1530 			PM_RELE(kept_dip);
1531 			*rval_p = 0;
1532 			ret = 0;
1533 			break;
1534 		}
1535 
1536 		case PM_ADD_DEPENDENT_PROPERTY:
1537 		{
1538 			char *keeper, *kept;
1539 
1540 			if (dep[0] == '\0') {
1541 				PMD(PMD_ERROR, ("ioctl: %s: dep NULL or "
1542 				    "null\n", cmdstr))
1543 				ret = EINVAL;
1544 				break;
1545 			}
1546 			kept = dep;
1547 			keeper = req.physpath;
1548 			/*
1549 			 * record the keeper - kept dependency, then walk through
1550 			 * the device tree to find all attached keepers, and walk
1551 			 * through again to apply the dependency to all the
1552 			 * potential kept devices.
1553 			 */
1554 			pm_dispatch_to_dep_thread(
1555 			    PM_DEP_WK_RECORD_KEEPER_PROP, keeper, kept,
1556 			    PM_DEP_WAIT, NULL, 0);
1557 
1558 			*rval_p = 0;
1559 			ret = 0;
1560 			break;
1561 		}
1562 
1563 		case PM_SET_DEVICE_THRESHOLD:
1564 		{
1565 			pm_thresh_rec_t *rp;
1566 			pm_pte_t *ep;	/* threshold header storage */
1567 			int *tp;	/* threshold storage */
1568 			size_t size;
1569 			extern int pm_thresh_specd(dev_info_t *);
1570 
1571 			/*
1572 			 * The header struct plus one entry struct plus one
1573 			 * threshold plus the length of the string
1574 			 */
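			/*
			 * Sketch of the resulting allocation (editor's
			 * illustration; the offsets follow from the code
			 * below):
			 *
			 *	rp -> [pm_thresh_rec_t header]
			 *	      [pm_pte_t entry]		(rp->ptr_entries)
			 *	      [int threshold]		(ep->pte_thresh)
			 *	      [physpath + '\0']		(rp->ptr_physpath)
			 */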
1575 			size = sizeof (pm_thresh_rec_t) +
1576 			    (sizeof (pm_pte_t) * 1) +
1577 			    (1 * sizeof (int)) +
1578 			    strlen(req.physpath) + 1;
1579 
1580 			rp = kmem_zalloc(size, KM_SLEEP);
1581 			rp->ptr_size = size;
1582 			rp->ptr_numcomps = 0;	/* means device threshold */
1583 			ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1584 			rp->ptr_entries = ep;
1585 			tp = (int *)((intptr_t)ep +
1586 			    (1 * sizeof (pm_pte_t)));
1587 			ep->pte_numthresh = 1;
1588 			ep->pte_thresh = tp;
1589 			*tp++ = req.value;
1590 			(void) strcat((char *)tp, req.physpath);
1591 			rp->ptr_physpath = (char *)tp;
1592 			ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1593 			    (intptr_t)rp + rp->ptr_size);
1594 			PMD(PMD_THRESH, ("ioctl: %s: record thresh %d for "
1595 			    "%s\n", cmdstr, req.value, req.physpath))
1596 			pm_record_thresh(rp);
1597 			/*
1598 			 * Don't free rp, pm_record_thresh() keeps it.
1599 			 * We don't try to apply it ourselves because we'd need
1600 			 * to know too much about locking.  Since we don't
1601 			 * hold a lock the entry could be removed before
1602 			 * we get here
1603 			 */
1604 			ASSERT(dip == NULL);
1605 			ret = 0;		/* can't fail now */
1606 			if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1607 				break;
1608 			}
1609 			(void) pm_thresh_specd(dip);
1610 			PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d)\n",
1611 			    cmdstr, PM_DEVICE(dip)))
1612 			PM_RELE(dip);
1613 			break;
1614 		}
1615 
1616 		case PM_RESET_DEVICE_THRESHOLD:
1617 		{
1618 			/*
1619 			 * This only applies to a currently attached and power
1620 			 * managed node
1621 			 */
1622 			/*
1623 			 * We don't do this to old-style drivers
1624 			 */
1625 			info = PM_GET_PM_INFO(dip);
1626 			if (info == NULL) {
1627 				PMD(PMD_ERROR, ("ioctl: %s: %s not power "
1628 				    "managed\n", cmdstr, req.physpath))
1629 				ret = EINVAL;
1630 				break;
1631 			}
1632 			if (PM_ISBC(dip)) {
1633 				PMD(PMD_ERROR, ("ioctl: %s: %s is BC\n",
1634 				    cmdstr, req.physpath))
1635 				ret = EINVAL;
1636 				break;
1637 			}
1638 			pm_unrecord_threshold(req.physpath);
1639 			if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)
1640 				pm_set_device_threshold(dip,
1641 				    pm_cpu_idle_threshold, PMC_CPU_THRESH);
1642 			else
1643 				pm_set_device_threshold(dip,
1644 				    pm_system_idle_threshold, PMC_DEF_THRESH);
1645 			ret = 0;
1646 			break;
1647 		}
1648 
1649 		case PM_GET_NUM_COMPONENTS:
1650 		{
1651 			ret = 0;
1652 			*rval_p = PM_NUMCMPTS(dip);
1653 			break;
1654 		}
1655 
1656 		case PM_GET_DEVICE_TYPE:
1657 		{
1658 			ret = 0;
1659 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1660 				PMD(PMD_ERROR, ("ioctl: %s: "
1661 				    "PM_NO_PM_COMPONENTS\n", cmdstr))
1662 				*rval_p = PM_NO_PM_COMPONENTS;
1663 				break;
1664 			}
1665 			if (PM_ISBC(dip)) {
1666 				*rval_p = PM_CREATE_COMPONENTS;
1667 			} else {
1668 				*rval_p = PM_AUTOPM;
1669 			}
1670 			break;
1671 		}
1672 
1673 		case PM_SET_COMPONENT_THRESHOLDS:
1674 		{
1675 			int comps = 0;
1676 			int *end = (int *)req.data + icount;
1677 			pm_thresh_rec_t *rp;
1678 			pm_pte_t *ep;	/* threshold header storage */
1679 			int *tp;	/* threshold storage */
1680 			int *ip;
1681 			int j;
1682 			size_t size;
1683 			extern int pm_thresh_specd(dev_info_t *);
1684 			extern int pm_valid_thresh(dev_info_t *,
1685 			    pm_thresh_rec_t *);
1686 
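			/*
			 * Expected layout of the copied-in int array (an
			 * illustrative example, not from the original
			 * source): per component a count followed by that
			 * many thresholds, ending with a 0 count that must
			 * exactly fill the buffer; e.g. two components with
			 * 2 and 3 thresholds:
			 *
			 *	{ 2, t10, t11, 3, t20, t21, t22, 0 }
			 */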
1687 			for (ip = req.data; *ip; ip++) {
1688 				if (ip >= end) {
1689 					ret = EFAULT;
1690 					break;
1691 				}
1692 				comps++;
1693 				/* skip over indicated number of entries */
1694 				for (j = *ip; j; j--) {
1695 					if (++ip >= end) {
1696 						ret = EFAULT;
1697 						break;
1698 					}
1699 				}
1700 				if (ret)
1701 					break;
1702 			}
1703 			if (ret)
1704 				break;
1705 			if ((intptr_t)ip != (intptr_t)end - sizeof (int)) {
1706 				/* did not exactly fill buffer */
1707 				ret = EINVAL;
1708 				break;
1709 			}
1710 			if (comps == 0) {
1711 				PMD(PMD_ERROR, ("ioctl: %s: %s 0 components"
1712 				    "--EINVAL\n", cmdstr, req.physpath))
1713 				ret = EINVAL;
1714 				break;
1715 			}
1716 			/*
1717 			 * The header struct plus one entry struct per component
1718 			 * plus the size of the lists minus the counts
1719 			 * plus the length of the string
1720 			 */
1721 			size = sizeof (pm_thresh_rec_t) +
1722 			    (sizeof (pm_pte_t) * comps) + req.datasize -
1723 			    ((comps + 1) * sizeof (int)) +
1724 			    strlen(req.physpath) + 1;
1725 
1726 			rp = kmem_zalloc(size, KM_SLEEP);
1727 			rp->ptr_size = size;
1728 			rp->ptr_numcomps = comps;
1729 			ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1730 			rp->ptr_entries = ep;
1731 			tp = (int *)((intptr_t)ep +
1732 			    (comps * sizeof (pm_pte_t)));
1733 			for (ip = req.data; *ip; ep++) {
1734 				ep->pte_numthresh = *ip;
1735 				ep->pte_thresh = tp;
1736 				for (j = *ip++; j; j--) {
1737 					*tp++ = *ip++;
1738 				}
1739 			}
1740 			(void) strcat((char *)tp, req.physpath);
1741 			rp->ptr_physpath = (char *)tp;
1742 			ASSERT((intptr_t)end == (intptr_t)ip + sizeof (int));
1743 			ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1744 			    (intptr_t)rp + rp->ptr_size);
1745 
1746 			ASSERT(dip == NULL);
1747 			/*
1748 			 * If this is not a currently power managed node,
1749 			 * then we can't check for validity of the thresholds
1750 			 */
1751 			if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1752 				/* don't free rp, pm_record_thresh uses it */
1753 				pm_record_thresh(rp);
1754 				PMD(PMD_ERROR, ("ioctl: %s: pm_name_to_dip "
1755 				    "for %s failed\n", cmdstr, req.physpath))
1756 				ret = 0;
1757 				break;
1758 			}
1759 			ASSERT(!dipheld);
1760 			dipheld++;
1761 
1762 			if (!pm_valid_thresh(dip, rp)) {
1763 				PMD(PMD_ERROR, ("ioctl: %s: invalid thresh "
1764 				    "for %s@%s(%s#%d)\n", cmdstr,
1765 				    PM_DEVICE(dip)))
1766 				kmem_free(rp, size);
1767 				ret = EINVAL;
1768 				break;
1769 			}
1770 			/*
1771 			 * We don't just apply it ourselves because we'd need
1772 			 * to know too much about locking.  Since we don't
1773 			 * hold a lock the entry could be removed before
1774 			 * we get here
1775 			 */
1776 			pm_record_thresh(rp);
1777 			(void) pm_thresh_specd(dip);
1778 			ret = 0;
1779 			break;
1780 		}
1781 
1782 		case PM_GET_COMPONENT_THRESHOLDS:
1783 		{
1784 			int musthave;
1785 			int numthresholds = 0;
1786 			int wordsize;
1787 			int numcomps;
1788 			caddr_t uaddr = req.data;	/* user address */
1789 			int val;	/* int value to be copied out */
1790 			int32_t val32;	/* int32 value to be copied out */
1791 			caddr_t vaddr;	/* address to copyout from */
1792 			int j;
1793 
1794 #ifdef	_MULTI_DATAMODEL
1795 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1796 				wordsize = sizeof (int32_t);
1797 			} else
1798 #endif /* _MULTI_DATAMODEL */
1799 			{
1800 				wordsize = sizeof (int);
1801 			}
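			/*
			 * Layout copied out to req.data (editor's sketch,
			 * derived from the loops below): per component a
			 * word holding (numlevels - 1) followed by that many
			 * threshold values, ending with a 0 count, e.g.
			 *
			 *	{ n0, t0_1 .. t0_n0, n1, t1_1 .. t1_n1, 0 }
			 */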
1802 
1803 			ASSERT(dip);
1804 
1805 			numcomps = PM_NUMCMPTS(dip);
1806 			for (i = 0; i < numcomps; i++) {
1807 				cp = PM_CP(dip, i);
1808 				numthresholds += cp->pmc_comp.pmc_numlevels - 1;
1809 			}
1810 			musthave = (numthresholds + numcomps + 1) *  wordsize;
1811 			if (req.datasize < musthave) {
1812 				PMD(PMD_ERROR, ("ioctl: %s: size %ld, need "
1813 				    "%d--EINVAL\n", cmdstr, req.datasize,
1814 				    musthave))
1815 				ret = EINVAL;
1816 				break;
1817 			}
1818 			PM_LOCK_DIP(dip);
1819 			for (i = 0; i < numcomps; i++) {
1820 				int *thp;
1821 				cp = PM_CP(dip, i);
1822 				thp = cp->pmc_comp.pmc_thresh;
1823 				/* first copyout the count */
1824 				if (wordsize == sizeof (int32_t)) {
1825 					val32 = cp->pmc_comp.pmc_numlevels - 1;
1826 					vaddr = (caddr_t)&val32;
1827 				} else {
1828 					val = cp->pmc_comp.pmc_numlevels - 1;
1829 					vaddr = (caddr_t)&val;
1830 				}
1831 				if (ddi_copyout(vaddr, (void *)uaddr,
1832 				    wordsize, mode) != 0) {
1833 					PM_UNLOCK_DIP(dip);
1834 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1835 					    "(%s#%d) vaddr %p EFAULT\n",
1836 					    cmdstr, PM_DEVICE(dip),
1837 					    (void*)vaddr))
1838 					ret = EFAULT;
1839 					break;
1840 				}
1841 				vaddr = uaddr;
1842 				vaddr += wordsize;
1843 				uaddr = (caddr_t)vaddr;
1844 				/* then copyout each threshold value */
1845 				for (j = 0; j < cp->pmc_comp.pmc_numlevels - 1;
1846 				    j++) {
1847 					if (wordsize == sizeof (int32_t)) {
1848 						val32 = thp[j + 1];
1849 						vaddr = (caddr_t)&val32;
1850 					} else {
1851 						val = thp[j + 1];
1852 						vaddr = (caddr_t)&val;
1853 					}
1854 					if (ddi_copyout(vaddr, (void *) uaddr,
1855 					    wordsize, mode) != 0) {
1856 						PM_UNLOCK_DIP(dip);
1857 						PMD(PMD_ERROR, ("ioctl: %s: "
1858 						    "%s@%s(%s#%d) uaddr %p "
1859 						    "EFAULT\n", cmdstr,
1860 						    PM_DEVICE(dip),
1861 						    (void *)uaddr))
1862 						ret = EFAULT;
1863 						break;
1864 					}
1865 					vaddr = uaddr;
1866 					vaddr += wordsize;
1867 					uaddr = (caddr_t)vaddr;
1868 				}
1869 			}
1870 			if (ret)
1871 				break;
1872 			/* last copyout a terminating 0 count */
1873 			if (wordsize == sizeof (int32_t)) {
1874 				val32 = 0;
1875 				vaddr = (caddr_t)&val32;
1876 			} else {
1877 				ASSERT(wordsize == sizeof (int));
1878 				val = 0;
1879 				vaddr = (caddr_t)&val;
1880 			}
1881 			if (ddi_copyout(vaddr, uaddr, wordsize, mode) != 0) {
1882 				PM_UNLOCK_DIP(dip);
1883 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1884 				    "vaddr %p (0 count) EFAULT\n", cmdstr,
1885 				    PM_DEVICE(dip), (void *)vaddr))
1886 				ret = EFAULT;
1887 				break;
1888 			}
1889 			/* finished, so don't need to increment addresses */
1890 			PM_UNLOCK_DIP(dip);
1891 			ret = 0;
1892 			break;
1893 		}
1894 
1895 		case PM_GET_STATS:
1896 		{
1897 			time_t now;
1898 			time_t *timestamp;
1899 			extern int pm_cur_power(pm_component_t *cp);
1900 			int musthave;
1901 			int wordsize;
1902 
1903 #ifdef	_MULTI_DATAMODEL
1904 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1905 				wordsize = sizeof (int32_t);
1906 			} else
1907 #endif /* _MULTI_DATAMODEL */
1908 			{
1909 				wordsize = sizeof (int);
1910 			}
1911 
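			/*
			 * The caller gets back "comps" current power levels
			 * followed by "comps" remaining-time values: the
			 * seconds left before each component's idle threshold
			 * expires, or INT_MAX for a component whose current
			 * level is 0 or which has no timestamp.  *rval_p is
			 * set to the component count.
			 */
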
1912 			comps = PM_NUMCMPTS(dip);
1913 			if (comps == 0 || PM_GET_PM_INFO(dip) == NULL) {
1914 				PMD(PMD_ERROR, ("ioctl: %s: %s no components"
1915 				    " or not power managed--EINVAL\n", cmdstr,
1916 				    req.physpath))
1917 				ret = EINVAL;
1918 				break;
1919 			}
1920 			musthave = comps * 2 * wordsize;
1921 			if (req.datasize < musthave) {
1922 				PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
1923 				    "%d--EINVAL\n", cmdstr, req.datasize,
1924 				    musthave))
1925 				ret = EINVAL;
1926 				break;
1927 			}
1928 
1929 			PM_LOCK_DIP(dip);
1930 			(void) drv_getparm(TIME, &now);
1931 			timestamp = kmem_zalloc(comps * sizeof (time_t),
1932 			    KM_SLEEP);
1933 			pm_get_timestamps(dip, timestamp);
1934 			/*
1935 			 * First the current power levels
1936 			 */
1937 			for (i = 0; i < comps; i++) {
1938 				int curpwr;
1939 				int32_t curpwr32;
1940 				caddr_t cpaddr;
1941 
1942 				cp = PM_CP(dip, i);
1943 				if (wordsize == sizeof (int)) {
1944 					curpwr = pm_cur_power(cp);
1945 					cpaddr = (caddr_t)&curpwr;
1946 				} else {
1947 					ASSERT(wordsize == sizeof (int32_t));
1948 					curpwr32 = pm_cur_power(cp);
1949 					cpaddr = (caddr_t)&curpwr32;
1950 				}
1951 				if (ddi_copyout(cpaddr, (void *) req.data,
1952 				    wordsize, mode) != 0) {
1953 					PM_UNLOCK_DIP(dip);
1954 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1955 					    "(%s#%d) req.data %p EFAULT\n",
1956 					    cmdstr, PM_DEVICE(dip),
1957 					    (void *)req.data))
1958 					ASSERT(!dipheld);
					kmem_free(timestamp,
					    comps * sizeof (time_t));
1959 					return (EFAULT);
1960 				}
1961 				cpaddr = (caddr_t)req.data;
1962 				cpaddr += wordsize;
1963 				req.data = cpaddr;
1964 			}
1965 			/*
1966 			 * Then the times remaining
1967 			 */
1968 			for (i = 0; i < comps; i++) {
1969 				int retval;
1970 				int32_t retval32;
1971 				caddr_t rvaddr;
1972 				int curpwr;
1973 
1974 				cp = PM_CP(dip, i);
1975 				curpwr = cp->pmc_cur_pwr;
1976 				if (curpwr == 0 || timestamp[i] == 0) {
1977 					PMD(PMD_STATS, ("ioctl: %s: "
1978 					    "cur_pwr %x, timestamp %lx\n",
1979 					    cmdstr, curpwr, timestamp[i]))
1980 					retval = INT_MAX;
1981 				} else {
1982 					int thresh;
1983 					(void) pm_current_threshold(dip, i,
1984 					    &thresh);
1985 					retval = thresh - (now - timestamp[i]);
1986 					PMD(PMD_STATS, ("ioctl: %s: current "
1987 					    "thresh %x, now %lx, timestamp %lx,"
1988 					    " retval %x\n", cmdstr, thresh, now,
1989 					    timestamp[i], retval))
1990 				}
1991 				if (wordsize == sizeof (int)) {
1992 					rvaddr = (caddr_t)&retval;
1993 				} else {
1994 					ASSERT(wordsize == sizeof (int32_t));
1995 					retval32 = retval;
1996 					rvaddr = (caddr_t)&retval32;
1997 				}
1998 				if (ddi_copyout(rvaddr, (void *) req.data,
1999 				    wordsize, mode) != 0) {
2000 					PM_UNLOCK_DIP(dip);
2001 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
2002 					    "(%s#%d) req.data %p EFAULT\n",
2003 					    cmdstr, PM_DEVICE(dip),
2004 					    (void *)req.data))
2005 					ASSERT(!dipheld);
2006 					kmem_free(timestamp,
2007 					    comps * sizeof (time_t));
2008 					return (EFAULT);
2009 				}
2010 				rvaddr = (caddr_t)req.data;
2011 				rvaddr += wordsize;
2012 				req.data = (int *)rvaddr;
2013 			}
2014 			PM_UNLOCK_DIP(dip);
2015 			*rval_p = comps;
2016 			ret = 0;
2017 			kmem_free(timestamp, comps * sizeof (time_t));
2018 			break;
2019 		}
2020 
2021 		case PM_GET_CMD_NAME:
2022 		{
2023 			PMD(PMD_IOCTL, ("%s: %s\n", cmdstr,
2024 			    pm_decode_cmd(req.value)))
2025 			if (ret = copyoutstr(pm_decode_cmd(req.value),
2026 			    (char *)req.data, req.datasize, &lencopied)) {
2027 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2028 				    "copyoutstr %p failed--EFAULT\n", cmdstr,
2029 				    PM_DEVICE(dip), (void *)req.data))
2030 				break;
2031 			}
2032 			*rval_p = lencopied;
2033 			ret = 0;
2034 			break;
2035 		}
2036 
2037 		case PM_GET_COMPONENT_NAME:
2038 		{
2039 			ASSERT(dip);
2040 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2041 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2042 				    "component %d > numcmpts - 1 %d--EINVAL\n",
2043 				    cmdstr, PM_DEVICE(dip), req.component,
2044 				    PM_NUMCMPTS(dip) - 1))
2045 				ret = EINVAL;
2046 				break;
2047 			}
2048 			if (ret = copyoutstr(cp->pmc_comp.pmc_name,
2049 			    (char *)req.data, req.datasize, &lencopied)) {
2050 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2051 				    "copyoutstr %p failed--EFAULT\n", cmdstr,
2052 				    PM_DEVICE(dip), (void *)req.data))
2053 				break;
2054 			}
2055 			*rval_p = lencopied;
2056 			ret = 0;
2057 			break;
2058 		}
2059 
2060 		case PM_GET_POWER_NAME:
2061 		{
2062 			int i;
2063 
2064 			ASSERT(dip);
2065 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2066 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2067 				    "component %d > numcmpts - 1 %d--EINVAL\n",
2068 				    cmdstr, PM_DEVICE(dip), req.component,
2069 				    PM_NUMCMPTS(dip) - 1))
2070 				ret = EINVAL;
2071 				break;
2072 			}
2073 			if ((i = req.value) < 0 ||
2074 			    i > cp->pmc_comp.pmc_numlevels - 1) {
2075 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2076 				    "value %d > num_levels - 1 %d--EINVAL\n",
2077 				    cmdstr, PM_DEVICE(dip), req.value,
2078 				    cp->pmc_comp.pmc_numlevels - 1))
2079 				ret = EINVAL;
2080 				break;
2081 			}
2082 			dep = cp->pmc_comp.pmc_lnames[req.value];
2083 			if (ret = copyoutstr(dep,
2084 			    req.data, req.datasize, &lencopied)) {
2085 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2086 				    "copyoutstr %p failed--EFAULT\n", cmdstr,
2087 				    PM_DEVICE(dip), (void *)req.data))
2088 				break;
2089 			}
2090 			*rval_p = lencopied;
2091 			ret = 0;
2092 			break;
2093 		}
2094 
2095 		case PM_GET_POWER_LEVELS:
2096 		{
2097 			int musthave;
2098 			int numlevels;
2099 			int wordsize;
2100 
2101 #ifdef	_MULTI_DATAMODEL
2102 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2103 				wordsize = sizeof (int32_t);
2104 			} else
2105 #endif /* _MULTI_DATAMODEL */
2106 			{
2107 				wordsize = sizeof (int);
2108 			}
2109 			ASSERT(dip);
2110 
2111 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2112 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2113 				    "has %d components, component %d requested"
2114 				    "--EINVAL\n", cmdstr, PM_DEVICE(dip),
2115 				    PM_NUMCMPTS(dip), req.component))
2116 				ret = EINVAL;
2117 				break;
2118 			}
2119 			numlevels = cp->pmc_comp.pmc_numlevels;
2120 			musthave = numlevels *  wordsize;
2121 			if (req.datasize < musthave) {
2122 				PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
2123 				    "%d--EINVAL\n", cmdstr, req.datasize,
2124 				    musthave))
2125 				ret = EINVAL;
2126 				break;
2127 			}
2128 			PM_LOCK_DIP(dip);
2129 			for (i = 0; i < numlevels; i++) {
2130 				int level;
2131 				int32_t level32;
2132 				caddr_t laddr;
2133 
2134 				if (wordsize == sizeof (int)) {
2135 					level = cp->pmc_comp.pmc_lvals[i];
2136 					laddr = (caddr_t)&level;
2137 				} else {
2138 					level32 = cp->pmc_comp.pmc_lvals[i];
2139 					laddr = (caddr_t)&level32;
2140 				}
2141 				if (ddi_copyout(laddr, (void *) req.data,
2142 				    wordsize, mode) != 0) {
2143 					PM_UNLOCK_DIP(dip);
2144 					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
2145 					    "(%s#%d) laddr %p EFAULT\n",
2146 					    cmdstr, PM_DEVICE(dip),
2147 					    (void *)laddr))
2148 					ASSERT(!dipheld);
2149 					return (EFAULT);
2150 				}
2151 				laddr = (caddr_t)req.data;
2152 				laddr += wordsize;
2153 				req.data = (int *)laddr;
2154 			}
2155 			PM_UNLOCK_DIP(dip);
2156 			*rval_p = numlevels;
2157 			ret = 0;
2158 			break;
2159 		}
2160 
2161 
2162 		case PM_GET_NUM_POWER_LEVELS:
2163 		{
2164 			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2165 				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2166 				    "component %d > numcmpts - 1 %d--EINVAL\n",
2167 				    cmdstr, PM_DEVICE(dip), req.component,
2168 				    PM_NUMCMPTS(dip) - 1))
2169 				ret = EINVAL;
2170 				break;
2171 			}
2172 			*rval_p = cp->pmc_comp.pmc_numlevels;
2173 			ret = 0;
2174 			break;
2175 		}
2176 
2177 		case PM_GET_DEVICE_THRESHOLD_BASIS:
2178 		{
2179 			ret = 0;
2180 			PM_LOCK_DIP(dip);
2181 			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
2182 				PM_UNLOCK_DIP(dip);
2183 				PMD(PMD_ERROR, ("ioctl: %s: "
2184 				    "PM_NO_PM_COMPONENTS\n", cmdstr))
2185 				*rval_p = PM_NO_PM_COMPONENTS;
2186 				break;
2187 			}
2188 			if (PM_ISDIRECT(dip)) {
2189 				PM_UNLOCK_DIP(dip);
2190 				*rval_p = PM_DIRECTLY_MANAGED;
2191 				break;
2192 			}
2193 			switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
2194 			case PMC_DEF_THRESH:
2195 			case PMC_NEXDEF_THRESH:
2196 				*rval_p = PM_DEFAULT_THRESHOLD;
2197 				break;
2198 			case PMC_DEV_THRESH:
2199 				*rval_p = PM_DEVICE_THRESHOLD;
2200 				break;
2201 			case PMC_COMP_THRESH:
2202 				*rval_p = PM_COMPONENT_THRESHOLD;
2203 				break;
2204 			case PMC_CPU_THRESH:
2205 				*rval_p = PM_CPU_THRESHOLD;
2206 				break;
2207 			default:
2208 				if (PM_ISBC(dip)) {
2209 					*rval_p = PM_OLD_THRESHOLD;
2210 					break;
2211 				}
2212 				PMD(PMD_ERROR, ("ioctl: %s: default, not "
2213 				    "BC--EINVAL", cmdstr))
2214 				ret = EINVAL;
2215 				break;
2216 			}
2217 			PM_UNLOCK_DIP(dip);
2218 			break;
2219 		}
2220 		default:
2221 			/*
2222 			 * Internal error, invalid ioctl description;
2223 			 * force debug entry even if pm_debug is not set.
2224 			 */
2225 #ifdef	DEBUG
2226 			pm_log("invalid diptype %d for cmd %d (%s)\n",
2227 			    pcip->diptype, cmd, pcip->name);
2228 #endif
2229 			ASSERT(0);
2230 			return (EIO);
2231 		}
2232 		break;
2233 	}
2234 
2235 	case PM_PSC:
2236 	{
2237 		/*
2238 		 * Commands that require pm_state_change_t as arg
2239 		 */
2240 #ifdef	_MULTI_DATAMODEL
2241 		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2242 			pscp32 = (pm_state_change32_t *)arg;
2243 			if (ddi_copyin((caddr_t)arg, &psc32,
2244 			    sizeof (psc32), mode) != 0) {
2245 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2246 				    "EFAULT\n", cmdstr))
2247 				ASSERT(!dipheld);
2248 				return (EFAULT);
2249 			}
2250 			psc.physpath = (caddr_t)(uintptr_t)psc32.physpath;
2251 			psc.size = psc32.size;
2252 		} else
2253 #endif /* _MULTI_DATAMODEL */
2254 		{
2255 			pscp = (pm_state_change_t *)arg;
2256 			if (ddi_copyin((caddr_t)arg, &psc,
2257 			    sizeof (psc), mode) != 0) {
2258 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2259 				    "EFAULT\n", cmdstr))
2260 				ASSERT(!dipheld);
2261 				return (EFAULT);
2262 			}
2263 		}
2264 		switch (cmd) {
2265 
2266 		case PM_GET_STATE_CHANGE:
2267 		case PM_GET_STATE_CHANGE_WAIT:
2268 		{
2269 			psce_t			*pscep;
2270 			pm_state_change_t	*p;
2271 			caddr_t			physpath;
2272 			size_t			physlen;
2273 
2274 			/*
2275 			 * We want to know if any device has changed state.
2276 			 * We look up by clone.  In case we have another thread
2277 			 * from the same process, we loop.
2278 			 * pm_psc_clone_to_interest() returns a locked entry.
2279 			 * We create an internal copy of the event entry prior
2280 			 * to copyout to user space because we don't want to
2281 			 * hold the psce_lock while doing the copyout, as we
2282 			 * might hit a page fault which would eventually bring
2283 			 * us back here requesting the same lock.
2284 			 */
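			/*
			 * Illustrative user-level consumer (a hedged sketch,
			 * not kernel code; "fd" is an open descriptor for
			 * /dev/pm and all names are hypothetical):
			 *
			 *	pm_state_change_t psc;
			 *	char path[MAXPATHLEN];
			 *
			 *	psc.physpath = path;
			 *	psc.size = sizeof (path);
			 *	if (ioctl(fd, PM_GET_STATE_CHANGE_WAIT,
			 *	    &psc) == 0)
			 *		(void) printf("%s comp %d: %d -> %d\n",
			 *		    psc.physpath, psc.component,
			 *		    psc.old_level, psc.new_level);
			 */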
2285 			mutex_enter(&pm_clone_lock);
2286 			if (!pm_interest_registered(clone))
2287 				pm_register_watcher(clone, NULL);
2288 			while ((pscep =
2289 			    pm_psc_clone_to_interest(clone)) == NULL) {
2290 				if (cmd == PM_GET_STATE_CHANGE) {
2291 					PMD(PMD_IOCTL, ("ioctl: %s: "
2292 					    "EWOULDBLOCK\n", cmdstr))
2293 					mutex_exit(&pm_clone_lock);
2294 					ASSERT(!dipheld);
2295 					return (EWOULDBLOCK);
2296 				} else {
2297 					if (cv_wait_sig(&pm_clones_cv[clone],
2298 					    &pm_clone_lock) == 0) {
2299 						mutex_exit(&pm_clone_lock);
2300 						PMD(PMD_ERROR, ("ioctl: %s "
2301 						    "EINTR\n", cmdstr))
2302 						ASSERT(!dipheld);
2303 						return (EINTR);
2304 					}
2305 				}
2306 			}
2307 			mutex_exit(&pm_clone_lock);
2308 
2309 			physlen = pscep->psce_out->size;
2310 			physpath = NULL;
2311 			/*
2312 			 * If we were unable to store the path while bringing
2313 			 * up the console fb upon entering the prom, we return
2314 			 * an empty ("") name with the overrun event set.
2315 			 */
2316 			if (physlen == (size_t)-1) {	/* kmemalloc failed */
2317 				physpath = kmem_zalloc(1, KM_SLEEP);
2318 				physlen = 1;
2319 			}
2320 			if ((psc.physpath == NULL) || (psc.size < physlen)) {
2321 				PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n", cmdstr))
2322 				mutex_exit(&pscep->psce_lock);
				if (physpath != NULL)
					kmem_free(physpath, physlen);
2323 				ret = EFAULT;
2324 				break;
2325 			}
2326 			if (physpath == NULL) {
2327 				physpath = kmem_zalloc(physlen, KM_SLEEP);
2328 				bcopy((const void *) pscep->psce_out->physpath,
2329 				    (void *) physpath, physlen);
2330 			}
2331 
2332 			p = pscep->psce_out;
2333 #ifdef	_MULTI_DATAMODEL
2334 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2335 #ifdef DEBUG
2336 				size_t usrcopysize;
2337 #endif
2338 				psc32.flags = (ushort_t)p->flags;
2339 				psc32.event = (ushort_t)p->event;
2340 				psc32.timestamp = (int32_t)p->timestamp;
2341 				psc32.component = (int32_t)p->component;
2342 				psc32.old_level = (int32_t)p->old_level;
2343 				psc32.new_level = (int32_t)p->new_level;
2344 				copysize32 = ((intptr_t)&psc32.size -
2345 				    (intptr_t)&psc32.component);
2346 #ifdef DEBUG
2347 				usrcopysize = ((intptr_t)&pscp32->size -
2348 				    (intptr_t)&pscp32->component);
2349 				ASSERT(usrcopysize == copysize32);
2350 #endif
2351 			} else
2352 #endif /* _MULTI_DATAMODEL */
2353 			{
2354 				psc.flags = p->flags;
2355 				psc.event = p->event;
2356 				psc.timestamp = p->timestamp;
2357 				psc.component = p->component;
2358 				psc.old_level = p->old_level;
2359 				psc.new_level = p->new_level;
2360 				copysize = ((long)&p->size -
2361 				    (long)&p->component);
2362 			}
2363 			if (p->size != (size_t)-1)
2364 				kmem_free(p->physpath, p->size);
2365 			p->size = 0;
2366 			p->physpath = NULL;
2367 			if (pscep->psce_out == pscep->psce_last)
2368 				p = pscep->psce_first;
2369 			else
2370 				p++;
2371 			pscep->psce_out = p;
2372 			mutex_exit(&pscep->psce_lock);
2373 
2374 			ret = copyoutstr(physpath, psc.physpath,
2375 			    physlen, &lencopied);
2376 			kmem_free(physpath, physlen);
2377 			if (ret) {
2378 				PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2379 				    "failed--EFAULT\n", cmdstr,
2380 				    (void *)psc.physpath))
2381 				break;
2382 			}
2383 
2384 #ifdef	_MULTI_DATAMODEL
2385 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2386 				if (ddi_copyout(&psc32.component,
2387 				    &pscp32->component, copysize32, mode)
2388 				    != 0) {
2389 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2390 					    "failed--EFAULT\n", cmdstr))
2391 					ret = EFAULT;
2392 					break;
2393 				}
2394 			} else
2395 #endif	/* _MULTI_DATAMODEL */
2396 			{
2397 				if (ddi_copyout(&psc.component,
2398 				    &pscp->component, copysize, mode) != 0) {
2399 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2400 					    "failed--EFAULT\n", cmdstr))
2401 					ret = EFAULT;
2402 					break;
2403 				}
2404 			}
2405 			ret = 0;
2406 			break;
2407 		}
2408 
2409 		case PM_DIRECT_NOTIFY:
2410 		case PM_DIRECT_NOTIFY_WAIT:
2411 		{
2412 			psce_t			*pscep;
2413 			pm_state_change_t	*p;
2414 			caddr_t			physpath;
2415 			size_t			physlen;
2416 			/*
2417 			 * We want to know if any direct device of ours has
2418 			 * something we should know about.  We look up by clone.
2419 			 * In case we have another thread from the same process,
2420 			 * we loop.
2421 			 * pm_psc_clone_to_direct() returns a locked entry.
2422 			 */
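			/*
			 * A direct-PM client typically blocks in
			 * PM_DIRECT_NOTIFY_WAIT, or polls its /dev/pm
			 * descriptor and then issues the non-blocking form.
			 * A hedged sketch (names hypothetical, psc set up as
			 * for PM_GET_STATE_CHANGE above):
			 *
			 *	struct pollfd pfd = { fd, POLLIN, 0 };
			 *
			 *	if (poll(&pfd, 1, -1) == 1 &&
			 *	    ioctl(fd, PM_DIRECT_NOTIFY, &psc) == 0)
			 *		handle_request(&psc);
			 */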
2423 			mutex_enter(&pm_clone_lock);
2424 			while (pm_poll_cnt[clone] == 0 ||
2425 			    (pscep = pm_psc_clone_to_direct(clone)) == NULL) {
2426 				if (cmd == PM_DIRECT_NOTIFY) {
2427 					PMD(PMD_IOCTL, ("ioctl: %s: "
2428 					    "EWOULDBLOCK\n", cmdstr))
2429 					mutex_exit(&pm_clone_lock);
2430 					ASSERT(!dipheld);
2431 					return (EWOULDBLOCK);
2432 				} else {
2433 					if (cv_wait_sig(&pm_clones_cv[clone],
2434 					    &pm_clone_lock) == 0) {
2435 						mutex_exit(&pm_clone_lock);
2436 						PMD(PMD_ERROR, ("ioctl: %s: "
2437 						    "EINTR\n", cmdstr))
2438 						ASSERT(!dipheld);
2439 						return (EINTR);
2440 					}
2441 				}
2442 			}
2443 			mutex_exit(&pm_clone_lock);
2444 			physlen = pscep->psce_out->size;
2445 			if ((psc.physpath == NULL) || (psc.size < physlen)) {
2446 				mutex_exit(&pscep->psce_lock);
2447 				PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n",
2448 				    cmdstr))
2449 				ret = EFAULT;
2450 				break;
2451 			}
2452 			physpath = kmem_zalloc(physlen, KM_SLEEP);
2453 			bcopy((const void *) pscep->psce_out->physpath,
2454 			    (void *) physpath, physlen);
2455 
2456 			p = pscep->psce_out;
2457 #ifdef	_MULTI_DATAMODEL
2458 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2459 #ifdef DEBUG
2460 				size_t usrcopysize;
2461 #endif
2462 				psc32.component = (int32_t)p->component;
2463 				psc32.flags = (ushort_t)p->flags;
2464 				psc32.event = (ushort_t)p->event;
2465 				psc32.timestamp = (int32_t)p->timestamp;
2466 				psc32.old_level = (int32_t)p->old_level;
2467 				psc32.new_level = (int32_t)p->new_level;
2468 				copysize32 = (intptr_t)&psc32.size -
2469 				    (intptr_t)&psc32.component;
2470 				PMD(PMD_DPM, ("ioctl: %s: PDN32 %s, comp %d "
2471 				    "%d -> %d\n", cmdstr, physpath,
2472 				    p->component, p->old_level, p->new_level))
2473 #ifdef DEBUG
2474 				usrcopysize = (intptr_t)&pscp32->size -
2475 				    (intptr_t)&pscp32->component;
2476 				ASSERT(usrcopysize == copysize32);
2477 #endif
2478 			} else
2479 #endif
2480 			{
2481 				psc.component = p->component;
2482 				psc.flags = p->flags;
2483 				psc.event = p->event;
2484 				psc.timestamp = p->timestamp;
2485 				psc.old_level = p->old_level;
2486 				psc.new_level = p->new_level;
2487 				copysize = (intptr_t)&p->size -
2488 				    (intptr_t)&p->component;
2489 				PMD(PMD_DPM, ("ioctl: %s: PDN %s, comp %d "
2490 				    "%d -> %d\n", cmdstr, physpath,
2491 				    p->component, p->old_level, p->new_level))
2492 			}
2493 			mutex_enter(&pm_clone_lock);
2494 			PMD(PMD_IOCTL, ("ioctl: %s: pm_poll_cnt[%d] is %d "
2495 			    "before decrement\n", cmdstr, clone,
2496 			    pm_poll_cnt[clone]))
2497 			pm_poll_cnt[clone]--;
2498 			mutex_exit(&pm_clone_lock);
2499 			kmem_free(p->physpath, p->size);
2500 			p->size = 0;
2501 			p->physpath = NULL;
2502 			if (pscep->psce_out == pscep->psce_last)
2503 				p = pscep->psce_first;
2504 			else
2505 				p++;
2506 			pscep->psce_out = p;
2507 			mutex_exit(&pscep->psce_lock);
2508 
2509 			ret = copyoutstr(physpath, psc.physpath,
2510 			    physlen, &lencopied);
2511 			kmem_free(physpath, physlen);
2512 			if (ret) {
2513 				PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2514 				    "failed--EFAULT\n", cmdstr,
2515 				    (void *)psc.physpath))
2516 				break;
2517 			}
2518 
2519 #ifdef	_MULTI_DATAMODEL
2520 			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2521 				if (ddi_copyout(&psc32.component,
2522 				    &pscp32->component, copysize32, mode)
2523 				    != 0) {
2524 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2525 					    "failed--EFAULT\n", cmdstr))
2526 					ret = EFAULT;
2527 					break;
2528 				}
2529 			} else
2530 #endif	/* _MULTI_DATAMODEL */
2531 			{
2532 				if (ddi_copyout(&psc.component,
2533 				    &pscp->component, copysize, mode) != 0) {
2534 					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2535 					    "failed--EFAULT\n", cmdstr))
2536 					ret = EFAULT;
2537 					break;
2538 				}
2539 			}
2540 			ret = 0;
2541 			break;
2542 		}
2543 		default:
2544 			/*
2545 			 * Internal error, invalid ioctl description;
2546 			 * force debug entry even if pm_debug is not set.
2547 			 */
2548 #ifdef	DEBUG
2549 			pm_log("invalid diptype %d for cmd %d (%s)\n",
2550 			    pcip->diptype, cmd, pcip->name);
2551 #endif
2552 			ASSERT(0);
2553 			return (EIO);
2554 		}
2555 		break;
2556 	}
2557 
2558 	case PM_SRCH:		/* command that takes a pm_searchargs_t arg */
2559 	{
2560 		/*
2561 		 * If no ppm, then there is nothing to search.
2562 		 */
2563 		if (DEVI(ddi_root_node())->devi_pm_ppm == NULL) {
2564 			ret = ENODEV;
2565 			break;
2566 		}
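
		/*
		 * A hypothetical user-level caller fills a pm_searchargs_t
		 * with three NUL-terminated strings (each shorter than
		 * MAXCOPYBUF) and issues the search; a sketch only:
		 *
		 *	pm_searchargs_t psa;
		 *
		 *	psa.pms_listname = "my_list";
		 *	psa.pms_manufacturer = sys_mfg;
		 *	psa.pms_product = sys_product;
		 *	result = ioctl(fd, PM_SEARCH_LIST, &psa);
		 */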
2567 
2568 #ifdef	_MULTI_DATAMODEL
2569 		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2570 			if (ddi_copyin((caddr_t)arg, &psa32,
2571 			    sizeof (psa32), mode) != 0) {
2572 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2573 				    "EFAULT\n", cmdstr))
2574 				return (EFAULT);
2575 			}
2576 			if (copyinstr((void *)(uintptr_t)psa32.pms_listname,
2577 			    listname, MAXCOPYBUF, NULL)) {
2578 				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2579 				    "%d, EFAULT\n", cmdstr,
2580 				    (void *)(uintptr_t)psa32.pms_listname,
2581 				    MAXCOPYBUF))
2582 				ret = EFAULT;
2583 				break;
2584 			}
2585 			if (copyinstr((void *)(uintptr_t)psa32.pms_manufacturer,
2586 			    manufacturer, MAXCOPYBUF, NULL)) {
2587 				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2588 				    "%d, EFAULT\n", cmdstr,
2589 				    (void *)(uintptr_t)psa32.pms_manufacturer,
2590 				    MAXCOPYBUF))
2591 				ret = EFAULT;
2592 				break;
2593 			}
2594 			if (copyinstr((void *)(uintptr_t)psa32.pms_product,
2595 			    product, MAXCOPYBUF, NULL)) {
2596 				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2597 				    "%d, EFAULT\n", cmdstr,
2598 				    (void *)(uintptr_t)psa32.pms_product,
2599 				    MAXCOPYBUF))
2600 				ret = EFAULT;
2601 				break;
2602 			}
2603 		} else
2604 #endif /* _MULTI_DATAMODEL */
2605 		{
2606 			if (ddi_copyin((caddr_t)arg, &psa,
2607 			    sizeof (psa), mode) != 0) {
2608 				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2609 				    "EFAULT\n", cmdstr))
2610 				return (EFAULT);
2611 			}
2612 			if (copyinstr(psa.pms_listname,
2613 			    listname, MAXCOPYBUF, NULL)) {
2614 				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2615 				    "%d, EFAULT\n", cmdstr,
2616 				    (void *)psa.pms_listname, MAXCOPYBUF))
2617 				ret = EFAULT;
2618 				break;
2619 			}
2620 			if (copyinstr(psa.pms_manufacturer,
2621 			    manufacturer, MAXCOPYBUF, NULL)) {
2622 				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2623 				    "%d, EFAULT\n", cmdstr,
2624 				    (void *)psa.pms_manufacturer, MAXCOPYBUF))
2625 				ret = EFAULT;
2626 				break;
2627 			}
2628 			if (copyinstr(psa.pms_product,
2629 			    product, MAXCOPYBUF, NULL)) {
2630 				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2631 				    "%d, EFAULT\n", cmdstr,
2632 				    (void *)psa.pms_product, MAXCOPYBUF))
2633 				ret = EFAULT;
2634 				break;
2635 			}
2636 		}
2637 		psa.pms_listname = listname;
2638 		psa.pms_manufacturer = manufacturer;
2639 		psa.pms_product = product;
2640 		switch (cmd) {
2641 		case PM_SEARCH_LIST:
2642 			ret = pm_ppm_searchlist(&psa);
2643 			break;
2644 
2645 		default:
2646 			/*
2647 			 * Internal error, invalid ioctl description;
2648 			 * force debug entry even if pm_debug is not set.
2649 			 */
2650 #ifdef	DEBUG
2651 			pm_log("invalid diptype %d for cmd %d (%s)\n",
2652 			    pcip->diptype, cmd, pcip->name);
2653 #endif
2654 			ASSERT(0);
2655 			return (EIO);
2656 		}
2657 		break;
2658 	}
2659 
2660 	case NOSTRUCT:
2661 	{
2662 		switch (cmd) {
2663 		case PM_START_PM:
2664 		case PM_START_CPUPM:
2665 		{
2666 			mutex_enter(&pm_scan_lock);
2667 			if ((cmd == PM_START_PM && autopm_enabled) ||
2668 			    (cmd == PM_START_CPUPM && PM_CPUPM_ENABLED)) {
2669 				mutex_exit(&pm_scan_lock);
2670 				PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
2671 				    cmdstr))
2672 				ret = EBUSY;
2673 				break;
2674 			}
2675 			if (cmd == PM_START_PM)
2676 				autopm_enabled = 1;
2677 			else
2678 				cpupm = PM_CPUPM_ENABLE;
2679 			mutex_exit(&pm_scan_lock);
2680 			ddi_walk_devs(ddi_root_node(), pm_start_pm_walk, &cmd);
2681 			ret = 0;
2682 			break;
2683 		}
2684 
2685 		case PM_RESET_PM:
2686 		case PM_STOP_PM:
2687 		case PM_STOP_CPUPM:
2688 		{
2689 			extern void pm_discard_thresholds(void);
2690 
2691 			mutex_enter(&pm_scan_lock);
2692 			if ((cmd == PM_STOP_PM && !autopm_enabled) ||
2693 			    (cmd == PM_STOP_CPUPM && PM_CPUPM_DISABLED)) {
2694 				mutex_exit(&pm_scan_lock);
2695 				PMD(PMD_ERROR, ("ioctl: %s: EINVAL\n",
2696 				    cmdstr))
2697 				ret = EINVAL;
2698 				break;
2699 			}
2700 			if (cmd == PM_STOP_PM) {
2701 				autopm_enabled = 0;
2702 				pm_S3_enabled = 0;
2703 				autoS3_enabled = 0;
2704 			} else if (cmd == PM_STOP_CPUPM) {
2705 				cpupm = PM_CPUPM_DISABLE;
2706 			} else {
2707 				autopm_enabled = 0;
2708 				autoS3_enabled = 0;
2709 				cpupm = PM_CPUPM_NOTSET;
2710 			}
2711 			mutex_exit(&pm_scan_lock);
2712 
2713 			/*
2714 			 * bring devices to full power level, stop scan
2715 			 */
2716 			ddi_walk_devs(ddi_root_node(), pm_stop_pm_walk, &cmd);
2717 			ret = 0;
2718 			if (cmd == PM_STOP_PM || cmd == PM_STOP_CPUPM)
2719 				break;
2720 			/*
2721 			 * Now do only PM_RESET_PM stuff.
2722 			 */
2723 			pm_system_idle_threshold = pm_default_idle_threshold;
2724 			pm_cpu_idle_threshold = 0;
2725 			pm_discard_thresholds();
2726 			pm_all_to_default_thresholds();
2727 			pm_dispatch_to_dep_thread(PM_DEP_WK_REMOVE_DEP,
2728 			    NULL, NULL, PM_DEP_WAIT, NULL, 0);
2729 			break;
2730 		}
2731 
2732 		case PM_GET_SYSTEM_THRESHOLD:
2733 		{
2734 			*rval_p = pm_system_idle_threshold;
2735 			ret = 0;
2736 			break;
2737 		}
2738 
2739 		case PM_GET_DEFAULT_SYSTEM_THRESHOLD:
2740 		{
2741 			*rval_p = pm_default_idle_threshold;
2742 			ret = 0;
2743 			break;
2744 		}
2745 
2746 		case PM_GET_CPU_THRESHOLD:
2747 		{
2748 			*rval_p = pm_cpu_idle_threshold;
2749 			ret = 0;
2750 			break;
2751 		}
2752 
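		/*
		 * NOSTRUCT commands take their argument directly as the
		 * ioctl arg rather than through a copied-in structure; e.g.
		 * a hypothetical caller setting a 30 second system idle
		 * threshold would issue:
		 *
		 *	(void) ioctl(fd, PM_SET_SYSTEM_THRESHOLD, 30);
		 */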
2753 		case PM_SET_SYSTEM_THRESHOLD:
2754 		case PM_SET_CPU_THRESHOLD:
2755 		{
2756 			if ((int)arg < 0) {
2757 				PMD(PMD_ERROR, ("ioctl: %s: arg 0x%x < 0"
2758 				    "--EINVAL\n", cmdstr, (int)arg))
2759 				ret = EINVAL;
2760 				break;
2761 			}
2762 			PMD(PMD_IOCTL, ("ioctl: %s: 0x%x 0t%d\n", cmdstr,
2763 			    (int)arg, (int)arg))
2764 			if (cmd == PM_SET_SYSTEM_THRESHOLD)
2765 				pm_system_idle_threshold = (int)arg;
2766 			else {
2767 				pm_cpu_idle_threshold = (int)arg;
2768 			}
2769 			ddi_walk_devs(ddi_root_node(), pm_set_idle_thresh_walk,
2770 			    (void *) &cmd);
2771 
2772 			ret = 0;
2773 			break;
2774 		}
2775 
2776 		case PM_IDLE_DOWN:
2777 		{
2778 			if (pm_timeout_idledown() != 0) {
2779 				ddi_walk_devs(ddi_root_node(),
2780 				    pm_start_idledown, (void *)PMID_IOC);
2781 			}
2782 			ret = 0;
2783 			break;
2784 		}
2785 
2786 		case PM_GET_PM_STATE:
2787 		{
2788 			if (autopm_enabled) {
2789 				*rval_p = PM_SYSTEM_PM_ENABLED;
2790 			} else {
2791 				*rval_p = PM_SYSTEM_PM_DISABLED;
2792 			}
2793 			ret = 0;
2794 			break;
2795 		}
2796 
2797 		case PM_GET_CPUPM_STATE:
2798 		{
2799 			if (PM_CPUPM_ENABLED)
2800 				*rval_p = PM_CPU_PM_ENABLED;
2801 			else if (PM_CPUPM_DISABLED)
2802 				*rval_p = PM_CPU_PM_DISABLED;
2803 			else
2804 				*rval_p = PM_CPU_PM_NOTSET;
2805 			ret = 0;
2806 			break;
2807 		}
2808 
2809 		case PM_GET_AUTOS3_STATE:
2810 		{
2811 			if (autoS3_enabled) {
2812 				*rval_p = PM_AUTOS3_ENABLED;
2813 			} else {
2814 				*rval_p = PM_AUTOS3_DISABLED;
2815 			}
2816 			ret = 0;
2817 			break;
2818 		}
2819 
2820 		case PM_GET_S3_SUPPORT_STATE:
2821 		{
2822 			if (pm_S3_enabled) {
2823 				*rval_p = PM_S3_SUPPORT_ENABLED;
2824 			} else {
2825 				*rval_p = PM_S3_SUPPORT_DISABLED;
2826 			}
2827 			ret = 0;
2828 			break;
2829 		}
2830 
2831 		/*
2832 		 * pmconfig tells us if the platform supports S3
2833 		 */
2834 		case PM_ENABLE_S3:
2835 		{
2836 			mutex_enter(&pm_scan_lock);
2837 			if (pm_S3_enabled) {
2838 				mutex_exit(&pm_scan_lock);
2839 				PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
2840 				    cmdstr))
2841 				ret = EBUSY;
2842 				break;
2843 			}
2844 			pm_S3_enabled = 1;
2845 			mutex_exit(&pm_scan_lock);
2846 			ret = 0;
2847 			break;
2848 		}
2849 
2850 		case PM_DISABLE_S3:
2851 		{
2852 			mutex_enter(&pm_scan_lock);
2853 			pm_S3_enabled = 0;
2854 			mutex_exit(&pm_scan_lock);
2855 			ret = 0;
2856 			break;
2857 		}
2858 
2859 		case PM_START_AUTOS3:
2860 		{
2861 			mutex_enter(&pm_scan_lock);
2862 			if (autoS3_enabled) {
2863 				mutex_exit(&pm_scan_lock);
2864 				PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
2865 				    cmdstr))
2866 				ret = EBUSY;
2867 				break;
2868 			}
2869 			autoS3_enabled = 1;
2870 			mutex_exit(&pm_scan_lock);
2871 			ret = 0;
2872 			break;
2873 		}
2874 
2875 		case PM_STOP_AUTOS3:
2876 		{
2877 			mutex_enter(&pm_scan_lock);
2878 			autoS3_enabled = 0;
2879 			mutex_exit(&pm_scan_lock);
2880 			ret = 0;
2881 			break;
2882 		}
2883 
2884 		default:
2885 			/*
2886 			 * Internal error, invalid ioctl description;
2887 			 * force debug entry even if pm_debug is not set.
2888 			 */
2889 #ifdef	DEBUG
2890 			pm_log("invalid diptype %d for cmd %d (%s)\n",
2891 			    pcip->diptype, cmd, pcip->name);
2892 #endif
2893 			ASSERT(0);
2894 			return (EIO);
2895 		}
2896 		break;
2897 	}
2898 
2899 	default:
2900 		/*
2901 		 * Internal error, invalid ioctl description;
2902 		 * force debug entry even if pm_debug is not set.
2903 		 */
2904 #ifdef	DEBUG
2905 		pm_log("ioctl: invalid str_type %d for cmd %d (%s)\n",
2906 		    pcip->str_type, cmd, pcip->name);
2907 #endif
2908 		ASSERT(0);
2909 		return (EIO);
2910 	}
2911 	ASSERT(ret != 0x0badcafe);	/* some cmd in wrong case! */
2912 	if (dipheld) {
2913 		ASSERT(dip);
2914 		PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d) for "
2915 		    "exiting pm_ioctl\n", cmdstr, PM_DEVICE(dip)))
2916 		PM_RELE(dip);
2917 	}
2918 	PMD(PMD_IOCTL, ("ioctl: %s: end, ret=%d\n", cmdstr, ret))
2919 	return (ret);
2920 }
2921