xref: /titanic_52/usr/src/uts/sun4u/excalibur/io/xcalppm.c (revision bdfc6d18da790deeec2e0eb09c625902defe2498)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Platform Power Management driver for SUNW,Sun-Blade-1000
30  */
31 #include <sys/modctl.h>
32 #include <sys/conf.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/ddi_impldefs.h>
36 #include <sys/ppmvar.h>
37 #include <sys/ppmio.h>
38 #include <sys/xcalppm_reg.h>
39 #include <sys/xcalppm_var.h>
40 #include <sys/stat.h>
41 #include <sys/epm.h>
42 #include <sys/archsystm.h>
43 #include <sys/cpuvar.h>
44 #include <sys/cheetahregs.h>
45 #include <sys/us3_module.h>
46 
47 /*
48  * Locking Considerations
49  *
50  * To look at and/or modify xcppm_domain fields or elements of its list of
51  * xcppm_dev structures the domain_lock for the affected domain must be held.
52  *
53  * When the autopm framework needs to change the power of a component of a
54  * device, it needs to hold the associated power lock (see discussion at
55  * top of uts/common/os/sunpm.c).
56  *
57  * If the framework needs to lock a dev/cmpt for a device which this ppm
58  * has claimed, xcppm_ctlops will be called with PMR_PPM_LOCK_POWER.  Ppm
59  * needs to be involved because, due to platform constraints, changing the
60  * power of one device may require that other devices be changed in the same
61  * operation.
62  *
63  * In some domains (e.g., cpus) the power lock must be acquired for all the
64  * affected devices to avoid possible corruption of the power states.  The
65  * joint change must be an atomic operation.  Ppm handles this by acquiring
66  * the domain lock, then walking the list of affected devices and acquiring
67  * the power lock for each of them.  To unlock, the list is traversed and
68  * each of the power locks is freed, followed by freeing the domain lock.
69  *
70  * For other domains ppm will only be changing the power of a single device
71  * that is known to the framework.  In these cases, the locking is done by
72  * acquiring the domain lock and directly calling the framework routine for
73  * getting a single power lock.
74  */
75 
76 static int	xcppm_attach(dev_info_t *, ddi_attach_cmd_t);
77 static int	xcppm_detach(dev_info_t *, ddi_detach_cmd_t);
78 static int	xcppm_ctlops(dev_info_t *, dev_info_t *,
79 		    ddi_ctl_enum_t, void *, void *);
80 static void	xcppm_dev_init(ppm_dev_t *);
81 static void	xcppm_dev_fini(ppm_dev_t *);
82 static void	xcppm_iocset(uint8_t);
83 static uint8_t	xcppm_iocget(void);
84 
85 /*
86  * Note: 1394 and pciupa were originally required to be LOCK_ALL domains.
87  * However, the underlying nexus drivers aren't able to do power mgmt
88  * (because of hw implementation issues).  The locking protocol for these
89  * domains is changed to LOCK_ONE to simplify other code.  The domain
90  * code itself will be removed in the future.
91  */
/* 1394 bus domain: one device locked at a time (see note above) */
static ppm_domain_t xcppm_1394 = { "domain_1394",	PPMD_LOCK_ONE };
/* cpu domain: all cpus must be locked and changed as one atomic operation */
static ppm_domain_t xcppm_cpu  = { "domain_cpu",	PPMD_LOCK_ALL };
/* power-fet domain: devices sharing FET-switched common power */
static ppm_domain_t xcppm_fet  = { "domain_powerfet",	PPMD_LOCK_ONE };
/* pci/upa domain */
static ppm_domain_t xcppm_upa  = { "domain_pciupa",	PPMD_LOCK_ONE };
96 
/*
 * NULL-terminated list of all power domains on this platform,
 * consumed by the common ppm framework code.
 */
ppm_domain_t *ppm_domains[] = {
	&xcppm_1394,
	&xcppm_cpu,
	&xcppm_fet,
	&xcppm_upa,
	NULL
};
104 
105 
/* Platform-specific callbacks invoked by the common ppm driver code */
struct ppm_funcs ppmf = {
	xcppm_dev_init,			/* dev_init */
	xcppm_dev_fini,			/* dev_fini */
	xcppm_iocset,			/* iocset */
	xcppm_iocget,			/* iocget */
};
112 
113 
114 /*
115  * The order of entries must be from slowest to fastest and in
116  * one-to-one correspondence with the cpu_level array.
117  */
118 static const uint16_t bbc_estar_control_masks[] = {
119 	BBC_ESTAR_SLOW, BBC_ESTAR_MEDIUM, BBC_ESTAR_FAST
120 };
121 
122 int bbc_delay = 10;			/* microsec */
123 
124 
125 /*
126  * Configuration data structures
127  */
/*
 * Character device entry points.  open/close/ioctl are supplied by the
 * common ppm code; everything else is unsupported (nodev).
 */
static struct cb_ops xcppm_cb_ops = {
	ppm_open,		/* open */
	ppm_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	ppm_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* streamtab */
	D_MP | D_NEW,		/* driver compatibility flag */
	CB_REV,			/* cb_ops revision */
	nodev,			/* async read */
	nodev			/* async write */
};
148 
/*
 * Bus nexus operations.  Only bus_ctl (xcppm_ctlops) is provided, so
 * the ppm can intercept power requests for claimed devices; all DMA
 * operations are explicitly stubbed out since this nexus does no DMA.
 */
static struct bus_ops xcppm_bus_ops = {
	BUSO_REV,
	0,
	0,
	0,
	0,
	0,
	ddi_no_dma_map,
	ddi_no_dma_allochdl,
	ddi_no_dma_freehdl,
	ddi_no_dma_bindhdl,
	ddi_no_dma_unbindhdl,
	ddi_no_dma_flush,
	ddi_no_dma_win,
	ddi_no_dma_mctl,
	xcppm_ctlops,
	0,
	0,			/* (*bus_get_eventcookie)();	*/
	0,			/* (*bus_add_eventcall)();	*/
	0,			/* (*bus_remove_eventcall)();	*/
	0			/* (*bus_post_event)();		*/
};
171 
/* Device operations: attach/detach here, getinfo from common ppm code */
static struct dev_ops xcppm_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	ppm_getinfo,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	xcppm_attach,		/* attach */
	xcppm_detach,		/* detach */
	nodev,			/* reset */
	&xcppm_cb_ops,		/* driver operations */
	&xcppm_bus_ops,		/* bus operations */
	NULL,			/* power */
};
185 
extern struct mod_ops mod_driverops;

/* Loadable module linkage for a device driver */
static struct modldrv modldrv = {
	&mod_driverops,		/* type of module - driver */
	"platform pm driver v%I%",
	&xcppm_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
199 
200 
201 int
202 _init(void)
203 {
204 	return (ppm_init(&modlinkage, sizeof (xcppm_unit_t), "xc"));
205 }
206 
207 
/*
 * This module is never allowed to unload; always refuse with EBUSY.
 */
int
_fini(void)
{
	return (EBUSY);
}
213 
214 
/*
 * Report module information via the standard mod_info() helper.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
220 
221 
/*
 * Map the four register sets under the ppm node:
 *   set 0 (big-endian):    BBC E* control, assert-change, PLL-settle
 *   set 1 (little-endian): RIO mode AUXIO
 *   set 2 (little-endian): SuperI/O GPIO bank-select index/data
 *   set 3 (little-endian): GPIO data ports 1 and 2
 * If any mapping fails, every mapping that succeeded is torn down and
 * DDI_FAILURE is returned.  On success, GPIO Bank 0 is selected since
 * that is the bank whose bits this driver manipulates.
 */
static int
xcppm_map_all_regs(dev_info_t *dip)
{
	ddi_device_acc_attr_t attr_be, attr_le;
	int rv0, rv1, rv2, rv3;
	xcppm_unit_t *unitp;
	caddr_t base_addr;
	uint8_t data8;

	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
	/* big-endian, strictly-ordered access for the BBC registers */
	attr_be.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr_be.devacc_attr_endian_flags  = DDI_STRUCTURE_BE_ACC;
	attr_be.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* little-endian, strictly-ordered access for RIO/GPIO registers */
	attr_le.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr_le.devacc_attr_endian_flags  = DDI_STRUCTURE_LE_ACC;
	attr_le.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	rv0 = ddi_regs_map_setup(dip, 0, &base_addr, 0, 0, &attr_be,
	    &unitp->hndls.bbc_estar_ctrl);

	/* three BBC registers live at fixed offsets within register set 0 */
	unitp->regs.bbc_estar_ctrl = (uint16_t *)(base_addr +
	    BBC_ESTAR_CTRL_OFFSET);
	unitp->regs.bbc_assert_change = (uint32_t *)(base_addr +
	    BBC_ASSERT_CHANGE_OFFSET);
	unitp->regs.bbc_pll_settle = (uint32_t *)(base_addr +
	    BBC_PLL_SETTLE_OFFSET);

	rv1 = ddi_regs_map_setup(dip, 1,
	    (caddr_t *)&unitp->regs.rio_mode_auxio,
	    0, 0, &attr_le, &unitp->hndls.rio_mode_auxio);

	rv2 = ddi_regs_map_setup(dip, 2, &base_addr,
	    0, 0, &attr_le, &unitp->hndls.gpio_bank_select);

	unitp->regs.gpio_bank_sel_index = (uint8_t *)(base_addr +
	    GPIO_BANK_SEL_INDEX_OFFSET);
	unitp->regs.gpio_bank_sel_data = (uint8_t *)(base_addr +
	    GPIO_BANK_SEL_DATA_OFFSET);

	rv3 = ddi_regs_map_setup(dip, 3, &base_addr, 0, 0, &attr_le,
	    &unitp->hndls.gpio_data_ports);

	unitp->regs.gpio_port1_data = (uint8_t *)(base_addr +
	    GPIO_PORT1_DATA_OFFSET);
	unitp->regs.gpio_port2_data = (uint8_t *)(base_addr +
	    GPIO_PORT2_DATA_OFFSET);

	/* all-or-nothing: free whichever mappings succeeded on any failure */
	if (rv0 != DDI_SUCCESS || rv1 != DDI_SUCCESS ||
	    rv2 != DDI_SUCCESS || rv3 != DDI_SUCCESS) {
		if (rv0 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.bbc_estar_ctrl);
		if (rv1 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.rio_mode_auxio);
		if (rv2 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.gpio_bank_select);
		if (rv3 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.gpio_data_ports);
		return (DDI_FAILURE);
	}

	/*
	 * Ppm uses GPIO bits in Bank 0.  Make sure Bank 0 is selected.
	 */
	data8 = SIO_CONFIG2_INDEX;
	XCPPM_SETGET8(unitp->hndls.gpio_bank_select,
	    unitp->regs.gpio_bank_sel_index, data8);
	data8 = XCPPM_GET8(unitp->hndls.gpio_bank_select,
	    unitp->regs.gpio_bank_sel_data);

	data8 &= 0x7f;	/* Set Bit7 to zero */
	XCPPM_SETGET8(unitp->hndls.gpio_bank_select,
	    unitp->regs.gpio_bank_sel_data, data8);

	return (DDI_SUCCESS);
}
298 
299 
300 static int
301 xcppm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
302 {
303 #ifdef DEBUG
304 	char *str = "xcppm_attach";
305 #endif
306 	xcppm_unit_t *unitp;
307 	ppm_domain_t **dompp;
308 	int retval;
309 
310 	DPRINTF(D_ATTACH, ("%s: attach cmd %d\n", str, cmd));
311 	retval = DDI_SUCCESS;
312 
313 	switch (cmd) {
314 	case DDI_ATTACH:
315 		if (ppm_inst != -1) {
316 			DPRINTF(D_ERROR,
317 			    ("%s: instance already attached\n", str));
318 			return (DDI_FAILURE);
319 		}
320 		ppm_inst = ddi_get_instance(dip);
321 
322 		/*
323 		 * Allocate and initialize soft state structure
324 		 */
325 		if (ddi_soft_state_zalloc(ppm_statep, ppm_inst) != 0)
326 			return (DDI_FAILURE);
327 		unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
328 		mutex_init(&unitp->unit_lock, NULL, MUTEX_DRIVER, NULL);
329 		mutex_init(&unitp->creator_lock, NULL, MUTEX_DRIVER, NULL);
330 
331 		if (ddi_create_minor_node(dip, "ppm", S_IFCHR,
332 		    ppm_inst, "ddi_ppm", 0) == DDI_FAILURE) {
333 			ddi_soft_state_free(ppm_statep, ppm_inst);
334 			DPRINTF(D_ERROR,
335 			    ("%s: Can't create minor for 0x%p\n", str, dip));
336 			return (DDI_FAILURE);
337 		}
338 		ddi_report_dev(dip);
339 		unitp->dip = dip;
340 
341 		if (retval = ppm_create_db(dip))
342 			return (retval);
343 
344 		/*
345 		 * Map all of the registers under the ppm node.
346 		 */
347 		if (xcppm_map_all_regs(dip) != DDI_SUCCESS)
348 			return (DDI_FAILURE);
349 
350 		if ((retval =
351 		    pm_register_ppm(ppm_claim_dev, dip)) != DDI_SUCCESS) {
352 			DPRINTF(D_ERROR,
353 			    ("%s: can't register ppm handler\n", str));
354 			return (retval);
355 		}
356 
357 		for (dompp = ppm_domains; *dompp; dompp++)
358 			mutex_init(&(*dompp)->lock, NULL, MUTEX_DRIVER, NULL);
359 
360 		break;
361 
362 	case DDI_RESUME:
363 		unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
364 		mutex_enter(&unitp->unit_lock);
365 		unitp->state &= ~XCPPM_ST_SUSPENDED;
366 		mutex_exit(&unitp->unit_lock);
367 		break;
368 
369 	default:
370 		cmn_err(CE_CONT, "xcppm_attach: unknown "
371 		    "attach command %d, dip 0x%p\n", cmd, dip);
372 		retval = DDI_FAILURE;
373 	}
374 
375 	return (retval);
376 }
377 
378 
379 /*
380  * set the front panel LED:
381  * PPM_LEDON turns it on, PPM_LEDOFF turns it off.
382  * for GPIO register: 0x0 means led-on, 0x2 means led-off.
383  */
384 static void
385 xcppm_set_led(int action)
386 {
387 	xcppm_unit_t *unitp;
388 	uint8_t	reg;
389 
390 	ASSERT(action == PPM_LEDON || action == PPM_LEDOFF);
391 	DPRINTF(D_LED, ("xcppm_set_led: Turn LED %s\n",
392 	    (action == PPM_LEDON) ? "on" : "off"));
393 
394 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
395 	reg = XCPPM_GET8(unitp->hndls.gpio_data_ports,
396 	    unitp->regs.gpio_port1_data);
397 	if (action == PPM_LEDON)
398 		reg &= ~LED;
399 	else
400 		reg |= LED;
401 	XCPPM_SETGET8(unitp->hndls.gpio_data_ports,
402 	    unitp->regs.gpio_port1_data, reg);
403 }
404 
405 
406 static void
407 xcppm_blink_led(void *action)
408 {
409 	xcppm_unit_t *unitp;
410 	int new_action;
411 	clock_t intvl;
412 
413 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
414 	mutex_enter(&unitp->unit_lock);
415 	if (unitp->led_tid == 0) {
416 		mutex_exit(&unitp->unit_lock);
417 		return;
418 	}
419 
420 	if ((int)action == PPM_LEDON) {
421 		new_action = PPM_LEDOFF;
422 		intvl = PPM_LEDOFF_INTERVAL;
423 	} else {
424 		ASSERT((int)action == PPM_LEDOFF);
425 		new_action = PPM_LEDON;
426 		intvl = PPM_LEDON_INTERVAL;
427 	}
428 
429 	xcppm_set_led(new_action);
430 	unitp->led_tid = timeout(xcppm_blink_led, (void *)new_action, intvl);
431 	mutex_exit(&unitp->unit_lock);
432 }
433 
434 
435 static void
436 xcppm_freeze_led(void *action)
437 {
438 	xcppm_unit_t *unitp;
439 	timeout_id_t tid;
440 
441 	DPRINTF(D_LOWEST, ("xcppm_freeze_led: action %d\n", (int)action));
442 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
443 	mutex_enter(&unitp->unit_lock);
444 	tid = unitp->led_tid;
445 	unitp->led_tid = 0;
446 	mutex_exit(&unitp->unit_lock);
447 	untimeout(tid);
448 	mutex_enter(&unitp->unit_lock);
449 	xcppm_set_led((int)action);
450 	mutex_exit(&unitp->unit_lock);
451 }
452 
453 
454 /* ARGSUSED */
455 static int
456 xcppm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
457 {
458 	xcppm_unit_t *unitp;
459 
460 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
461 	DPRINTF(D_DETACH, ("xcppm_detach: cmd %d\n", cmd));
462 
463 	switch (cmd) {
464 	case DDI_DETACH:
465 		return (DDI_FAILURE);
466 
467 	case DDI_SUSPEND:
468 		mutex_enter(&unitp->unit_lock);
469 		unitp->state |= XCPPM_ST_SUSPENDED;
470 		mutex_exit(&unitp->unit_lock);
471 
472 		/*
473 		 * Suspend requires that timeout callouts to be canceled.
474 		 * Turning off the LED blinking will cancel the timeout.
475 		 */
476 		xcppm_freeze_led((void *)PPM_LEDON);
477 		return (DDI_SUCCESS);
478 
479 	default:
480 		return (DDI_FAILURE);
481 	}
482 }
483 
484 
485 /*
486  * Device we claimed has detached.  We must get rid of
487  * our state which was used to track this device.
488  */
489 static void
490 xcppm_detach_ctlop(dev_info_t *dip, power_req_t *reqp)
491 {
492 	ppm_dev_t *ppmd;
493 
494 	ppmd = PPM_GET_PRIVATE(dip);
495 	if (ppmd == NULL || reqp->req.ppm_config_req.result != DDI_SUCCESS)
496 		return;
497 
498 	ppm_rem_dev(dip);
499 }
500 
501 
502 /*
503  * The system is being resumed from a cpr suspend operation and this
504  * device's attach entry will be called shortly.  The driver will set
505  * the device's power to a conventional starting value, and we need to
506  * stay in sync and set our private copy to the same value.
507  */
508 /* ARGSUSED */
509 static void
510 xcppm_resume_ctlop(dev_info_t *dip, power_req_t *reqp)
511 {
512 	ppm_domain_t *domp;
513 	ppm_dev_t *ppmd;
514 	int powered;
515 
516 	ppmd = PPM_GET_PRIVATE(dip);
517 	if (ppmd == NULL)
518 		return;
519 
520 	/*
521 	 * Maintain correct powered count for domain which cares
522 	 */
523 	powered = 0;
524 	domp = ppmd->domp;
525 	mutex_enter(&domp->lock);
526 	if (domp == &xcppm_fet) {
527 		for (ppmd = domp->devlist; ppmd; ppmd = ppmd->next) {
528 			if (ppmd->dip == dip && ppmd->level)
529 				powered++;
530 		}
531 
532 		/*
533 		 * If this device was powered off when the system was
534 		 * suspended, this resume acts like a power-on transition,
535 		 * so we adjust the count.
536 		 */
537 		if (powered == 0)
538 			domp->pwr_cnt++;
539 	}
540 
541 	for (ppmd = domp->devlist; ppmd; ppmd = ppmd->next) {
542 		if (ppmd->dip == dip)
543 			ppmd->level = ppmd->rplvl = PM_LEVEL_UNKNOWN;
544 	}
545 	mutex_exit(&domp->lock);
546 }
547 
548 
549 /*
550  * Change the power level for a component of a device.  If the change
551  * arg is true, we call the framework to actually change the device's
552  * power; otherwise, we just update our own copy of the power level.
553  */
554 static int
555 xcppm_set_level(ppm_dev_t *ppmd, int cmpt, int level, boolean_t change)
556 {
557 #ifdef DEBUG
558 	char *str = "xcppm_set_level";
559 #endif
560 	int ret;
561 
562 	ret = DDI_SUCCESS;
563 	if (change)
564 		ret = pm_power(ppmd->dip, cmpt, level);
565 
566 	DPRINTF(D_SETLVL, ("%s: \"%s\" change=%d, old %d, new %d, ret %d\n",
567 	    str, ppmd->path, change, ppmd->level, level, ret));
568 
569 	if (ret == DDI_SUCCESS) {
570 		ppmd->level = level;
571 		ppmd->rplvl = PM_LEVEL_UNKNOWN;
572 	}
573 
574 	return (ret);
575 }
576 
577 
/* Physically change a component's power level via the framework. */
static int
xcppm_change_power_level(ppm_dev_t *ppmd, int cmpt, int level)
{
	return (xcppm_set_level(ppmd, cmpt, level, B_TRUE));
}
583 
584 
/* Record a level change that already happened; no hardware action. */
static int
xcppm_record_level_change(ppm_dev_t *ppmd, int cmpt, int level)
{
	return (xcppm_set_level(ppmd, cmpt, level, B_FALSE));
}
590 
591 
/*
 * Read or read-modify-write GPIO data port 2, serialized by gpio_lock.
 *
 *   XCPPM_GETBIT:             return the current value of bit(s) "pos"
 *   XCPPM_SETBIT/XCPPM_CLRBIT: set or clear "pos", write the register
 *                             back, and return the new value of "pos"
 *
 * Any other action is a programming error and panics the system.
 */
static uint8_t
xcppm_gpio_port2(int action, uint8_t pos)
{
#ifdef DEBUG
	char *str = "xcppm_gpio_port2";
#endif
	xcppm_unit_t *unitp;
	uint8_t data8, buf8;
	uint8_t	ret;

	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
	mutex_enter(&unitp->gpio_lock);

	/* buf8 keeps the original value for the debug trace */
	data8 = buf8 = XCPPM_GET8(unitp->hndls.gpio_data_ports,
	    unitp->regs.gpio_port2_data);

	switch (action) {
	case XCPPM_GETBIT:
		ret = data8 & pos;
		DPRINTF(D_GPIO, ("%s: READ: GPIO Bank2 value 0x%x\n",
		    str, buf8));
		break;

	case XCPPM_SETBIT:
	case XCPPM_CLRBIT:
		if (action == XCPPM_SETBIT)
			data8 |= pos;
		else
			data8 &= ~pos;
		XCPPM_SETGET8(unitp->hndls.gpio_data_ports,
		    unitp->regs.gpio_port2_data, data8);
		ret = data8 & pos;
		DPRINTF(D_GPIO, ("%s: %s: GPIO Bank2 "
		    "bit 0x%x changed from 0x%x to 0x%x\n",
		    str, (action == XCPPM_SETBIT) ? "UP" : "DOWN",
		    pos, buf8, data8));
		break;

	default:
		/* CE_PANIC does not return, so ret is never used here */
		cmn_err(CE_PANIC, "xcalppm: unrecognized register "
		    "IO command %d\n", action);
		break;
	}
	mutex_exit(&unitp->gpio_lock);

	return (ret);
}
639 
640 
641 /*
642  * Raise the power level of a subrange of cpus.  Used when cpu driver
643  * failed an attempt to lower the power of a cpu (probably because
644  * it got busy).  Need to revert the ones we already changed.
645  *
646  * ecpup = the ppm_dev_t for the cpu which failed to lower power
647  * level = power level to reset prior cpus to
648  */
649 static void
650 xcppm_revert_cpu_power(ppm_dev_t *ecpup, int level)
651 {
652 	ppm_dev_t *cpup;
653 
654 	for (cpup = xcppm_cpu.devlist; cpup != ecpup; cpup = cpup->next) {
655 		DPRINTF(D_CPU, ("xrcp: \"%s\", revert to level %d\n",
656 		    cpup->path, level));
657 		(void) xcppm_change_power_level(cpup, 0, level);
658 	}
659 }
660 
661 /*
662  * Switch the DC/DC converter.  Clearing the GPIO bit in SuperI/O puts
663  * the converter in low power mode and setting the bit puts it back in
664  * normal mode.
665  */
666 static void
667 xcppm_switch_dcdc_converter(int action)
668 {
669 	int tries = XCPPM_VCL_TRIES;
670 	uint_t spl;
671 	uint64_t stick_begin, stick_end;
672 	uint64_t tick_begin, tick_end;
673 	uint64_t cur_speed_ratio, full_speed_ratio;
674 	static int xcppm_dcdc_lpm;
675 
676 	switch (action) {
677 	case XCPPM_SETBIT:
678 		if (xcppm_dcdc_lpm) {
679 			DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: "
680 			    "switch to normal power mode.\n"));
681 			(void) xcppm_gpio_port2(action, HIGHPWR);
682 			xcppm_dcdc_lpm = 0;
683 		}
684 		break;
685 	case XCPPM_CLRBIT:
686 		/*
687 		 * In some fast CPU configurations, DC/DC converter was
688 		 * put in low power mode before CPUs made the transition
689 		 * to 1/32 of clock speed.  In those cases, system was
690 		 * shut down by hardware for protection.  To resolve that
691 		 * problem, we make sure CPUs have made the clock transition
692 		 * before the DC/DC converter has been put to low power mode.
693 		 */
694 		ASSERT(xcppm_dcdc_lpm == 0);
695 		kpreempt_disable();
696 		full_speed_ratio = cpunodes[CPU->cpu_id].clock_freq /
697 		    sys_tick_freq;
698 		while (tries) {
699 			spl = ddi_enter_critical();
700 			tick_begin = gettick_counter();
701 			stick_timestamp((int64_t *)&stick_begin);
702 			ddi_exit_critical(spl);
703 			drv_usecwait(XCPPM_VCL_DELAY);
704 			spl = ddi_enter_critical();
705 			tick_end = gettick_counter();
706 			stick_timestamp((int64_t *)&stick_end);
707 			ddi_exit_critical(spl);
708 			cur_speed_ratio = (tick_end - tick_begin) /
709 			    (stick_end - stick_begin);
710 
711 			/*
712 			 * tick/stick at current speed should at most be
713 			 * equal to full-speed tick/stick, adjusted with
714 			 * full/lowest clock speed ratio.  If not, speed
715 			 * transition has not happened yet.
716 			 */
717 			if (cur_speed_ratio <= ((full_speed_ratio /
718 			    XCPPM_VCL_DIVISOR) + 1)) {
719 				DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: "
720 				    "switch to low power mode.\n"));
721 				(void) xcppm_gpio_port2(action, HIGHPWR);
722 				xcppm_dcdc_lpm = 1;
723 				break;
724 			}
725 			DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: CPU "
726 			    "has not made transition to lowest speed yet "
727 			    "(%d)\n", tries));
728 			tries--;
729 		}
730 		kpreempt_enable();
731 		break;
732 	}
733 }
734 
735 static void
736 xcppm_rio_mode(xcppm_unit_t *unitp, int mode)
737 {
738 	uint32_t data32, buf32;
739 
740 	mutex_enter(&unitp->gpio_lock);
741 	data32 = buf32 = XCPPM_GET32(unitp->hndls.rio_mode_auxio,
742 	    unitp->regs.rio_mode_auxio);
743 	if (mode == XCPPM_SETBIT)
744 		data32 |= RIO_BBC_ESTAR_MODE;
745 	else
746 		data32 &= ~RIO_BBC_ESTAR_MODE;
747 	XCPPM_SETGET32(unitp->hndls.rio_mode_auxio,
748 	    unitp->regs.rio_mode_auxio, data32);
749 	mutex_exit(&unitp->gpio_lock);
750 
751 	DPRINTF(D_CPU, ("xcppm_rio_mode: %s: change from 0x%x to 0x%x\n",
752 	    (mode == XCPPM_SETBIT) ? "DOWN" : "UP", buf32, data32));
753 }
754 
755 
756 /*
757  * change the power level of all cpus to the arg value;
758  * the caller needs to ensure that a legal transition is requested.
759  */
760 static int
761 xcppm_change_cpu_power(int newlevel)
762 {
763 #ifdef DEBUG
764 	char *str = "xcppm_ccp";
765 #endif
766 	int index, level, oldlevel;
767 	int lowest, highest;
768 	int undo_flag, ret;
769 	int speedup, incr;
770 	uint32_t data32;
771 	uint16_t data16;
772 	xcppm_unit_t *unitp;
773 	ppm_dev_t *cpup;
774 	dev_info_t *dip;
775 	char *chstr;
776 
777 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
778 	ASSERT(unitp);
779 	cpup = xcppm_cpu.devlist;
780 	lowest = cpup->lowest;
781 	highest = cpup->highest;
782 
783 	/*
784 	 * not all cpus may have transitioned to a known level by this time
785 	 */
786 	oldlevel = (cpup->level == PM_LEVEL_UNKNOWN) ? highest : cpup->level;
787 	dip = cpup->dip;
788 	ASSERT(dip);
789 
790 	DPRINTF(D_CPU, ("%s: old %d, new %d, highest %d, lowest %d\n",
791 	    str, oldlevel, newlevel, highest, lowest));
792 
793 	if (newlevel > oldlevel) {
794 		chstr = "UP";
795 		speedup = 1;
796 		incr = 1;
797 	} else if (newlevel < oldlevel) {
798 		chstr = "DOWN";
799 		speedup = 0;
800 		incr = -1;
801 	} else
802 		return (DDI_SUCCESS);
803 
804 	undo_flag = 0;
805 	if (speedup) {
806 		/*
807 		 * If coming up from lowest power level, set the E*
808 		 * mode bit in GPIO to make power supply efficient
809 		 * at normal power.
810 		 */
811 		if (oldlevel == cpup->lowest) {
812 			xcppm_switch_dcdc_converter(XCPPM_SETBIT);
813 			undo_flag = 1;
814 		}
815 	} else {
816 		/*
817 		 * set BBC Estar mode bit in RIO AUXIO register
818 		 */
819 		if (oldlevel == highest) {
820 			xcppm_rio_mode(unitp, XCPPM_SETBIT);
821 			undo_flag = 1;
822 		}
823 	}
824 
825 	/*
826 	 * this loop will execute 1x or 2x depending on
827 	 * number of times we need to change clock rates
828 	 */
829 	for (level = oldlevel+incr; level != newlevel+incr; level += incr) {
830 		for (cpup = xcppm_cpu.devlist; cpup; cpup = cpup->next) {
831 			if (cpup->level == level)
832 				continue;
833 			ret = xcppm_change_power_level(cpup, 0, level);
834 			DPRINTF(D_CPU, ("%s: \"%s\", %s to level %d, ret %d\n",
835 			    str, cpup->path, chstr, cpup->level, ret));
836 			if (ret == DDI_SUCCESS)
837 				continue;
838 
839 			/*
840 			 * if the driver was unable to lower cpu speed,
841 			 * the cpu probably got busy; set the previous
842 			 * cpus back to the original level
843 			 */
844 			if (speedup == 0)
845 				xcppm_revert_cpu_power(cpup, level + 1);
846 
847 			if (undo_flag) {
848 				if (speedup)
849 					xcppm_switch_dcdc_converter(
850 					    XCPPM_CLRBIT);
851 				else
852 					xcppm_rio_mode(unitp, XCPPM_CLRBIT);
853 			}
854 			return (ret);
855 		}
856 
857 		index = level - 1;
858 		spm_change_schizo_speed(index);
859 		DPRINTF(D_CPU, ("%s: safari config reg changed\n", str));
860 
861 		/*
862 		 * set the delay times for changing to this rate
863 		 */
864 		data32 = XCPPM_BBC_DELAY(index);
865 		XCPPM_SETGET32(unitp->hndls.bbc_estar_ctrl,
866 		    (caddr_t)unitp->regs.bbc_assert_change, data32);
867 		DPRINTF(D_CPU, ("%s: %s: Wrote E* Assert Change Time "
868 		    "(t1) = 0x%x\n", str, chstr, data32));
869 
870 		data32 = XCPPM_BBC_DELAY(index);
871 		XCPPM_SETGET32(unitp->hndls.bbc_estar_ctrl,
872 		    (caddr_t)unitp->regs.bbc_pll_settle, data32);
873 		DPRINTF(D_CPU, ("%s: %s: Wrote E* PLL Settle Time "
874 		    "(t4) = 0x%x\n", str, chstr, data32));
875 
876 		data16 = bbc_estar_control_masks[index];
877 		XCPPM_SETGET16(unitp->hndls.bbc_estar_ctrl,
878 		    (caddr_t)unitp->regs.bbc_estar_ctrl, data16);
879 		DPRINTF(D_CPU, ("%s: %s: Wrote BCC E* Control = 0x%x\n",
880 		    str, chstr, data16));
881 	}
882 
883 	/*
884 	 * clear CPU Estar Mode bit in the gpio register
885 	 */
886 	if (speedup) {
887 		if (newlevel == highest)
888 			xcppm_rio_mode(unitp, XCPPM_CLRBIT);
889 	} else {
890 		if (newlevel == lowest)
891 			xcppm_switch_dcdc_converter(XCPPM_CLRBIT);
892 	}
893 
894 	return (DDI_SUCCESS);
895 }
896 
897 
898 /*
899  * Process a request to change the power level of a cpu.  If all cpus
900  * don't want to be at the same power yet, or if we are currently
901  * refusing slowdown requests due to thermal stress, just cache the
902  * request.  Otherwise, make the change for all cpus.
903  */
904 /* ARGSUSED */
905 static int
906 xcppm_manage_cpus(dev_info_t *dip, power_req_t *reqp, int *result)
907 {
908 #ifdef DEBUG
909 	char *str = "xcppm_manage_cpus";
910 #endif
911 	int old, new, ret, kmflag;
912 	ppm_dev_t *ppmd;
913 	pm_ppm_devlist_t *devlist = NULL, *p;
914 	int		do_rescan = 0;
915 	dev_info_t	*rescan_dip;
916 
917 	*result = DDI_SUCCESS;
918 	switch (reqp->request_type) {
919 	case PMR_PPM_SET_POWER:
920 		break;
921 	case PMR_PPM_POWER_CHANGE_NOTIFY:
922 		/* cpu driver can`t change cpu power level by itself */
923 	default:
924 		return (DDI_FAILURE);
925 	}
926 
927 	ppmd = PPM_GET_PRIVATE(dip);
928 	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
929 	old = reqp->req.ppm_set_power_req.old_level;
930 	new = reqp->req.ppm_set_power_req.new_level;
931 
932 	/*
933 	 * At power on, the cpus are at full speed.  There is no hardware
934 	 * transition needed for going from unknown to full.  However, the
935 	 * state of the pm framework and cpu driver needs to be adjusted.
936 	 */
937 	if (ppmd->level == PM_LEVEL_UNKNOWN && new == ppmd->highest) {
938 		*result = ret = xcppm_change_power_level(ppmd, 0, new);
939 		if (ret != DDI_SUCCESS) {
940 			DPRINTF(D_CPU, ("%s: Failed to change "
941 			    "power level to %d\n", str, new));
942 		}
943 		return (ret);
944 	}
945 
946 	if (new == ppmd->level) {
947 		DPRINTF(D_CPU, ("%s: already at power level %d\n", str, new));
948 		return (DDI_SUCCESS);
949 	}
950 
951 	ppmd->rplvl = new;
952 
953 	/*
954 	 * A request from lower to higher level transition is granted and
955 	 * made effective on both cpus. For more than two cpu platform model,
956 	 * the following code needs to be modified to remember the rest of
957 	 * the unsoliciting cpus to be rescan'ed.
958 	 * A request from higher to lower must be agreed by all cpus.
959 	 */
960 	for (ppmd = xcppm_cpu.devlist; ppmd; ppmd = ppmd->next) {
961 		if (ppmd->rplvl == new)
962 			continue;
963 
964 		if (new < old) {
965 			DPRINTF(D_SOME, ("%s: not all cpus want to go down to "
966 			    "level %d yet\n", str, new));
967 			return (DDI_SUCCESS);
968 		}
969 
970 		/*
971 		 * If a single cpu requests power up, honor the request
972 		 * by powering up both cpus.
973 		 */
974 		if (new > old) {
975 			DPRINTF(D_SOME, ("%s: powering up device(%s@%s, %p) "
976 			    "because of request from dip(%s@%s, %p), "
977 			    "need pm_rescan\n", str, PM_NAME(ppmd->dip),
978 			    PM_ADDR(ppmd->dip), (void *)ppmd->dip,
979 			    PM_NAME(dip), PM_ADDR(dip), (void *)dip))
980 			do_rescan++;
981 			rescan_dip = ppmd->dip;
982 			break;
983 		}
984 	}
985 
986 	ret = xcppm_change_cpu_power(new);
987 	*result = ret;
988 
989 	if (ret == DDI_SUCCESS) {
990 		if (reqp->req.ppm_set_power_req.canblock == PM_CANBLOCK_BLOCK)
991 			kmflag = KM_SLEEP;
992 		else
993 			kmflag = KM_NOSLEEP;
994 
995 		for (ppmd = xcppm_cpu.devlist; ppmd; ppmd = ppmd->next) {
996 			if (ppmd->dip == dip)
997 				continue;
998 
999 			if ((p = kmem_zalloc(sizeof (pm_ppm_devlist_t),
1000 			    kmflag)) == NULL) {
1001 				break;
1002 			}
1003 			p->ppd_who = ppmd->dip;
1004 			p->ppd_cmpt = ppmd->cmpt;
1005 			p->ppd_old_level = old;
1006 			p->ppd_new_level = new;
1007 			p->ppd_next = devlist;
1008 
1009 			devlist = p;
1010 		}
1011 		reqp->req.ppm_set_power_req.cookie = (void *) devlist;
1012 
1013 		if (do_rescan > 0)
1014 			pm_rescan(rescan_dip);
1015 	}
1016 
1017 	return (ret);
1018 }
1019 
1020 
1021 /*
1022  * If powering off and all devices in this domain will now be off,
1023  * shut off common power.  If powering up and no devices up yet,
1024  * turn on common power.  Always make the requested power level
1025  * change for the target device.
1026  */
1027 static int
1028 xcppm_manage_fet(dev_info_t *dip, power_req_t *reqp, int *result)
1029 {
1030 #ifdef DEBUG
1031 	char *str = "xcppm_manage_fet";
1032 #endif
1033 	int (*pwr_func)(ppm_dev_t *, int, int);
1034 	int new, old, cmpt, incr = 0;
1035 	ppm_dev_t *ppmd;
1036 
1037 	ppmd = PPM_GET_PRIVATE(dip);
1038 	DPRINTF(D_FET, ("%s: \"%s\", req %s\n", str,
1039 	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));
1040 
1041 	*result = DDI_SUCCESS;	/* change later for failures */
1042 	switch (reqp->request_type) {
1043 	case PMR_PPM_SET_POWER:
1044 		pwr_func = xcppm_change_power_level;
1045 		old = reqp->req.ppm_set_power_req.old_level;
1046 		new = reqp->req.ppm_set_power_req.new_level;
1047 		cmpt = reqp->req.ppm_set_power_req.cmpt;
1048 		break;
1049 	case PMR_PPM_POWER_CHANGE_NOTIFY:
1050 		pwr_func = xcppm_record_level_change;
1051 		old = reqp->req.ppm_notify_level_req.old_level;
1052 		new = reqp->req.ppm_notify_level_req.new_level;
1053 		cmpt = reqp->req.ppm_notify_level_req.cmpt;
1054 		break;
1055 	default:
1056 		return (*result = DDI_FAILURE);
1057 
1058 	}
1059 
1060 	/* This is common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
1061 	DPRINTF(D_FET, ("%s: \"%s\", old %d, new %d\n",
1062 	    str, ppmd->path, old, new));
1063 
1064 	ASSERT(old == ppmd->level);
1065 	if (new == ppmd->level)
1066 		return (DDI_SUCCESS);
1067 
1068 	PPM_LOCK_DOMAIN(ppmd->domp);
1069 	/*
1070 	 * Devices in this domain are known to have 0 (off) as their
1071 	 * lowest power level.  We use this fact to simplify the logic.
1072 	 */
1073 	if (new > 0) {
1074 		if (ppmd->domp->pwr_cnt == 0)
1075 			(void) xcppm_gpio_port2(XCPPM_SETBIT, DRVON);
1076 		if (old == 0) {
1077 			ppmd->domp->pwr_cnt++;
1078 			incr = 1;
1079 			DPRINTF(D_FET, ("%s: UP cnt = %d\n",
1080 			    str, ppmd->domp->pwr_cnt));
1081 		}
1082 	}
1083 
1084 	PPM_UNLOCK_DOMAIN(ppmd->domp);
1085 
1086 	ASSERT(ppmd->domp->pwr_cnt > 0);
1087 
1088 	if ((*result = (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS) {
1089 		DPRINTF(D_FET, ("%s: \"%s\" power change failed \n",
1090 		    str, ppmd->path));
1091 	}
1092 
1093 	PPM_LOCK_DOMAIN(ppmd->domp);
1094 
1095 	/*
1096 	 * Decr the power count in two cases:
1097 	 *
1098 	 *   1) request was to power device down and was successful
1099 	 *   2) request was to power up (we pre-incremented count), but failed.
1100 	 */
1101 	if ((*result == DDI_SUCCESS && ppmd->level == 0) ||
1102 	    (*result != DDI_SUCCESS && incr)) {
1103 		ASSERT(ppmd->domp->pwr_cnt > 0);
1104 		ppmd->domp->pwr_cnt--;
1105 		DPRINTF(D_FET, ("%s: DN cnt = %d\n", str, ppmd->domp->pwr_cnt));
1106 		if (ppmd->domp->pwr_cnt == 0)
1107 			(void) xcppm_gpio_port2(XCPPM_CLRBIT, DRVON);
1108 	}
1109 
1110 	PPM_UNLOCK_DOMAIN(ppmd->domp);
1111 	ASSERT(ppmd->domp->pwr_cnt >= 0);
1112 	return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
1113 }
1114 
1115 
1116 /*
1117  * Since UPA64S relies on PCI B staying at nominal 33MHz in order to
1118  * have its interrupt pulse function properly, we ensure
1119  * - Lowering PCI B only if UPA64S is at low power, otherwise defer
1120  *   the action until UPA64S goes down; hence right after UPA64S goes
1121  *   down, perform the deferred action for PCI B;
1122  * - Always raise PCI B power prior to raising UPA64S power.
1123  *
1124  * Both UPA64S and PCI B devices are considered each other's dependency
1125  * device whenever actual power transition is handled (PMR_PPM_SET_POWER).
1126  */
1127 static int
1128 xcppm_manage_pciupa(dev_info_t *dip, power_req_t *reqp, int *result)
1129 {
1130 #ifdef DEBUG
1131 	char *str = "xcppm_manage_pciupa";
1132 #endif
1133 	int (*pwr_func)(ppm_dev_t *, int, int);
1134 	uint_t flags = 0, co_flags = 0;
1135 	ppm_dev_t *ppmd, *codev;
1136 	int new, cmpt, retval;
1137 
1138 	ppmd = PPM_GET_PRIVATE(dip);
1139 	DPRINTF(D_PCIUPA, ("%s: \"%s\", req %s\n", str,
1140 	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));
1141 
1142 	*result = DDI_SUCCESS;
1143 
1144 	switch (reqp->request_type) {
1145 	case PMR_PPM_SET_POWER:
1146 		pwr_func = xcppm_change_power_level;
1147 		new = reqp->req.ppm_set_power_req.new_level;
1148 		cmpt = reqp->req.ppm_set_power_req.cmpt;
1149 		break;
1150 	case PMR_PPM_POWER_CHANGE_NOTIFY:
1151 		pwr_func = xcppm_record_level_change;
1152 		new = reqp->req.ppm_notify_level_req.new_level;
1153 		cmpt = reqp->req.ppm_notify_level_req.cmpt;
1154 		break;
1155 	default:
1156 		*result = DDI_FAILURE;
1157 		return (DDI_FAILURE);
1158 	}
1159 
1160 	/* Common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
1161 	ASSERT(ppmd);	/* since it should be locked already */
1162 
1163 	if (new == ppmd->level)
1164 		return (DDI_SUCCESS);
1165 
1166 	DPRINTF(D_PCIUPA, ("%s: \"%s\", levels: current %d, new %d\n",
1167 	    str, ppmd->path, ppmd->level, new));
1168 
1169 	/*
1170 	 * find power-wise co-related device
1171 	 */
1172 	flags =  ppmd->flags;
1173 
1174 #ifdef DEBUG
1175 	if (flags & ~(XCPPMF_PCIB|XCPPMF_UPA))
1176 		DPRINTF(D_ERROR, ("%s: invalid ppmd->flags value 0x%x\n",
1177 		    ppmd->flags));
1178 #endif
1179 
1180 	if (flags == XCPPMF_UPA)
1181 		co_flags = XCPPMF_PCIB;
1182 	else if (flags == XCPPMF_PCIB)
1183 		co_flags = XCPPMF_UPA;
1184 
1185 	for (codev = ppmd->domp->devlist; codev; codev = codev->next)
1186 		if ((codev->cmpt == 0) && (codev->flags == co_flags))
1187 			break;
1188 
1189 	if (new > ppmd->level) {
1190 		/*
1191 		 * Raise power level -
1192 		 * pre-raising: upa ensure pci is powered up.
1193 		 */
1194 		if ((flags == XCPPMF_UPA) && codev &&
1195 		    (codev->level != codev->highest)) {
1196 			if ((retval = xcppm_change_power_level(codev,
1197 			    0, codev->highest)) != DDI_SUCCESS &&
1198 			    codev->level != codev->highest) {
1199 				*result = retval;
1200 				return (DDI_FAILURE);
1201 			}
1202 		}
1203 		if ((retval = (*pwr_func)(ppmd, 0, new)) != DDI_SUCCESS) {
1204 			*result = retval;
1205 			return (DDI_FAILURE);
1206 		}
1207 	} else if (new < ppmd->level) {
1208 		/*
1209 		 * Lower power level
1210 		 *
1211 		 * once upa is attached, pci checks upa level:
1212 		 * if upa is at high level, defer the request and return.
1213 		 * otherwise, set power level then check and lower pci level.
1214 		 */
1215 		if ((flags == XCPPMF_PCIB) && codev &&
1216 		    (codev->level != codev->lowest)) {
1217 			ppmd->rplvl = new;
1218 			return (DDI_SUCCESS);
1219 		}
1220 		if ((retval = (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS &&
1221 		    ppmd->level != new) {
1222 			*result = retval;
1223 			return (DDI_FAILURE);
1224 		}
1225 
1226 		if (flags == XCPPMF_UPA) {
1227 			if (codev && (codev->rplvl != PM_LEVEL_UNKNOWN) &&
1228 			    (codev->rplvl < codev->level)) {
1229 				DPRINTF(D_PCIUPA, ("%s: codev \"%s\" "
1230 				    "rplvl %d level %d\n", str, codev->path,
1231 				    codev->rplvl, codev->level));
1232 				if ((retval = xcppm_change_power_level(
1233 				    codev, 0, codev->rplvl)) != DDI_SUCCESS) {
1234 					*result = retval;
1235 					return (DDI_FAILURE);
1236 				}
1237 			}
1238 		}
1239 	}
1240 
1241 	return (DDI_SUCCESS);
1242 }
1243 
1244 
1245 /*
1246  * When all of the children of the 1394 nexus are idle, a call will be
1247  * made to the nexus driver's own power entry point to lower power.  Ppm
1248  * intercepts this and kills 1394 cable power (since the driver doesn't
1249  * have access to the required register).  Similar logic applies when
1250  * coming up from the state where all the children were off.
1251  */
static int
xcppm_manage_1394(dev_info_t *dip, power_req_t *reqp, int *result)
{
#ifdef DEBUG
	char *str = "xcppm_manage_1394";
#endif
	/* pwr_func: actually changes power vs. merely records a change */
	int (*pwr_func)(ppm_dev_t *, int, int);
	int new, old, cmpt;
	ppm_dev_t *ppmd;

	ppmd = PPM_GET_PRIVATE(dip);
	DPRINTF(D_1394, ("%s: \"%s\", req %s\n", str,
	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));

	switch (reqp->request_type) {
	case PMR_PPM_SET_POWER:
		pwr_func = xcppm_change_power_level;
		old = reqp->req.ppm_set_power_req.old_level;
		new = reqp->req.ppm_set_power_req.new_level;
		cmpt = reqp->req.ppm_set_power_req.cmpt;
		break;
	case PMR_PPM_POWER_CHANGE_NOTIFY:
		pwr_func = xcppm_record_level_change;
		old = reqp->req.ppm_notify_level_req.old_level;
		new = reqp->req.ppm_notify_level_req.new_level;
		cmpt = reqp->req.ppm_notify_level_req.cmpt;
		break;
	default:
		return (*result = DDI_FAILURE);
	}


	/* Common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
	DPRINTF(D_1394, ("%s: dev %s@%s, old %d new %d\n", str,
	    ddi_binding_name(dip), ddi_get_name_addr(dip), old, new));

	ASSERT(ppmd);	/* since it must already be locked */
	ASSERT(old == ppmd->level);

	if (new == ppmd->level)
		return (*result = DDI_SUCCESS);

	/* the reduce power case */
	if (cmpt == 0 && new < ppmd->level) {
		if ((*result =
		    (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
		/* reached the bottom: kill 1394 cable power */
		if (new == ppmd->lowest)
			(void) xcppm_gpio_port2(XCPPM_CLRBIT, CPEN);
		ppmd->level = new;
		return (DDI_SUCCESS);
	}

	/* the increase power case */
	if (cmpt == 0 && new > ppmd->level) {
		if (ppmd->level == ppmd->lowest) {
			/* restore cable power, then give it a tick to settle */
			(void) xcppm_gpio_port2(XCPPM_SETBIT, CPEN);
			delay(1);
		}
		/*
		 * Even if pwr_func fails we need to check current level again
		 * because it could have been changed by an intervening
		 * POWER_CHANGE_NOTIFY operation.
		 */
		if ((*result =
		    (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS &&
		    ppmd->level == ppmd->lowest) {
			/* raise failed and device is still off: undo CPEN */
			(void) xcppm_gpio_port2(XCPPM_CLRBIT, CPEN);
		} else {
			ppmd->level = new;
		}

		return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
	}

	/*
	 * We get here if component was non-zero.  This is not what we
	 * expect.  Let the device deal with it and just pass back the
	 * result.
	 */
	*result = xcppm_change_power_level(ppmd, cmpt, new);
	return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}
1336 
1337 
1338 /*
1339  * lock, unlock, or trylock for one power mutex
1340  */
1341 static void
1342 xcppm_lock_one(ppm_dev_t *ppmd, power_req_t *reqp, int *iresp)
1343 {
1344 	switch (reqp->request_type) {
1345 	case PMR_PPM_LOCK_POWER:
1346 		pm_lock_power_single(ppmd->dip,
1347 		    reqp->req.ppm_lock_power_req.circp);
1348 		break;
1349 
1350 	case PMR_PPM_UNLOCK_POWER:
1351 		pm_unlock_power_single(ppmd->dip,
1352 		    reqp->req.ppm_unlock_power_req.circ);
1353 		break;
1354 
1355 	case PMR_PPM_TRY_LOCK_POWER:
1356 		*iresp = pm_try_locking_power_single(ppmd->dip,
1357 		    reqp->req.ppm_lock_power_req.circp);
1358 		break;
1359 	}
1360 }
1361 
1362 
1363 /*
1364  * lock, unlock, or trylock all devices within a domain.
1365  */
static void
xcppm_lock_all(ppm_domain_t *domp, power_req_t *reqp, int *iresp)
{
	/*
	 * To simplify the implementation we let all the devices
	 * in the domain be represented by a single device (dip).
	 * We use the first device in the domain's devlist.  This
	 * is safe because we return with the domain lock held
	 * which prevents the list from changing.
	 */
	if (reqp->request_type == PMR_PPM_LOCK_POWER) {
		/* recursive locks only bump the refcnt; mutex taken once */
		if (!MUTEX_HELD(&domp->lock))
			mutex_enter(&domp->lock);
		domp->refcnt++;
		ASSERT(domp->devlist != NULL);
		pm_lock_power_single(domp->devlist->dip,
		    reqp->req.ppm_lock_power_req.circp);
		/* domain lock remains held */
		return;
	} else if (reqp->request_type == PMR_PPM_UNLOCK_POWER) {
		ASSERT(MUTEX_HELD(&domp->lock));
		ASSERT(domp->devlist != NULL);
		pm_unlock_power_single(domp->devlist->dip,
		    reqp->req.ppm_unlock_power_req.circ);
		/* release the domain lock only when the last hold drops */
		if (--domp->refcnt == 0)
			mutex_exit(&domp->lock);
		return;
	}

	ASSERT(reqp->request_type == PMR_PPM_TRY_LOCK_POWER);
	if (!MUTEX_HELD(&domp->lock))
		if (!mutex_tryenter(&domp->lock)) {
			/* could not get domain lock; report trylock failure */
			*iresp = 0;
			return;
		}
	*iresp = pm_try_locking_power_single(domp->devlist->dip,
	    reqp->req.ppm_lock_power_req.circp);
	if (*iresp)
		domp->refcnt++;
	else
		mutex_exit(&domp->lock);
}
1408 
1409 
1410 /*
1411  * The pm framework calls us here to manage power for a device.
1412  * We maintain state which tells us whether we need to turn off/on
1413  * system board power components based on the status of all the devices
1414  * sharing a component.
1415  *
1416  */
1417 /* ARGSUSED */
/* ARGSUSED */
static int
xcppm_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	power_req_t *reqp = arg;
	xcppm_unit_t *unitp;
	ppm_domain_t *domp;
	ppm_dev_t *ppmd;

#ifdef DEBUG
	char path[MAXPATHLEN], *ctlstr, *str = "xcppm_ctlops";
	uint_t mask = ppm_debug & (D_CTLOPS1 | D_CTLOPS2);
	if (mask && (ctlstr = ppm_get_ctlstr(reqp->request_type, mask))) {
		prom_printf("%s: \"%s\", %s\n", str,
		    ddi_pathname(rdip, path), ctlstr);
	}
#endif

	/* this bus ctl entry point only handles power requests */
	if (ctlop != DDI_CTLOPS_POWER)
		return (DDI_FAILURE);

	switch (reqp->request_type) {
	/* no platform-specific action needed for these lifecycle events */
	case PMR_PPM_UNMANAGE:
	case PMR_PPM_PRE_PROBE:
	case PMR_PPM_POST_PROBE:
	case PMR_PPM_PRE_ATTACH:
	case PMR_PPM_PRE_DETACH:
		return (DDI_SUCCESS);

	/*
	 * There is no hardware configuration required to be done on this
	 * platform prior to installing drivers.
	 */
	case PMR_PPM_INIT_CHILD:
	case PMR_PPM_UNINIT_CHILD:
		return (DDI_SUCCESS);

	case PMR_PPM_ALL_LOWEST:
		DPRINTF(D_LOWEST, ("%s: all devices at lowest power = %d\n",
		    str, reqp->req.ppm_all_lowest_req.mode));
		if (reqp->req.ppm_all_lowest_req.mode == PM_ALL_LOWEST) {
			unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
			mutex_enter(&unitp->unit_lock);
			/* don't touch the LED while suspended */
			if (unitp->state & XCPPM_ST_SUSPENDED) {
				mutex_exit(&unitp->unit_lock);
				return (DDI_SUCCESS);
			}

			/* start the blinking-LED cycle: on now, toggle later */
			xcppm_set_led(PPM_LEDON);
			unitp->led_tid = timeout(xcppm_blink_led,
			    (void *)PPM_LEDON, PPM_LEDON_INTERVAL);
			mutex_exit(&unitp->unit_lock);
			DPRINTF(D_LOWEST, ("%s: LED blink started\n", str));
		} else {
			/* leaving all-lowest: stop blinking, leave LED on */
			xcppm_freeze_led((void *)PPM_LEDON);
			DPRINTF(D_LOWEST, ("%s: LED freeze ON\n", str));
		}
		return (DDI_SUCCESS);

	case PMR_PPM_POST_ATTACH:
		/*
		 * After a successful attach, if we haven't already created
		 * our private data structure for this device, ppm_get_dev()
		 * will force it to be created.
		 */
		ppmd = PPM_GET_PRIVATE(rdip);
		if (reqp->req.ppm_config_req.result != DDI_SUCCESS) {
			if (ppmd)
				ppm_rem_dev(rdip);
		} else if (!ppmd) {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			(void) ppm_get_dev(rdip, domp);
		}
		return (DDI_SUCCESS);

	case PMR_PPM_POST_DETACH:
		xcppm_detach_ctlop(rdip, reqp);
		*(int *)result = DDI_SUCCESS;
		return (DDI_SUCCESS);

	case PMR_PPM_PRE_RESUME:
		xcppm_resume_ctlop(rdip, reqp);
		return (DDI_SUCCESS);

	case PMR_PPM_UNLOCK_POWER:
	case PMR_PPM_TRY_LOCK_POWER:
	case PMR_PPM_LOCK_POWER:
		ppmd = PPM_GET_PRIVATE(rdip);
		if (ppmd)
			domp = ppmd->domp;
		else if (reqp->request_type != PMR_PPM_UNLOCK_POWER) {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			ppmd = ppm_get_dev(rdip, domp);
		}
		/*
		 * NOTE(review): if ppmd is NULL and the request is
		 * PMR_PPM_UNLOCK_POWER, domp is left unassigned before the
		 * dereferences below — presumably an unlock can only follow
		 * a lock that created ppmd; confirm against the framework.
		 */

		ASSERT(domp->dflags == PPMD_LOCK_ALL ||
		    domp->dflags == PPMD_LOCK_ONE);
		DPRINTF(D_LOCKS, ("xcppm_lock_%s: \"%s\", %s\n",
		    (domp->dflags == PPMD_LOCK_ALL) ? "all" : "one",
		    ppmd->path, ppm_get_ctlstr(reqp->request_type, D_LOCKS)));

		if (domp->dflags == PPMD_LOCK_ALL)
			xcppm_lock_all(domp, reqp, result);
		else
			xcppm_lock_one(ppmd, reqp, result);
		return (DDI_SUCCESS);

	case PMR_PPM_POWER_LOCK_OWNER:
		ASSERT(reqp->req.ppm_power_lock_owner_req.who == rdip);
		ppmd = PPM_GET_PRIVATE(rdip);
		if (ppmd)
			domp = ppmd->domp;
		else {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			ppmd = ppm_get_dev(rdip, domp);
		}

		/*
		 * In case of LOCK_ALL, effective owner of the power lock
		 * is the owner of the domain lock. otherwise, it is the owner
		 * of the power lock.
		 */
		if (domp->dflags & PPMD_LOCK_ALL)
			reqp->req.ppm_power_lock_owner_req.owner =
			    mutex_owner(&domp->lock);
		else {
			reqp->req.ppm_power_lock_owner_req.owner =
			    DEVI(rdip)->devi_busy_thread;
		}
		return (DDI_SUCCESS);

	default:
		/* power set/notify requests: route to the domain manager */
		ppmd = PPM_GET_PRIVATE(rdip);
		if (ppmd == NULL) {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			ppmd = ppm_get_dev(rdip, domp);
		}

#ifdef DEBUG
		if ((reqp->request_type == PMR_PPM_SET_POWER) &&
		    (ppm_debug & D_SETPWR)) {
			prom_printf("%s: \"%s\", PMR_PPM_SET_POWER\n",
			    str, ppmd->path);
		}
#endif

		if (ppmd->domp == &xcppm_cpu)
			return (xcppm_manage_cpus(rdip, reqp, result));
		else if (ppmd->domp == &xcppm_fet)
			return (xcppm_manage_fet(rdip, reqp, result));
		else if (ppmd->domp == &xcppm_upa)
			return (xcppm_manage_pciupa(rdip, reqp, result));
		else {
			ASSERT(ppmd->domp == &xcppm_1394);
			return (xcppm_manage_1394(rdip, reqp, result));
		}
	}
}
1580 
1581 
1582 /*
1583  * Initialize our private version of real power level
1584  * as well as lowest and highest levels the device supports;
1585  * see ppmf and ppm_add_dev
1586  */
static void
xcppm_dev_init(ppm_dev_t *ppmd)
{
	struct pm_component *dcomps;
	struct pm_comp *pm_comp;
	dev_info_t *dip;
	int maxi;

	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
	ppmd->level = PM_LEVEL_UNKNOWN;
	ppmd->rplvl = PM_LEVEL_UNKNOWN;

	dip = ppmd->dip;
	/*
	 * ppm exists to handle power-manageable devices which require
	 * special handling on the current platform.  However, a
	 * driver for such a device may choose not to support power
	 * management on a particular load/attach.  In this case we
	 * we create a structure to represent a single-component device
	 * for which "level" = PM_LEVEL_UNKNOWN and "lowest" = 0
	 * are effectively constant.
	 */
	if (PM_GET_PM_INFO(dip)) {
		/* lowest/highest come from the component's level table */
		dcomps = DEVI(dip)->devi_pm_components;
		pm_comp = &dcomps[ppmd->cmpt].pmc_comp;

		ppmd->lowest = pm_comp->pmc_lvals[0];
		ASSERT(ppmd->lowest >= 0);
		maxi = pm_comp->pmc_numlevels - 1;
		ppmd->highest = pm_comp->pmc_lvals[maxi];
	}

	/*
	 * add any domain-specific initialization here
	 */
	if (ppmd->domp == &xcppm_fet) {
		/*
		 * when a new device is added to the powerfet domain
		 * it is counted here as being powered up.
		 */
		ppmd->domp->pwr_cnt++;
		DPRINTF(D_FET, ("xcppm_dev_init: UP cnt = %d\n",
		    ppmd->domp->pwr_cnt));
	} else if (ppmd->domp == &xcppm_upa) {
		/*
		 * There may be a better way to determine the device type
		 * instead of comparing to hard coded string names.
		 */
		if (strstr(ppmd->path, "pci@8,700000"))
			ppmd->flags = XCPPMF_PCIB;
		else if (strstr(ppmd->path, "upa@8,480000"))
			ppmd->flags = XCPPMF_UPA;
	}
}
1641 
1642 
1643 /*
1644  * see ppmf and ppm_rem_dev
1645  */
1646 static void
1647 xcppm_dev_fini(ppm_dev_t *ppmd)
1648 {
1649 	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
1650 	if (ppmd->domp == &xcppm_fet) {
1651 		if (ppmd->level != ppmd->lowest) {
1652 			ppmd->domp->pwr_cnt--;
1653 			DPRINTF(D_FET, ("xcppm_dev_fini: DN cnt = %d\n",
1654 			    ppmd->domp->pwr_cnt));
1655 		};
1656 	}
1657 }
1658 
1659 
1660 /*
1661  * see ppmf and ppm_ioctl, PPMIOCSET
1662  */
1663 static void
1664 xcppm_iocset(uint8_t value)
1665 {
1666 	int action;
1667 
1668 	if (value == PPM_IDEV_POWER_ON)
1669 		action = XCPPM_SETBIT;
1670 	else if (value == PPM_IDEV_POWER_OFF)
1671 		action = XCPPM_CLRBIT;
1672 	(void) xcppm_gpio_port2(action, DRVON);
1673 }
1674 
1675 
1676 /*
1677  * see ppmf and ppm_ioctl, PPMIOCGET
1678  */
1679 static uint8_t
1680 xcppm_iocget(void)
1681 {
1682 	uint8_t bit;
1683 
1684 	bit = xcppm_gpio_port2(XCPPM_GETBIT, DRVON);
1685 	return ((bit == DRVON) ? PPM_IDEV_POWER_ON : PPM_IDEV_POWER_OFF);
1686 }
1687