xref: /titanic_41/usr/src/uts/sun4u/excalibur/io/xcalppm.c (revision 6be356c5780a1ccb886bba08d6eb56b61f021564)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Platform Power Management driver for SUNW,Sun-Blade-1000
30  */
31 #include <sys/modctl.h>
32 #include <sys/conf.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/ddi_impldefs.h>
36 #include <sys/ppmvar.h>
37 #include <sys/ppmio.h>
38 #include <sys/xcalppm_reg.h>
39 #include <sys/xcalppm_var.h>
40 #include <sys/stat.h>
41 #include <sys/epm.h>
42 #include <sys/archsystm.h>
43 #include <sys/cpuvar.h>
44 #include <sys/cheetahregs.h>
45 #include <sys/us3_module.h>
46 
47 /*
48  * Locking Considerations
49  *
50  * To look at and/or modify xcppm_domain fields or elements of its list of
51  * xcppm_dev structures the domain_lock for the affected domain must be held.
52  *
53  * When the autopm framework needs to change the power of a component of a
54  * device, it needs to hold the associated power lock (see discussion at
55  * top of uts/common/os/sunpm.c).
56  *
57  * If the framework needs to lock a dev/cmpt for a device which this ppm
58  * has claimed, xcppm_ctlops will be called with PMR_PPM_LOCK_POWER.  Ppm
59  * needs to be involved because, due to platform constraints, changing the
60  * power of one device may require that other devices be changed in the same
61  * operation.
62  *
63  * In some domains (e.g., cpus) the power lock must be acquired for all the
64  * affected devices to avoid possible corruption of the power states.  The
65  * joint change must be an atomic operation.  Ppm handles this by acquiring
66  * the domain lock, then walking the list of affected devices and acquiring
67  * the power lock for each of them.  To unlock, the list is traversed and
68  * each of the power locks is freed, followed by freeing the domain lock.
69  *
70  * For other domains ppm will only be changing the power of a single device
71  * that is known to the framework.  In these cases, the locking is done by
72  * acquiring the domain lock and directly calling the framework routine for
73  * getting a single power lock.
74  */
75 
76 static int	xcppm_attach(dev_info_t *, ddi_attach_cmd_t);
77 static int	xcppm_detach(dev_info_t *, ddi_detach_cmd_t);
78 static int	xcppm_ctlops(dev_info_t *, dev_info_t *,
79 		    ddi_ctl_enum_t, void *, void *);
80 static void	xcppm_dev_init(ppm_dev_t *);
81 static void	xcppm_dev_fini(ppm_dev_t *);
82 static void	xcppm_iocset(uint8_t);
83 static uint8_t	xcppm_iocget(void);
84 
85 /*
86  * Note: 1394 and pciupa were originally required to be LOCK_ALL domains.
87  * However, the underlying nexus drivers aren't able to do power mgmt
88  * (because of hw implementation issues).  The locking protocol for these
89  * domains is changed to LOCK_ONE to simplify other code.  The domain
90  * code itself will be removed in the future.
91  */
/*
 * Power management domains claimed by this platform driver.  The cpu
 * domain uses LOCK_ALL because all cpus must change speed jointly;
 * the remaining domains lock one device at a time (see block comment
 * at the top of this file).
 */
static ppm_domain_t xcppm_1394 = { "domain_1394",	PPMD_LOCK_ONE };
static ppm_domain_t xcppm_cpu  = { "domain_cpu",	PPMD_LOCK_ALL };
static ppm_domain_t xcppm_fet  = { "domain_powerfet",	PPMD_LOCK_ONE };
static ppm_domain_t xcppm_upa  = { "domain_pciupa",	PPMD_LOCK_ONE };

/* NULL-terminated domain list consumed by the common ppm code */
ppm_domain_t *ppm_domains[] = {
	&xcppm_1394,
	&xcppm_cpu,
	&xcppm_fet,
	&xcppm_upa,
	NULL
};
104 
105 
/*
 * Platform-specific operations vector exported to the common ppm code.
 */
struct ppm_funcs ppmf = {
	xcppm_dev_init,			/* dev_init */
	xcppm_dev_fini,			/* dev_fini */
	xcppm_iocset,			/* iocset */
	xcppm_iocget,			/* iocget */
};
112 
113 
114 /*
115  * The order of entries must be from slowest to fastest and in
116  * one-to-one correspondence with the cpu_level array.
117  */
static const uint16_t bbc_estar_control_masks[] = {
	BBC_ESTAR_SLOW, BBC_ESTAR_MEDIUM, BBC_ESTAR_FAST
};

/* presumably a tunable settle delay for BBC E* writes -- usage not in view */
int bbc_delay = 10;			/* microsec */
123 
124 
125 /*
126  * Configuration data structures
127  */
/*
 * Character device entry points.  open/close/ioctl are serviced by the
 * common ppm code; all other entry points are unsupported (nodev).
 */
static struct cb_ops xcppm_cb_ops = {
	ppm_open,		/* open */
	ppm_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	ppm_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* streamtab */
	D_MP | D_NEW,		/* driver compatibility flag */
	CB_REV,			/* cb_ops revision */
	nodev,			/* async read */
	nodev			/* async write */
};
148 
/*
 * Bus nexus operations.  DMA is explicitly unsupported (ddi_no_dma_*);
 * the significant entry point is xcppm_ctlops, through which the pm
 * framework delivers its PMR_* control requests.
 */
static struct bus_ops xcppm_bus_ops = {
	BUSO_REV,
	0,
	0,
	0,
	0,
	0,
	ddi_no_dma_map,
	ddi_no_dma_allochdl,
	ddi_no_dma_freehdl,
	ddi_no_dma_bindhdl,
	ddi_no_dma_unbindhdl,
	ddi_no_dma_flush,
	ddi_no_dma_win,
	ddi_no_dma_mctl,
	xcppm_ctlops,
	0,
	0,			/* (*bus_get_eventcookie)();	*/
	0,			/* (*bus_add_eventcall)();	*/
	0,			/* (*bus_remove_eventcall)();	*/
	0			/* (*bus_post_event)();		*/
};
171 
/*
 * Device operations; attach/detach are local, getinfo comes from the
 * common ppm code.
 */
static struct dev_ops xcppm_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	ppm_getinfo,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	xcppm_attach,		/* attach */
	xcppm_detach,		/* detach */
	nodev,			/* reset */
	&xcppm_cb_ops,		/* driver operations */
	&xcppm_bus_ops,		/* bus operations */
	NULL,			/* power */
};
185 
186 extern struct mod_ops mod_driverops;
187 
/* Loadable module linkage for this driver */
static struct modldrv modldrv = {
	&mod_driverops,		/* type of module - pseudo */
	"platform pm driver v%I%",
	&xcppm_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
199 
200 
/*
 * Module load entry: register with the common ppm framework, which
 * installs the module and sets up per-instance soft state of size
 * sizeof (xcppm_unit_t).
 */
int
_init(void)
{
	return (ppm_init(&modlinkage, sizeof (xcppm_unit_t), "xc"));
}
206 
207 
/*
 * This driver refuses to unload unconditionally; once the platform
 * power management state is established it must stay resident.
 */
int
_fini(void)
{
	return (EBUSY);
}
213 
214 
/* Standard module information entry point. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
220 
221 
/*
 * Map the four register sets under the ppm node and derive pointers to
 * the individual registers used by this driver.  If any mapping fails,
 * every mapping that did succeed is released and DDI_FAILURE is
 * returned.  On success, GPIO Bank 0 is selected in the SuperI/O,
 * since all GPIO bits ppm uses live in that bank.
 */
static int
xcppm_map_all_regs(dev_info_t *dip)
{
	ddi_device_acc_attr_t attr_be, attr_le;
	int rv0, rv1, rv2, rv3;
	xcppm_unit_t *unitp;
	caddr_t base_addr;
	uint8_t data8;

	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
	/* BBC registers are big-endian */
	attr_be.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr_be.devacc_attr_endian_flags  = DDI_STRUCTURE_BE_ACC;
	attr_be.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* RIO and SuperI/O GPIO registers are little-endian */
	attr_le.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr_le.devacc_attr_endian_flags  = DDI_STRUCTURE_LE_ACC;
	attr_le.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* reg set 0: BBC E* control / assert-change / PLL-settle */
	rv0 = ddi_regs_map_setup(dip, 0, &base_addr, 0, 0, &attr_be,
	    &unitp->hndls.bbc_estar_ctrl);

	unitp->regs.bbc_estar_ctrl = (uint16_t *)(base_addr +
	    BBC_ESTAR_CTRL_OFFSET);
	unitp->regs.bbc_assert_change = (uint32_t *)(base_addr +
	    BBC_ASSERT_CHANGE_OFFSET);
	unitp->regs.bbc_pll_settle = (uint32_t *)(base_addr +
	    BBC_PLL_SETTLE_OFFSET);

	/* reg set 1: RIO mode auxio register */
	rv1 = ddi_regs_map_setup(dip, 1,
	    (caddr_t *)&unitp->regs.rio_mode_auxio,
	    0, 0, &attr_le, &unitp->hndls.rio_mode_auxio);

	/* reg set 2: SuperI/O GPIO bank select index/data pair */
	rv2 = ddi_regs_map_setup(dip, 2, &base_addr,
	    0, 0, &attr_le, &unitp->hndls.gpio_bank_select);

	unitp->regs.gpio_bank_sel_index = (uint8_t *)(base_addr +
	    GPIO_BANK_SEL_INDEX_OFFSET);
	unitp->regs.gpio_bank_sel_data = (uint8_t *)(base_addr +
	    GPIO_BANK_SEL_DATA_OFFSET);

	/* reg set 3: GPIO data ports 1 and 2 */
	rv3 = ddi_regs_map_setup(dip, 3, &base_addr, 0, 0, &attr_le,
	    &unitp->hndls.gpio_data_ports);

	unitp->regs.gpio_port1_data = (uint8_t *)(base_addr +
	    GPIO_PORT1_DATA_OFFSET);
	unitp->regs.gpio_port2_data = (uint8_t *)(base_addr +
	    GPIO_PORT2_DATA_OFFSET);

	/* on any failure, free only the mappings that succeeded */
	if (rv0 != DDI_SUCCESS || rv1 != DDI_SUCCESS ||
	    rv2 != DDI_SUCCESS || rv3 != DDI_SUCCESS) {
		if (rv0 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.bbc_estar_ctrl);
		if (rv1 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.rio_mode_auxio);
		if (rv2 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.gpio_bank_select);
		if (rv3 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.gpio_data_ports);
		return (DDI_FAILURE);
	}

	/*
	 * Ppm uses GPIO bits in Bank 0.  Make sure Bank 0 is selected.
	 */
	data8 = SIO_CONFIG2_INDEX;
	XCPPM_SETGET8(unitp->hndls.gpio_bank_select,
	    unitp->regs.gpio_bank_sel_index, data8);
	data8 = XCPPM_GET8(unitp->hndls.gpio_bank_select,
	    unitp->regs.gpio_bank_sel_data);

	data8 &= 0x7f;	/* Set Bit7 to zero */
	XCPPM_SETGET8(unitp->hndls.gpio_bank_select,
	    unitp->regs.gpio_bank_sel_data, data8);

	return (DDI_SUCCESS);
}
298 
299 
/*
 * Attach entry point.  DDI_ATTACH sets up the single supported
 * instance: soft state, minor node, the ppm device database, register
 * mappings, registration with the pm framework, and the per-domain
 * locks.  DDI_RESUME simply clears the suspended flag.
 */
static int
xcppm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
#ifdef DEBUG
	char *str = "xcppm_attach";
#endif
	xcppm_unit_t *unitp;
	ppm_domain_t **dompp;
	int retval;

	DPRINTF(D_ATTACH, ("%s: attach cmd %d\n", str, cmd));
	retval = DDI_SUCCESS;

	switch (cmd) {
	case DDI_ATTACH:
		/* only a single instance of this driver is supported */
		if (ppm_inst != -1) {
			DPRINTF(D_ERROR,
			    ("%s: instance already attached\n", str));
			return (DDI_FAILURE);
		}
		ppm_inst = ddi_get_instance(dip);

		/*
		 * Allocate and initialize soft state structure
		 */
		if (ddi_soft_state_zalloc(ppm_statep, ppm_inst) != 0)
			return (DDI_FAILURE);
		unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
		mutex_init(&unitp->unit_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&unitp->creator_lock, NULL, MUTEX_DRIVER, NULL);

		if (ddi_create_minor_node(dip, "ppm", S_IFCHR,
		    ppm_inst, "ddi_ppm", 0) == DDI_FAILURE) {
			ddi_soft_state_free(ppm_statep, ppm_inst);
			DPRINTF(D_ERROR,
			    ("%s: Can't create minor for 0x%p\n", str, dip));
			return (DDI_FAILURE);
		}
		ddi_report_dev(dip);
		unitp->dip = dip;

		/*
		 * NOTE(review): failures from here on (ppm_create_db,
		 * xcppm_map_all_regs, pm_register_ppm) return without
		 * removing the minor node, freeing the soft state or
		 * destroying the mutexes initialized above -- confirm
		 * this is acceptable for a one-shot platform driver.
		 */
		if (retval = ppm_create_db(dip))
			return (retval);

		/*
		 * Map all of the registers under the ppm node.
		 */
		if (xcppm_map_all_regs(dip) != DDI_SUCCESS)
			return (DDI_FAILURE);

		if ((retval =
		    pm_register_ppm(ppm_claim_dev, dip)) != DDI_SUCCESS) {
			DPRINTF(D_ERROR,
			    ("%s: can't register ppm handler\n", str));
			return (retval);
		}

		for (dompp = ppm_domains; *dompp; dompp++)
			mutex_init(&(*dompp)->lock, NULL, MUTEX_DRIVER, NULL);

		break;

	case DDI_RESUME:
		unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
		mutex_enter(&unitp->unit_lock);
		unitp->state &= ~XCPPM_ST_SUSPENDED;
		mutex_exit(&unitp->unit_lock);
		break;

	default:
		cmn_err(CE_CONT, "xcppm_attach: unknown "
		    "attach command %d, dip 0x%p\n", cmd, dip);
		retval = DDI_FAILURE;
	}

	return (retval);
}
377 
378 
379 /*
380  * set the front panel LED:
381  * PPM_LEDON turns it on, PPM_LEDOFF turns it off.
382  * for GPIO register: 0x0 means led-on, 0x2 means led-off.
383  */
384 static void
385 xcppm_set_led(int action)
386 {
387 	xcppm_unit_t *unitp;
388 	uint8_t	reg;
389 
390 	ASSERT(action == PPM_LEDON || action == PPM_LEDOFF);
391 	DPRINTF(D_LED, ("xcppm_set_led: Turn LED %s\n",
392 	    (action == PPM_LEDON) ? "on" : "off"));
393 
394 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
395 	reg = XCPPM_GET8(unitp->hndls.gpio_data_ports,
396 	    unitp->regs.gpio_port1_data);
397 	if (action == PPM_LEDON)
398 		reg &= ~LED;
399 	else
400 		reg |= LED;
401 	XCPPM_SETGET8(unitp->hndls.gpio_data_ports,
402 	    unitp->regs.gpio_port1_data, reg);
403 }
404 
405 
/*
 * timeout(9F) callback implementing LED blinking: flip the LED to the
 * state opposite of "action", then reschedule ourself with the
 * interval appropriate for the new state.  If led_tid has been zeroed
 * (by xcppm_freeze_led) blinking has been cancelled and we bail out
 * without rescheduling.
 */
static void
xcppm_blink_led(void *action)
{
	xcppm_unit_t *unitp;
	int new_action;
	clock_t intvl;

	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
	mutex_enter(&unitp->unit_lock);
	/* blinking has been turned off; do not touch the LED or reschedule */
	if (unitp->led_tid == 0) {
		mutex_exit(&unitp->unit_lock);
		return;
	}

	/* alternate: on -> off with off-interval, off -> on with on-interval */
	if ((int)(uintptr_t)action == PPM_LEDON) {
		new_action = PPM_LEDOFF;
		intvl = PPM_LEDOFF_INTERVAL;
	} else {
		ASSERT((int)(uintptr_t)action == PPM_LEDOFF);
		new_action = PPM_LEDON;
		intvl = PPM_LEDON_INTERVAL;
	}

	xcppm_set_led(new_action);
	unitp->led_tid = timeout(xcppm_blink_led, (void *)(uintptr_t)new_action,
	    intvl);
	mutex_exit(&unitp->unit_lock);
}
434 
435 
/*
 * Stop LED blinking and freeze the LED in the state given by "action"
 * (PPM_LEDON/PPM_LEDOFF).  Clearing led_tid under unit_lock tells an
 * already-running xcppm_blink_led callback not to reschedule; the lock
 * is dropped across untimeout(9F) because the callback itself acquires
 * unit_lock, so holding it here could deadlock.
 */
static void
xcppm_freeze_led(void *action)
{
	xcppm_unit_t *unitp;
	timeout_id_t tid;

	DPRINTF(D_LOWEST, ("xcppm_freeze_led: action %d\n",
	    (int)(uintptr_t)action));
	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
	mutex_enter(&unitp->unit_lock);
	tid = unitp->led_tid;
	unitp->led_tid = 0;
	mutex_exit(&unitp->unit_lock);
	untimeout(tid);
	mutex_enter(&unitp->unit_lock);
	xcppm_set_led((int)(uintptr_t)action);
	mutex_exit(&unitp->unit_lock);
}
454 
455 
/*
 * Detach entry point.  DDI_DETACH is always refused (this driver stays
 * resident); DDI_SUSPEND marks the unit suspended and cancels the LED
 * blink timeout, leaving the LED on.
 */
/* ARGSUSED */
static int
xcppm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	xcppm_unit_t *unitp;

	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
	DPRINTF(D_DETACH, ("xcppm_detach: cmd %d\n", cmd));

	switch (cmd) {
	case DDI_DETACH:
		/* this driver can never be detached */
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		mutex_enter(&unitp->unit_lock);
		unitp->state |= XCPPM_ST_SUSPENDED;
		mutex_exit(&unitp->unit_lock);

		/*
		 * Suspend requires that timeout callouts to be canceled.
		 * Turning off the LED blinking will cancel the timeout.
		 */
		xcppm_freeze_led((void *)PPM_LEDON);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
485 
486 
487 /*
488  * Device we claimed has detached.  We must get rid of
489  * our state which was used to track this device.
490  */
491 static void
492 xcppm_detach_ctlop(dev_info_t *dip, power_req_t *reqp)
493 {
494 	ppm_dev_t *ppmd;
495 
496 	ppmd = PPM_GET_PRIVATE(dip);
497 	if (ppmd == NULL || reqp->req.ppm_config_req.result != DDI_SUCCESS)
498 		return;
499 
500 	ppm_rem_dev(dip);
501 }
502 
503 
504 /*
505  * The system is being resumed from a cpr suspend operation and this
506  * device's attach entry will be called shortly.  The driver will set
507  * the device's power to a conventional starting value, and we need to
508  * stay in sync and set our private copy to the same value.
509  */
/* ARGSUSED */
static void
xcppm_resume_ctlop(dev_info_t *dip, power_req_t *reqp)
{
	ppm_domain_t *domp;
	ppm_dev_t *ppmd;
	int powered;

	/* nothing to do for devices we never claimed */
	ppmd = PPM_GET_PRIVATE(dip);
	if (ppmd == NULL)
		return;

	/*
	 * Maintain correct powered count for domain which cares
	 */
	powered = 0;
	domp = ppmd->domp;
	mutex_enter(&domp->lock);
	if (domp == &xcppm_fet) {
		/* count this dip's components that were on at suspend time */
		for (ppmd = domp->devlist; ppmd; ppmd = ppmd->next) {
			if (ppmd->dip == dip && ppmd->level)
				powered++;
		}

		/*
		 * If this device was powered off when the system was
		 * suspended, this resume acts like a power-on transition,
		 * so we adjust the count.
		 */
		if (powered == 0)
			domp->pwr_cnt++;
	}

	/* driver will re-set power in attach; mark our copies unknown */
	for (ppmd = domp->devlist; ppmd; ppmd = ppmd->next) {
		if (ppmd->dip == dip)
			ppmd->level = ppmd->rplvl = PM_LEVEL_UNKNOWN;
	}
	mutex_exit(&domp->lock);
}
549 
550 
551 /*
552  * Change the power level for a component of a device.  If the change
553  * arg is true, we call the framework to actually change the device's
554  * power; otherwise, we just update our own copy of the power level.
555  */
556 static int
557 xcppm_set_level(ppm_dev_t *ppmd, int cmpt, int level, boolean_t change)
558 {
559 #ifdef DEBUG
560 	char *str = "xcppm_set_level";
561 #endif
562 	int ret;
563 
564 	ret = DDI_SUCCESS;
565 	if (change)
566 		ret = pm_power(ppmd->dip, cmpt, level);
567 
568 	DPRINTF(D_SETLVL, ("%s: \"%s\" change=%d, old %d, new %d, ret %d\n",
569 	    str, ppmd->path, change, ppmd->level, level, ret));
570 
571 	if (ret == DDI_SUCCESS) {
572 		ppmd->level = level;
573 		ppmd->rplvl = PM_LEVEL_UNKNOWN;
574 	}
575 
576 	return (ret);
577 }
578 
579 
/* Actually change the device's power through the framework (see above). */
static int
xcppm_change_power_level(ppm_dev_t *ppmd, int cmpt, int level)
{
	return (xcppm_set_level(ppmd, cmpt, level, B_TRUE));
}
585 
586 
/* Record a level change that already happened; no hardware access. */
static int
xcppm_record_level_change(ppm_dev_t *ppmd, int cmpt, int level)
{
	return (xcppm_set_level(ppmd, cmpt, level, B_FALSE));
}
592 
593 
/*
 * Read, set, or clear a bit in GPIO data port 2, serialized by
 * gpio_lock.  "action" is XCPPM_GETBIT, XCPPM_SETBIT or XCPPM_CLRBIT;
 * "pos" is the bit mask to operate on.  Returns the (new) value of
 * the selected bit.  Any other action is a programming error and
 * panics the system.
 */
static uint8_t
xcppm_gpio_port2(int action, uint8_t pos)
{
#ifdef DEBUG
	char *str = "xcppm_gpio_port2";
#endif
	xcppm_unit_t *unitp;
	uint8_t data8, buf8;
	uint8_t	ret;

	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
	mutex_enter(&unitp->gpio_lock);

	/* buf8 keeps the original value for the debug trace below */
	data8 = buf8 = XCPPM_GET8(unitp->hndls.gpio_data_ports,
	    unitp->regs.gpio_port2_data);

	switch (action) {
	case XCPPM_GETBIT:
		ret = data8 & pos;
		DPRINTF(D_GPIO, ("%s: READ: GPIO Bank2 value 0x%x\n",
		    str, buf8));
		break;

	case XCPPM_SETBIT:
	case XCPPM_CLRBIT:
		if (action == XCPPM_SETBIT)
			data8 |= pos;
		else
			data8 &= ~pos;
		XCPPM_SETGET8(unitp->hndls.gpio_data_ports,
		    unitp->regs.gpio_port2_data, data8);
		ret = data8 & pos;
		DPRINTF(D_GPIO, ("%s: %s: GPIO Bank2 "
		    "bit 0x%x changed from 0x%x to 0x%x\n",
		    str, (action == XCPPM_SETBIT) ? "UP" : "DOWN",
		    pos, buf8, data8));
		break;

	default:
		cmn_err(CE_PANIC, "xcalppm: unrecognized register "
		    "IO command %d\n", action);
		break;
	}
	mutex_exit(&unitp->gpio_lock);

	return (ret);
}
641 
642 
643 /*
644  * Raise the power level of a subrange of cpus.  Used when cpu driver
645  * failed an attempt to lower the power of a cpu (probably because
646  * it got busy).  Need to revert the ones we already changed.
647  *
648  * ecpup = the ppm_dev_t for the cpu which failed to lower power
649  * level = power level to reset prior cpus to
650  */
651 static void
652 xcppm_revert_cpu_power(ppm_dev_t *ecpup, int level)
653 {
654 	ppm_dev_t *cpup;
655 
656 	for (cpup = xcppm_cpu.devlist; cpup != ecpup; cpup = cpup->next) {
657 		DPRINTF(D_CPU, ("xrcp: \"%s\", revert to level %d\n",
658 		    cpup->path, level));
659 		(void) xcppm_change_power_level(cpup, 0, level);
660 	}
661 }
662 
663 /*
664  * Switch the DC/DC converter.  Clearing the GPIO bit in SuperI/O puts
665  * the converter in low power mode and setting the bit puts it back in
666  * normal mode.
667  */
668 static void
669 xcppm_switch_dcdc_converter(int action)
670 {
671 	int tries = XCPPM_VCL_TRIES;
672 	uint_t spl;
673 	uint64_t stick_begin, stick_end;
674 	uint64_t tick_begin, tick_end;
675 	uint64_t cur_speed_ratio, full_speed_ratio;
676 	static int xcppm_dcdc_lpm;
677 
678 	switch (action) {
679 	case XCPPM_SETBIT:
680 		if (xcppm_dcdc_lpm) {
681 			DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: "
682 			    "switch to normal power mode.\n"));
683 			(void) xcppm_gpio_port2(action, HIGHPWR);
684 			xcppm_dcdc_lpm = 0;
685 		}
686 		break;
687 	case XCPPM_CLRBIT:
688 		/*
689 		 * In some fast CPU configurations, DC/DC converter was
690 		 * put in low power mode before CPUs made the transition
691 		 * to 1/32 of clock speed.  In those cases, system was
692 		 * shut down by hardware for protection.  To resolve that
693 		 * problem, we make sure CPUs have made the clock transition
694 		 * before the DC/DC converter has been put to low power mode.
695 		 */
696 		ASSERT(xcppm_dcdc_lpm == 0);
697 		kpreempt_disable();
698 		full_speed_ratio = cpunodes[CPU->cpu_id].clock_freq /
699 		    sys_tick_freq;
700 		while (tries) {
701 			spl = ddi_enter_critical();
702 			tick_begin = gettick_counter();
703 			stick_timestamp((int64_t *)&stick_begin);
704 			ddi_exit_critical(spl);
705 			drv_usecwait(XCPPM_VCL_DELAY);
706 			spl = ddi_enter_critical();
707 			tick_end = gettick_counter();
708 			stick_timestamp((int64_t *)&stick_end);
709 			ddi_exit_critical(spl);
710 			cur_speed_ratio = (tick_end - tick_begin) /
711 			    (stick_end - stick_begin);
712 
713 			/*
714 			 * tick/stick at current speed should at most be
715 			 * equal to full-speed tick/stick, adjusted with
716 			 * full/lowest clock speed ratio.  If not, speed
717 			 * transition has not happened yet.
718 			 */
719 			if (cur_speed_ratio <= ((full_speed_ratio /
720 			    XCPPM_VCL_DIVISOR) + 1)) {
721 				DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: "
722 				    "switch to low power mode.\n"));
723 				(void) xcppm_gpio_port2(action, HIGHPWR);
724 				xcppm_dcdc_lpm = 1;
725 				break;
726 			}
727 			DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: CPU "
728 			    "has not made transition to lowest speed yet "
729 			    "(%d)\n", tries));
730 			tries--;
731 		}
732 		kpreempt_enable();
733 		break;
734 	}
735 }
736 
737 static void
738 xcppm_rio_mode(xcppm_unit_t *unitp, int mode)
739 {
740 	uint32_t data32, buf32;
741 
742 	mutex_enter(&unitp->gpio_lock);
743 	data32 = buf32 = XCPPM_GET32(unitp->hndls.rio_mode_auxio,
744 	    unitp->regs.rio_mode_auxio);
745 	if (mode == XCPPM_SETBIT)
746 		data32 |= RIO_BBC_ESTAR_MODE;
747 	else
748 		data32 &= ~RIO_BBC_ESTAR_MODE;
749 	XCPPM_SETGET32(unitp->hndls.rio_mode_auxio,
750 	    unitp->regs.rio_mode_auxio, data32);
751 	mutex_exit(&unitp->gpio_lock);
752 
753 	DPRINTF(D_CPU, ("xcppm_rio_mode: %s: change from 0x%x to 0x%x\n",
754 	    (mode == XCPPM_SETBIT) ? "DOWN" : "UP", buf32, data32));
755 }
756 
757 
758 /*
759  * change the power level of all cpus to the arg value;
760  * the caller needs to ensure that a legal transition is requested.
761  */
762 static int
763 xcppm_change_cpu_power(int newlevel)
764 {
765 #ifdef DEBUG
766 	char *str = "xcppm_ccp";
767 #endif
768 	int index, level, oldlevel;
769 	int lowest, highest;
770 	int undo_flag, ret;
771 	int speedup, incr;
772 	uint32_t data32;
773 	uint16_t data16;
774 	xcppm_unit_t *unitp;
775 	ppm_dev_t *cpup;
776 	dev_info_t *dip;
777 	char *chstr;
778 
779 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
780 	ASSERT(unitp);
781 	cpup = xcppm_cpu.devlist;
782 	lowest = cpup->lowest;
783 	highest = cpup->highest;
784 
785 	/*
786 	 * not all cpus may have transitioned to a known level by this time
787 	 */
788 	oldlevel = (cpup->level == PM_LEVEL_UNKNOWN) ? highest : cpup->level;
789 	dip = cpup->dip;
790 	ASSERT(dip);
791 
792 	DPRINTF(D_CPU, ("%s: old %d, new %d, highest %d, lowest %d\n",
793 	    str, oldlevel, newlevel, highest, lowest));
794 
795 	if (newlevel > oldlevel) {
796 		chstr = "UP";
797 		speedup = 1;
798 		incr = 1;
799 	} else if (newlevel < oldlevel) {
800 		chstr = "DOWN";
801 		speedup = 0;
802 		incr = -1;
803 	} else
804 		return (DDI_SUCCESS);
805 
806 	undo_flag = 0;
807 	if (speedup) {
808 		/*
809 		 * If coming up from lowest power level, set the E*
810 		 * mode bit in GPIO to make power supply efficient
811 		 * at normal power.
812 		 */
813 		if (oldlevel == cpup->lowest) {
814 			xcppm_switch_dcdc_converter(XCPPM_SETBIT);
815 			undo_flag = 1;
816 		}
817 	} else {
818 		/*
819 		 * set BBC Estar mode bit in RIO AUXIO register
820 		 */
821 		if (oldlevel == highest) {
822 			xcppm_rio_mode(unitp, XCPPM_SETBIT);
823 			undo_flag = 1;
824 		}
825 	}
826 
827 	/*
828 	 * this loop will execute 1x or 2x depending on
829 	 * number of times we need to change clock rates
830 	 */
831 	for (level = oldlevel+incr; level != newlevel+incr; level += incr) {
832 		for (cpup = xcppm_cpu.devlist; cpup; cpup = cpup->next) {
833 			if (cpup->level == level)
834 				continue;
835 			ret = xcppm_change_power_level(cpup, 0, level);
836 			DPRINTF(D_CPU, ("%s: \"%s\", %s to level %d, ret %d\n",
837 			    str, cpup->path, chstr, cpup->level, ret));
838 			if (ret == DDI_SUCCESS)
839 				continue;
840 
841 			/*
842 			 * if the driver was unable to lower cpu speed,
843 			 * the cpu probably got busy; set the previous
844 			 * cpus back to the original level
845 			 */
846 			if (speedup == 0)
847 				xcppm_revert_cpu_power(cpup, level + 1);
848 
849 			if (undo_flag) {
850 				if (speedup)
851 					xcppm_switch_dcdc_converter(
852 					    XCPPM_CLRBIT);
853 				else
854 					xcppm_rio_mode(unitp, XCPPM_CLRBIT);
855 			}
856 			return (ret);
857 		}
858 
859 		index = level - 1;
860 		spm_change_schizo_speed(index);
861 		DPRINTF(D_CPU, ("%s: safari config reg changed\n", str));
862 
863 		/*
864 		 * set the delay times for changing to this rate
865 		 */
866 		data32 = XCPPM_BBC_DELAY(index);
867 		XCPPM_SETGET32(unitp->hndls.bbc_estar_ctrl,
868 		    (caddr_t)unitp->regs.bbc_assert_change, data32);
869 		DPRINTF(D_CPU, ("%s: %s: Wrote E* Assert Change Time "
870 		    "(t1) = 0x%x\n", str, chstr, data32));
871 
872 		data32 = XCPPM_BBC_DELAY(index);
873 		XCPPM_SETGET32(unitp->hndls.bbc_estar_ctrl,
874 		    (caddr_t)unitp->regs.bbc_pll_settle, data32);
875 		DPRINTF(D_CPU, ("%s: %s: Wrote E* PLL Settle Time "
876 		    "(t4) = 0x%x\n", str, chstr, data32));
877 
878 		data16 = bbc_estar_control_masks[index];
879 		XCPPM_SETGET16(unitp->hndls.bbc_estar_ctrl,
880 		    (caddr_t)unitp->regs.bbc_estar_ctrl, data16);
881 		DPRINTF(D_CPU, ("%s: %s: Wrote BCC E* Control = 0x%x\n",
882 		    str, chstr, data16));
883 	}
884 
885 	/*
886 	 * clear CPU Estar Mode bit in the gpio register
887 	 */
888 	if (speedup) {
889 		if (newlevel == highest)
890 			xcppm_rio_mode(unitp, XCPPM_CLRBIT);
891 	} else {
892 		if (newlevel == lowest)
893 			xcppm_switch_dcdc_converter(XCPPM_CLRBIT);
894 	}
895 
896 	return (DDI_SUCCESS);
897 }
898 
899 
900 /*
901  * Process a request to change the power level of a cpu.  If all cpus
902  * don't want to be at the same power yet, or if we are currently
903  * refusing slowdown requests due to thermal stress, just cache the
904  * request.  Otherwise, make the change for all cpus.
905  */
/* ARGSUSED */
static int
xcppm_manage_cpus(dev_info_t *dip, power_req_t *reqp, int *result)
{
#ifdef DEBUG
	char *str = "xcppm_manage_cpus";
#endif
	int old, new, ret, kmflag;
	ppm_dev_t *ppmd;
	pm_ppm_devlist_t *devlist = NULL, *p;
	int		do_rescan = 0;
	dev_info_t	*rescan_dip;

	*result = DDI_SUCCESS;
	switch (reqp->request_type) {
	case PMR_PPM_SET_POWER:
		break;
	case PMR_PPM_POWER_CHANGE_NOTIFY:
		/* cpu driver can`t change cpu power level by itself */
	default:
		return (DDI_FAILURE);
	}

	ppmd = PPM_GET_PRIVATE(dip);
	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
	old = reqp->req.ppm_set_power_req.old_level;
	new = reqp->req.ppm_set_power_req.new_level;

	/*
	 * At power on, the cpus are at full speed.  There is no hardware
	 * transition needed for going from unknown to full.  However, the
	 * state of the pm framework and cpu driver needs to be adjusted.
	 */
	if (ppmd->level == PM_LEVEL_UNKNOWN && new == ppmd->highest) {
		*result = ret = xcppm_change_power_level(ppmd, 0, new);
		if (ret != DDI_SUCCESS) {
			DPRINTF(D_CPU, ("%s: Failed to change "
			    "power level to %d\n", str, new));
		}
		return (ret);
	}

	if (new == ppmd->level) {
		DPRINTF(D_CPU, ("%s: already at power level %d\n", str, new));
		return (DDI_SUCCESS);
	}

	/* record this cpu's requested level; peers are checked below */
	ppmd->rplvl = new;

	/*
	 * A request from lower to higher level transition is granted and
	 * made effective on both cpus. For more than two cpu platform model,
	 * the following code needs to be modified to remember the rest of
	 * the unsoliciting cpus to be rescan'ed.
	 * A request from higher to lower must be agreed by all cpus.
	 */
	for (ppmd = xcppm_cpu.devlist; ppmd; ppmd = ppmd->next) {
		if (ppmd->rplvl == new)
			continue;

		if (new < old) {
			DPRINTF(D_SOME, ("%s: not all cpus want to go down to "
			    "level %d yet\n", str, new));
			return (DDI_SUCCESS);
		}

		/*
		 * If a single cpu requests power up, honor the request
		 * by powering up both cpus.
		 */
		if (new > old) {
			DPRINTF(D_SOME, ("%s: powering up device(%s@%s, %p) "
			    "because of request from dip(%s@%s, %p), "
			    "need pm_rescan\n", str, PM_NAME(ppmd->dip),
			    PM_ADDR(ppmd->dip), (void *)ppmd->dip,
			    PM_NAME(dip), PM_ADDR(dip), (void *)dip))
			/*
			 * NOTE(review): the DPRINTF above has no trailing
			 * semicolon; harmless when DPRINTF compiles away,
			 * but verify it still compiles under DEBUG.
			 */
			do_rescan++;
			rescan_dip = ppmd->dip;
			break;
		}
	}

	ret = xcppm_change_cpu_power(new);
	*result = ret;

	if (ret == DDI_SUCCESS) {
		if (reqp->req.ppm_set_power_req.canblock == PM_CANBLOCK_BLOCK)
			kmflag = KM_SLEEP;
		else
			kmflag = KM_NOSLEEP;

		/*
		 * Build a list of the other cpus whose levels changed as a
		 * side effect, so the framework can be notified via the
		 * request cookie.
		 */
		for (ppmd = xcppm_cpu.devlist; ppmd; ppmd = ppmd->next) {
			if (ppmd->dip == dip)
				continue;

			if ((p = kmem_zalloc(sizeof (pm_ppm_devlist_t),
			    kmflag)) == NULL) {
				break;
			}
			p->ppd_who = ppmd->dip;
			p->ppd_cmpt = ppmd->cmpt;
			p->ppd_old_level = old;
			p->ppd_new_level = new;
			p->ppd_next = devlist;

			devlist = p;
		}
		reqp->req.ppm_set_power_req.cookie = (void *) devlist;

		if (do_rescan > 0)
			pm_rescan(rescan_dip);
	}

	return (ret);
}
1021 
1022 
1023 /*
1024  * If powering off and all devices in this domain will now be off,
1025  * shut off common power.  If powering up and no devices up yet,
1026  * turn on common power.  Always make the requested power level
1027  * change for the target device.
1028  */
static int
xcppm_manage_fet(dev_info_t *dip, power_req_t *reqp, int *result)
{
#ifdef DEBUG
	char *str = "xcppm_manage_fet";
#endif
	int (*pwr_func)(ppm_dev_t *, int, int);
	int new, old, cmpt, incr = 0;
	ppm_dev_t *ppmd;

	ppmd = PPM_GET_PRIVATE(dip);
	DPRINTF(D_FET, ("%s: \"%s\", req %s\n", str,
	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));

	*result = DDI_SUCCESS;	/* change later for failures */
	switch (reqp->request_type) {
	case PMR_PPM_SET_POWER:
		/* we drive the change ourselves through the framework */
		pwr_func = xcppm_change_power_level;
		old = reqp->req.ppm_set_power_req.old_level;
		new = reqp->req.ppm_set_power_req.new_level;
		cmpt = reqp->req.ppm_set_power_req.cmpt;
		break;
	case PMR_PPM_POWER_CHANGE_NOTIFY:
		/* driver already changed power; just record the new level */
		pwr_func = xcppm_record_level_change;
		old = reqp->req.ppm_notify_level_req.old_level;
		new = reqp->req.ppm_notify_level_req.new_level;
		cmpt = reqp->req.ppm_notify_level_req.cmpt;
		break;
	default:
		return (*result = DDI_FAILURE);

	}

	/* This is common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
	DPRINTF(D_FET, ("%s: \"%s\", old %d, new %d\n",
	    str, ppmd->path, old, new));

	ASSERT(old == ppmd->level);
	if (new == ppmd->level)
		return (DDI_SUCCESS);

	PPM_LOCK_DOMAIN(ppmd->domp);
	/*
	 * Devices in this domain are known to have 0 (off) as their
	 * lowest power level.  We use this fact to simplify the logic.
	 */
	if (new > 0) {
		/* first device coming up: turn on the shared FET power */
		if (ppmd->domp->pwr_cnt == 0)
			(void) xcppm_gpio_port2(XCPPM_SETBIT, DRVON);
		if (old == 0) {
			/* pre-increment; undone below if the change fails */
			ppmd->domp->pwr_cnt++;
			incr = 1;
			DPRINTF(D_FET, ("%s: UP cnt = %d\n",
			    str, ppmd->domp->pwr_cnt));
		}
	}

	PPM_UNLOCK_DOMAIN(ppmd->domp);

	/* NOTE(review): pwr_cnt is read here without the domain lock */
	ASSERT(ppmd->domp->pwr_cnt > 0);

	if ((*result = (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS) {
		DPRINTF(D_FET, ("%s: \"%s\" power change failed \n",
		    str, ppmd->path));
	}

	PPM_LOCK_DOMAIN(ppmd->domp);

	/*
	 * Decr the power count in two cases:
	 *
	 *   1) request was to power device down and was successful
	 *   2) request was to power up (we pre-incremented count), but failed.
	 */
	if ((*result == DDI_SUCCESS && ppmd->level == 0) ||
	    (*result != DDI_SUCCESS && incr)) {
		ASSERT(ppmd->domp->pwr_cnt > 0);
		ppmd->domp->pwr_cnt--;
		DPRINTF(D_FET, ("%s: DN cnt = %d\n", str, ppmd->domp->pwr_cnt));
		/* last device off: cut the shared FET power */
		if (ppmd->domp->pwr_cnt == 0)
			(void) xcppm_gpio_port2(XCPPM_CLRBIT, DRVON);
	}

	PPM_UNLOCK_DOMAIN(ppmd->domp);
	ASSERT(ppmd->domp->pwr_cnt >= 0);
	return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}
1116 
1117 
1118 /*
1119  * Since UPA64S relies on PCI B staying at nominal 33MHz in order to
1120  * have its interrupt pulse function properly, we ensure
1121  * - Lowering PCI B only if UPA64S is at low power, otherwise defer
1122  *   the action until UPA64S goes down; hence right after UPA64S goes
1123  *   down, perform the deferred action for PCI B;
1124  * - Always raise PCI B power prior to raising UPA64S power.
1125  *
1126  * Both UPA64S and PCI B devices are considered each other's dependency
1127  * device whenever actual power transition is handled (PMR_PPM_SET_POWER).
1128  */
static int
xcppm_manage_pciupa(dev_info_t *dip, power_req_t *reqp, int *result)
{
#ifdef DEBUG
	char *str = "xcppm_manage_pciupa";
#endif
	/* selected per request type: actually change power, or just record it */
	int (*pwr_func)(ppm_dev_t *, int, int);
	uint_t flags = 0, co_flags = 0;
	ppm_dev_t *ppmd, *codev;
	int new, cmpt, retval;

	ppmd = PPM_GET_PRIVATE(dip);
	DPRINTF(D_PCIUPA, ("%s: \"%s\", req %s\n", str,
	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));

	*result = DDI_SUCCESS;

	switch (reqp->request_type) {
	case PMR_PPM_SET_POWER:
		pwr_func = xcppm_change_power_level;
		new = reqp->req.ppm_set_power_req.new_level;
		cmpt = reqp->req.ppm_set_power_req.cmpt;
		break;
	case PMR_PPM_POWER_CHANGE_NOTIFY:
		pwr_func = xcppm_record_level_change;
		new = reqp->req.ppm_notify_level_req.new_level;
		cmpt = reqp->req.ppm_notify_level_req.cmpt;
		break;
	default:
		*result = DDI_FAILURE;
		return (DDI_FAILURE);
	}

	/* Common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
	ASSERT(ppmd);	/* since it should be locked already */

	if (new == ppmd->level)
		return (DDI_SUCCESS);

	DPRINTF(D_PCIUPA, ("%s: \"%s\", levels: current %d, new %d\n",
	    str, ppmd->path, ppmd->level, new));

	/*
	 * find power-wise co-related device
	 */
	flags =  ppmd->flags;

#ifdef DEBUG
	if (flags & ~(XCPPMF_PCIB|XCPPMF_UPA))
		DPRINTF(D_ERROR, ("%s: invalid ppmd->flags value 0x%x\n", str,
		    ppmd->flags));
#endif

	/* UPA64S's partner is PCI B and vice versa */
	if (flags == XCPPMF_UPA)
		co_flags = XCPPMF_PCIB;
	else if (flags == XCPPMF_PCIB)
		co_flags = XCPPMF_UPA;

	/* locate the partner's component-0 entry in this domain's devlist */
	for (codev = ppmd->domp->devlist; codev; codev = codev->next)
		if ((codev->cmpt == 0) && (codev->flags == co_flags))
			break;

	if (new > ppmd->level) {
		/*
		 * Raise power level -
		 * pre-raising: upa ensure pci is powered up.
		 */
		if ((flags == XCPPMF_UPA) && codev &&
		    (codev->level != codev->highest)) {
			/*
			 * Tolerate a failed call if the level nonetheless
			 * ended up at highest (e.g. raised concurrently).
			 */
			if ((retval = xcppm_change_power_level(codev,
			    0, codev->highest)) != DDI_SUCCESS &&
			    codev->level != codev->highest) {
				*result = retval;
				return (DDI_FAILURE);
			}
		}
		if ((retval = (*pwr_func)(ppmd, 0, new)) != DDI_SUCCESS) {
			*result = retval;
			return (DDI_FAILURE);
		}
	} else if (new < ppmd->level) {
		/*
		 * Lower power level
		 *
		 * once upa is attached, pci checks upa level:
		 * if upa is at high level, defer the request and return.
		 * otherwise, set power level then check and lower pci level.
		 */
		if ((flags == XCPPMF_PCIB) && codev &&
		    (codev->level != codev->lowest)) {
			/* remember the requested level for later (rplvl) */
			ppmd->rplvl = new;
			return (DDI_SUCCESS);
		}
		if ((retval = (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS &&
		    ppmd->level != new) {
			*result = retval;
			return (DDI_FAILURE);
		}

		/* UPA went down: carry out PCI B's deferred lowering, if any */
		if (flags == XCPPMF_UPA) {
			if (codev && (codev->rplvl != PM_LEVEL_UNKNOWN) &&
			    (codev->rplvl < codev->level)) {
				DPRINTF(D_PCIUPA, ("%s: codev \"%s\" "
				    "rplvl %d level %d\n", str, codev->path,
				    codev->rplvl, codev->level));
				if ((retval = xcppm_change_power_level(
				    codev, 0, codev->rplvl)) != DDI_SUCCESS) {
					*result = retval;
					return (DDI_FAILURE);
				}
			}
		}
	}

	return (DDI_SUCCESS);
}
1245 
1246 
1247 /*
1248  * When all of the children of the 1394 nexus are idle, a call will be
1249  * made to the nexus driver's own power entry point to lower power.  Ppm
1250  * intercepts this and kills 1394 cable power (since the driver doesn't
1251  * have access to the required register).  Similar logic applies when
1252  * coming up from the state where all the children were off.
1253  */
static int
xcppm_manage_1394(dev_info_t *dip, power_req_t *reqp, int *result)
{
#ifdef DEBUG
	char *str = "xcppm_manage_1394";
#endif
	/* selected per request type: actually change power, or just record it */
	int (*pwr_func)(ppm_dev_t *, int, int);
	int new, old, cmpt;
	ppm_dev_t *ppmd;

	ppmd = PPM_GET_PRIVATE(dip);
	DPRINTF(D_1394, ("%s: \"%s\", req %s\n", str,
	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));

	switch (reqp->request_type) {
	case PMR_PPM_SET_POWER:
		pwr_func = xcppm_change_power_level;
		old = reqp->req.ppm_set_power_req.old_level;
		new = reqp->req.ppm_set_power_req.new_level;
		cmpt = reqp->req.ppm_set_power_req.cmpt;
		break;
	case PMR_PPM_POWER_CHANGE_NOTIFY:
		pwr_func = xcppm_record_level_change;
		old = reqp->req.ppm_notify_level_req.old_level;
		new = reqp->req.ppm_notify_level_req.new_level;
		cmpt = reqp->req.ppm_notify_level_req.cmpt;
		break;
	default:
		return (*result = DDI_FAILURE);
	}


	/* Common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
	DPRINTF(D_1394, ("%s: dev %s@%s, old %d new %d\n", str,
	    ddi_binding_name(dip), ddi_get_name_addr(dip), old, new));

	ASSERT(ppmd);	/* since it must already be locked */
	ASSERT(old == ppmd->level);

	if (new == ppmd->level)
		return (*result = DDI_SUCCESS);

	/* the reduce power case */
	if (cmpt == 0 && new < ppmd->level) {
		if ((*result =
		    (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
		/* going to lowest level: kill 1394 cable power (CPEN) */
		if (new == ppmd->lowest)
			(void) xcppm_gpio_port2(XCPPM_CLRBIT, CPEN);
		ppmd->level = new;
		return (DDI_SUCCESS);
	}

	/* the increase power case */
	if (cmpt == 0 && new > ppmd->level) {
		if (ppmd->level == ppmd->lowest) {
			/* restore cable power, then give it a tick to settle */
			(void) xcppm_gpio_port2(XCPPM_SETBIT, CPEN);
			delay(1);
		}
		/*
		 * Even if pwr_func fails we need to check current level again
		 * because it could have been changed by an intervening
		 * POWER_CHANGE_NOTIFY operation.
		 */
		if ((*result =
		    (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS &&
		    ppmd->level == ppmd->lowest) {
			/* raise failed and device stayed down: undo CPEN */
			(void) xcppm_gpio_port2(XCPPM_CLRBIT, CPEN);
		} else {
			ppmd->level = new;
		}

		return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
	}

	/*
	 * We get here if component was non-zero.  This is not what we
	 * expect.  Let the device deal with it and just pass back the
	 * result.
	 */
	*result = xcppm_change_power_level(ppmd, cmpt, new);
	return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}
1338 
1339 
1340 /*
1341  * lock, unlock, or trylock for one power mutex
1342  */
1343 static void
1344 xcppm_lock_one(ppm_dev_t *ppmd, power_req_t *reqp, int *iresp)
1345 {
1346 	switch (reqp->request_type) {
1347 	case PMR_PPM_LOCK_POWER:
1348 		pm_lock_power_single(ppmd->dip,
1349 		    reqp->req.ppm_lock_power_req.circp);
1350 		break;
1351 
1352 	case PMR_PPM_UNLOCK_POWER:
1353 		pm_unlock_power_single(ppmd->dip,
1354 		    reqp->req.ppm_unlock_power_req.circ);
1355 		break;
1356 
1357 	case PMR_PPM_TRY_LOCK_POWER:
1358 		*iresp = pm_try_locking_power_single(ppmd->dip,
1359 		    reqp->req.ppm_lock_power_req.circp);
1360 		break;
1361 	}
1362 }
1363 
1364 
1365 /*
1366  * lock, unlock, or trylock all devices within a domain.
1367  */
static void
xcppm_lock_all(ppm_domain_t *domp, power_req_t *reqp, int *iresp)
{
	/*
	 * To simplify the implementation we let all the devices
	 * in the domain be represented by a single device (dip).
	 * We use the first device in the domain's devlist.  This
	 * is safe because we return with the domain lock held
	 * which prevents the list from changing.
	 */
	if (reqp->request_type == PMR_PPM_LOCK_POWER) {
		/* recursive locks only bump refcnt; mutex taken once */
		if (!MUTEX_HELD(&domp->lock))
			mutex_enter(&domp->lock);
		domp->refcnt++;
		ASSERT(domp->devlist != NULL);
		pm_lock_power_single(domp->devlist->dip,
		    reqp->req.ppm_lock_power_req.circp);
		/* domain lock remains held */
		return;
	} else if (reqp->request_type == PMR_PPM_UNLOCK_POWER) {
		ASSERT(MUTEX_HELD(&domp->lock));
		ASSERT(domp->devlist != NULL);
		pm_unlock_power_single(domp->devlist->dip,
		    reqp->req.ppm_unlock_power_req.circ);
		/* drop the mutex only when the last nested lock is released */
		if (--domp->refcnt == 0)
			mutex_exit(&domp->lock);
		return;
	}

	ASSERT(reqp->request_type == PMR_PPM_TRY_LOCK_POWER);
	/* trylock: fail (*iresp = 0) rather than block on the domain mutex */
	if (!MUTEX_HELD(&domp->lock))
		if (!mutex_tryenter(&domp->lock)) {
			*iresp = 0;
			return;
		}
	*iresp = pm_try_locking_power_single(domp->devlist->dip,
	    reqp->req.ppm_lock_power_req.circp);
	if (*iresp)
		domp->refcnt++;
	else
		/* power lock not acquired: release the domain mutex again */
		mutex_exit(&domp->lock);
}
1410 
1411 
1412 /*
1413  * The pm framework calls us here to manage power for a device.
1414  * We maintain state which tells us whether we need to turn off/on
1415  * system board power components based on the status of all the devices
1416  * sharing a component.
1417  *
1418  */
1419 /* ARGSUSED */
static int
xcppm_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	power_req_t *reqp = arg;
	xcppm_unit_t *unitp;
	ppm_domain_t *domp;
	ppm_dev_t *ppmd;

#ifdef DEBUG
	char path[MAXPATHLEN], *ctlstr, *str = "xcppm_ctlops";
	uint_t mask = ppm_debug & (D_CTLOPS1 | D_CTLOPS2);
	if (mask && (ctlstr = ppm_get_ctlstr(reqp->request_type, mask))) {
		prom_printf("%s: \"%s\", %s\n", str,
		    ddi_pathname(rdip, path), ctlstr);
	}
#endif

	/* only power-management ctlops are handled here */
	if (ctlop != DDI_CTLOPS_POWER)
		return (DDI_FAILURE);

	switch (reqp->request_type) {
	/* no platform work needed for these lifecycle notifications */
	case PMR_PPM_UNMANAGE:
	case PMR_PPM_PRE_PROBE:
	case PMR_PPM_POST_PROBE:
	case PMR_PPM_PRE_ATTACH:
	case PMR_PPM_PRE_DETACH:
		return (DDI_SUCCESS);

	/*
	 * There is no hardware configuration required to be done on this
	 * platform prior to installing drivers.
	 */
	case PMR_PPM_INIT_CHILD:
	case PMR_PPM_UNINIT_CHILD:
		return (DDI_SUCCESS);

	case PMR_PPM_ALL_LOWEST:
		DPRINTF(D_LOWEST, ("%s: all devices at lowest power = %d\n",
		    str, reqp->req.ppm_all_lowest_req.mode));
		if (reqp->req.ppm_all_lowest_req.mode == PM_ALL_LOWEST) {
			unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
			mutex_enter(&unitp->unit_lock);
			/* don't start blinking while the unit is suspended */
			if (unitp->state & XCPPM_ST_SUSPENDED) {
				mutex_exit(&unitp->unit_lock);
				return (DDI_SUCCESS);
			}

			/* blink the LED to indicate everything is at lowest */
			xcppm_set_led(PPM_LEDON);
			unitp->led_tid = timeout(xcppm_blink_led,
			    (void *)PPM_LEDON, PPM_LEDON_INTERVAL);
			mutex_exit(&unitp->unit_lock);
			DPRINTF(D_LOWEST, ("%s: LED blink started\n", str));
		} else {
			/* leaving all-lowest: stop blinking, LED solid on */
			xcppm_freeze_led((void *)PPM_LEDON);
			DPRINTF(D_LOWEST, ("%s: LED freeze ON\n", str));
		}
		return (DDI_SUCCESS);

	case PMR_PPM_POST_ATTACH:
		/*
		 * After a successful attach, if we haven't already created
		 * our private data structure for this device, ppm_get_dev()
		 * will force it to be created.
		 */
		ppmd = PPM_GET_PRIVATE(rdip);
		if (reqp->req.ppm_config_req.result != DDI_SUCCESS) {
			/* attach failed: discard any private data we made */
			if (ppmd)
				ppm_rem_dev(rdip);
		} else if (!ppmd) {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			(void) ppm_get_dev(rdip, domp);
		}
		return (DDI_SUCCESS);

	case PMR_PPM_POST_DETACH:
		xcppm_detach_ctlop(rdip, reqp);
		*(int *)result = DDI_SUCCESS;
		return (DDI_SUCCESS);

	case PMR_PPM_PRE_RESUME:
		xcppm_resume_ctlop(rdip, reqp);
		return (DDI_SUCCESS);

	case PMR_PPM_UNLOCK_POWER:
	case PMR_PPM_TRY_LOCK_POWER:
	case PMR_PPM_LOCK_POWER:
		ppmd = PPM_GET_PRIVATE(rdip);
		if (ppmd)
			domp = ppmd->domp;
		else if (reqp->request_type != PMR_PPM_UNLOCK_POWER) {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			ppmd = ppm_get_dev(rdip, domp);
		}
		/*
		 * NOTE(review): if ppmd is NULL and the request is
		 * PMR_PPM_UNLOCK_POWER, neither branch above assigns
		 * "domp", which is then read below.  Presumably an unlock
		 * can only follow a lock that created the private data —
		 * confirm that invariant holds for all callers.
		 */

		ASSERT(domp->dflags == PPMD_LOCK_ALL ||
		    domp->dflags == PPMD_LOCK_ONE);
		DPRINTF(D_LOCKS, ("xcppm_lock_%s: \"%s\", %s\n",
		    (domp->dflags == PPMD_LOCK_ALL) ? "all" : "one",
		    ppmd->path, ppm_get_ctlstr(reqp->request_type, D_LOCKS)));

		/* dispatch on the domain's locking policy */
		if (domp->dflags == PPMD_LOCK_ALL)
			xcppm_lock_all(domp, reqp, result);
		else
			xcppm_lock_one(ppmd, reqp, result);
		return (DDI_SUCCESS);

	case PMR_PPM_POWER_LOCK_OWNER:
		ASSERT(reqp->req.ppm_power_lock_owner_req.who == rdip);
		ppmd = PPM_GET_PRIVATE(rdip);
		if (ppmd)
			domp = ppmd->domp;
		else {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			ppmd = ppm_get_dev(rdip, domp);
		}

		/*
		 * In case of LOCK_ALL, effective owner of the power lock
		 * is the owner of the domain lock. otherwise, it is the owner
		 * of the power lock.
		 */
		if (domp->dflags & PPMD_LOCK_ALL)
			reqp->req.ppm_power_lock_owner_req.owner =
			    mutex_owner(&domp->lock);
		else {
			reqp->req.ppm_power_lock_owner_req.owner =
			    DEVI(rdip)->devi_busy_thread;
		}
		return (DDI_SUCCESS);

	default:
		/* power-change requests: route to the per-domain manager */
		ppmd = PPM_GET_PRIVATE(rdip);
		if (ppmd == NULL) {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			ppmd = ppm_get_dev(rdip, domp);
		}

#ifdef DEBUG
		if ((reqp->request_type == PMR_PPM_SET_POWER) &&
		    (ppm_debug & D_SETPWR)) {
			prom_printf("%s: \"%s\", PMR_PPM_SET_POWER\n",
			    str, ppmd->path);
		}
#endif

		if (ppmd->domp == &xcppm_cpu)
			return (xcppm_manage_cpus(rdip, reqp, result));
		else if (ppmd->domp == &xcppm_fet)
			return (xcppm_manage_fet(rdip, reqp, result));
		else if (ppmd->domp == &xcppm_upa)
			return (xcppm_manage_pciupa(rdip, reqp, result));
		else {
			ASSERT(ppmd->domp == &xcppm_1394);
			return (xcppm_manage_1394(rdip, reqp, result));
		}
	}
}
1582 
1583 
1584 /*
1585  * Initialize our private version of real power level
1586  * as well as lowest and highest levels the device supports;
1587  * see ppmf and ppm_add_dev
1588  */
1589 static void
1590 xcppm_dev_init(ppm_dev_t *ppmd)
1591 {
1592 	struct pm_component *dcomps;
1593 	struct pm_comp *pm_comp;
1594 	dev_info_t *dip;
1595 	int maxi;
1596 
1597 	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
1598 	ppmd->level = PM_LEVEL_UNKNOWN;
1599 	ppmd->rplvl = PM_LEVEL_UNKNOWN;
1600 
1601 	dip = ppmd->dip;
1602 	/*
1603 	 * ppm exists to handle power-manageable devices which require
1604 	 * special handling on the current platform.  However, a
1605 	 * driver for such a device may choose not to support power
1606 	 * management on a particular load/attach.  In this case we
1607 	 * we create a structure to represent a single-component device
1608 	 * for which "level" = PM_LEVEL_UNKNOWN and "lowest" = 0
1609 	 * are effectively constant.
1610 	 */
1611 	if (PM_GET_PM_INFO(dip)) {
1612 		dcomps = DEVI(dip)->devi_pm_components;
1613 		pm_comp = &dcomps[ppmd->cmpt].pmc_comp;
1614 
1615 		ppmd->lowest = pm_comp->pmc_lvals[0];
1616 		ASSERT(ppmd->lowest >= 0);
1617 		maxi = pm_comp->pmc_numlevels - 1;
1618 		ppmd->highest = pm_comp->pmc_lvals[maxi];
1619 	}
1620 
1621 	/*
1622 	 * add any domain-specific initialization here
1623 	 */
1624 	if (ppmd->domp == &xcppm_fet) {
1625 		/*
1626 		 * when a new device is added to domain_powefet
1627 		 * it is counted here as being powered up.
1628 		 */
1629 		ppmd->domp->pwr_cnt++;
1630 		DPRINTF(D_FET, ("xcppm_dev_init: UP cnt = %d\n",
1631 		    ppmd->domp->pwr_cnt));
1632 	} else if (ppmd->domp == &xcppm_upa) {
1633 		/*
1634 		 * There may be a better way to determine the device type
1635 		 * instead of comparing to hard coded string names.
1636 		 */
1637 		if (strstr(ppmd->path, "pci@8,700000"))
1638 			ppmd->flags = XCPPMF_PCIB;
1639 		else if (strstr(ppmd->path, "upa@8,480000"))
1640 			ppmd->flags = XCPPMF_UPA;
1641 	}
1642 }
1643 
1644 
1645 /*
1646  * see ppmf and ppm_rem_dev
1647  */
1648 static void
1649 xcppm_dev_fini(ppm_dev_t *ppmd)
1650 {
1651 	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
1652 	if (ppmd->domp == &xcppm_fet) {
1653 		if (ppmd->level != ppmd->lowest) {
1654 			ppmd->domp->pwr_cnt--;
1655 			DPRINTF(D_FET, ("xcppm_dev_fini: DN cnt = %d\n",
1656 			    ppmd->domp->pwr_cnt));
1657 		};
1658 	}
1659 }
1660 
1661 
1662 /*
1663  * see ppmf and ppm_ioctl, PPMIOCSET
1664  */
1665 static void
1666 xcppm_iocset(uint8_t value)
1667 {
1668 	int action;
1669 
1670 	if (value == PPM_IDEV_POWER_ON)
1671 		action = XCPPM_SETBIT;
1672 	else if (value == PPM_IDEV_POWER_OFF)
1673 		action = XCPPM_CLRBIT;
1674 	(void) xcppm_gpio_port2(action, DRVON);
1675 }
1676 
1677 
1678 /*
1679  * see ppmf and ppm_ioctl, PPMIOCGET
1680  */
1681 static uint8_t
1682 xcppm_iocget(void)
1683 {
1684 	uint8_t bit;
1685 
1686 	bit = xcppm_gpio_port2(XCPPM_GETBIT, DRVON);
1687 	return ((bit == DRVON) ? PPM_IDEV_POWER_ON : PPM_IDEV_POWER_OFF);
1688 }
1689