xref: /titanic_41/usr/src/uts/sun4u/excalibur/io/xcalppm.c (revision 2b24ab6b3865caeede9eeb9db6b83e1d89dcd1ea)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Platform Power Management driver for SUNW,Sun-Blade-1000
28  */
29 #include <sys/modctl.h>
30 #include <sys/conf.h>
31 #include <sys/ddi.h>
32 #include <sys/sunddi.h>
33 #include <sys/ddi_impldefs.h>
34 #include <sys/ppmvar.h>
35 #include <sys/ppmio.h>
36 #include <sys/xcalppm_reg.h>
37 #include <sys/xcalppm_var.h>
38 #include <sys/stat.h>
39 #include <sys/epm.h>
40 #include <sys/archsystm.h>
41 #include <sys/cpuvar.h>
42 #include <sys/cheetahregs.h>
43 #include <sys/us3_module.h>
44 
45 /*
46  * Locking Considerations
47  *
48  * To look at and/or modify ppm_domain fields or elements of its list of
49  * ppm_dev structures, the lock for the affected domain must be held.
50  *
51  * When the autopm framework needs to change the power of a component of a
52  * device, it needs to hold the associated power lock (see discussion at
53  * top of uts/common/os/sunpm.c).
54  *
55  * If the framework needs to lock a dev/cmpt for a device which this ppm
56  * has claimed, xcppm_ctlops will be called with PMR_PPM_LOCK_POWER.  Ppm
57  * needs to be involved because, due to platform constraints, changing the
58  * power of one device may require that other devices be changed in the same
59  * operation.
60  *
61  * In some domains (e.g., cpus) the power lock must be acquired for all the
62  * affected devices to avoid possible corruption of the power states.  The
63  * joint change must be an atomic operation.  Ppm handles this by acquiring
64  * the domain lock, then walking the list of affected devices and acquiring
65  * the power lock for each of them.  To unlock, the list is traversed and
66  * each of the power locks is freed, followed by freeing the domain lock.
67  *
68  * For other domains ppm will only be changing the power of a single device
69  * that is known to the framework.  In these cases, the locking is done by
70  * acquiring the domain lock and directly calling the framework routine for
71  * getting a single power lock.
72  */
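
/*
 * Illustrative sketch of the LOCK_ALL sequence described above (comment
 * only, not compiled): xcppm_lock_all() below simplifies the walk by
 * letting the first device on the domain's devlist stand in for the
 * whole domain, roughly:
 *
 *	mutex_enter(&domp->lock);
 *	pm_lock_power_single(domp->devlist->dip, circp);
 *	... power changes happen under both locks ...
 *	pm_unlock_power_single(domp->devlist->dip, circ);
 *	mutex_exit(&domp->lock);
 *
 * LOCK_ONE domains go straight to pm_lock_power_single() via
 * xcppm_lock_one().
 */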
73 
74 static int	xcppm_attach(dev_info_t *, ddi_attach_cmd_t);
75 static int	xcppm_detach(dev_info_t *, ddi_detach_cmd_t);
76 static int	xcppm_ctlops(dev_info_t *, dev_info_t *,
77 		    ddi_ctl_enum_t, void *, void *);
78 static void	xcppm_dev_init(ppm_dev_t *);
79 static void	xcppm_dev_fini(ppm_dev_t *);
80 static void	xcppm_iocset(uint8_t);
81 static uint8_t	xcppm_iocget(void);
82 
83 /*
84  * Note: 1394 and pciupa were originally required to be LOCK_ALL domains.
85  * However, the underlying nexus drivers aren't able to do power mgmt
86  * (because of hw implementation issues).  The locking protocol for these
87  * domains is changed to LOCK_ONE to simplify other code.  The domain
88  * code itself will be removed in the future.
89  */
90 static ppm_domain_t xcppm_1394 = { "domain_1394",	PPMD_LOCK_ONE };
91 static ppm_domain_t xcppm_cpu  = { "domain_cpu",	PPMD_LOCK_ALL };
92 static ppm_domain_t xcppm_fet  = { "domain_powerfet",	PPMD_LOCK_ONE };
93 static ppm_domain_t xcppm_upa  = { "domain_pciupa",	PPMD_LOCK_ONE };
94 
95 ppm_domain_t *ppm_domains[] = {
96 	&xcppm_1394,
97 	&xcppm_cpu,
98 	&xcppm_fet,
99 	&xcppm_upa,
100 	NULL
101 };
102 
103 
104 struct ppm_funcs ppmf = {
105 	xcppm_dev_init,			/* dev_init */
106 	xcppm_dev_fini,			/* dev_fini */
107 	xcppm_iocset,			/* iocset */
108 	xcppm_iocget,			/* iocget */
109 };
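
/*
 * The platform-independent ppm code dispatches through this vtable:
 * dev_init/dev_fini when a device is added to or removed from a domain
 * (see ppm_add_dev/ppm_rem_dev), and iocset/iocget for the PPMIOCSET and
 * PPMIOCGET ioctls (see ppm_ioctl).
 */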
110 
111 
112 /*
113  * The order of entries must be from slowest to fastest and in
114  * one-to-one correspondence with the cpu_level array.
115  */
116 static const uint16_t bbc_estar_control_masks[] = {
117 	BBC_ESTAR_SLOW, BBC_ESTAR_MEDIUM, BBC_ESTAR_FAST
118 };
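
/*
 * Illustrative note: xcppm_change_cpu_power() below indexes this table
 * with (level - 1), so cpu power level 1 selects BBC_ESTAR_SLOW and the
 * highest level selects BBC_ESTAR_FAST (assuming the usual 1..3 cpu
 * levels on this platform).
 */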
119 
120 int bbc_delay = 10;			/* microsec */
121 
122 
123 /*
124  * Configuration data structures
125  */
126 static struct cb_ops xcppm_cb_ops = {
127 	ppm_open,		/* open */
128 	ppm_close,		/* close */
129 	nodev,			/* strategy */
130 	nodev,			/* print */
131 	nodev,			/* dump */
132 	nodev,			/* read */
133 	nodev,			/* write */
134 	ppm_ioctl,		/* ioctl */
135 	nodev,			/* devmap */
136 	nodev,			/* mmap */
137 	nodev,			/* segmap */
138 	nochpoll,		/* poll */
139 	ddi_prop_op,		/* prop_op */
140 	NULL,			/* streamtab */
141 	D_MP | D_NEW,		/* driver compatibility flag */
142 	CB_REV,			/* cb_ops revision */
143 	nodev,			/* async read */
144 	nodev			/* async write */
145 };
146 
147 static struct bus_ops xcppm_bus_ops = {
148 	BUSO_REV,
149 	0,
150 	0,
151 	0,
152 	0,
153 	0,
154 	ddi_no_dma_map,
155 	ddi_no_dma_allochdl,
156 	ddi_no_dma_freehdl,
157 	ddi_no_dma_bindhdl,
158 	ddi_no_dma_unbindhdl,
159 	ddi_no_dma_flush,
160 	ddi_no_dma_win,
161 	ddi_no_dma_mctl,
162 	xcppm_ctlops,
163 	0,
164 	0,			/* (*bus_get_eventcookie)();	*/
165 	0,			/* (*bus_add_eventcall)();	*/
166 	0,			/* (*bus_remove_eventcall)();	*/
167 	0			/* (*bus_post_event)();		*/
168 };
169 
170 static struct dev_ops xcppm_ops = {
171 	DEVO_REV,		/* devo_rev */
172 	0,			/* refcnt */
173 	ppm_getinfo,		/* info */
174 	nulldev,		/* identify */
175 	nulldev,		/* probe */
176 	xcppm_attach,		/* attach */
177 	xcppm_detach,		/* detach */
178 	nodev,			/* reset */
179 	&xcppm_cb_ops,		/* driver operations */
180 	&xcppm_bus_ops,		/* bus operations */
181 	NULL,			/* power */
182 	ddi_quiesce_not_supported,	/* devo_quiesce */
183 };
184 
185 extern struct mod_ops mod_driverops;
186 
187 static struct modldrv modldrv = {
188 	&mod_driverops,		/* type of module - driver */
189 	"platform pm driver",
190 	&xcppm_ops
191 };
192 
193 static struct modlinkage modlinkage = {
194 	MODREV_1,
195 	&modldrv,
196 	NULL
197 };
198 
199 
200 int
201 _init(void)
202 {
203 	return (ppm_init(&modlinkage, sizeof (xcppm_unit_t), "xc"));
204 }
205 
206 
207 int
208 _fini(void)
209 {
210 	return (EBUSY);
211 }
212 
213 
214 int
215 _info(struct modinfo *modinfop)
216 {
217 	return (mod_info(&modlinkage, modinfop));
218 }
219 
220 
221 static int
222 xcppm_map_all_regs(dev_info_t *dip)
223 {
224 	ddi_device_acc_attr_t attr_be, attr_le;
225 	int rv0, rv1, rv2, rv3;
226 	xcppm_unit_t *unitp;
227 	caddr_t base_addr;
228 	uint8_t data8;
229 
230 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
231 	attr_be.devacc_attr_version = DDI_DEVICE_ATTR_V0;
232 	attr_be.devacc_attr_endian_flags  = DDI_STRUCTURE_BE_ACC;
233 	attr_be.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
234 
235 	attr_le.devacc_attr_version = DDI_DEVICE_ATTR_V0;
236 	attr_le.devacc_attr_endian_flags  = DDI_STRUCTURE_LE_ACC;
237 	attr_le.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
238 
239 	rv0 = ddi_regs_map_setup(dip, 0, &base_addr, 0, 0, &attr_be,
240 	    &unitp->hndls.bbc_estar_ctrl);
241 
242 	unitp->regs.bbc_estar_ctrl = (uint16_t *)(base_addr +
243 	    BBC_ESTAR_CTRL_OFFSET);
244 	unitp->regs.bbc_assert_change = (uint32_t *)(base_addr +
245 	    BBC_ASSERT_CHANGE_OFFSET);
246 	unitp->regs.bbc_pll_settle = (uint32_t *)(base_addr +
247 	    BBC_PLL_SETTLE_OFFSET);
248 
249 	rv1 = ddi_regs_map_setup(dip, 1,
250 	    (caddr_t *)&unitp->regs.rio_mode_auxio,
251 	    0, 0, &attr_le, &unitp->hndls.rio_mode_auxio);
252 
253 	rv2 = ddi_regs_map_setup(dip, 2, &base_addr,
254 	    0, 0, &attr_le, &unitp->hndls.gpio_bank_select);
255 
256 	unitp->regs.gpio_bank_sel_index = (uint8_t *)(base_addr +
257 	    GPIO_BANK_SEL_INDEX_OFFSET);
258 	unitp->regs.gpio_bank_sel_data = (uint8_t *)(base_addr +
259 	    GPIO_BANK_SEL_DATA_OFFSET);
260 
261 	rv3 = ddi_regs_map_setup(dip, 3, &base_addr, 0, 0, &attr_le,
262 	    &unitp->hndls.gpio_data_ports);
263 
264 	unitp->regs.gpio_port1_data = (uint8_t *)(base_addr +
265 	    GPIO_PORT1_DATA_OFFSET);
266 	unitp->regs.gpio_port2_data = (uint8_t *)(base_addr +
267 	    GPIO_PORT2_DATA_OFFSET);
268 
269 	if (rv0 != DDI_SUCCESS || rv1 != DDI_SUCCESS ||
270 	    rv2 != DDI_SUCCESS || rv3 != DDI_SUCCESS) {
271 		if (rv0 == DDI_SUCCESS)
272 			ddi_regs_map_free(&unitp->hndls.bbc_estar_ctrl);
273 		if (rv1 == DDI_SUCCESS)
274 			ddi_regs_map_free(&unitp->hndls.rio_mode_auxio);
275 		if (rv2 == DDI_SUCCESS)
276 			ddi_regs_map_free(&unitp->hndls.gpio_bank_select);
277 		if (rv3 == DDI_SUCCESS)
278 			ddi_regs_map_free(&unitp->hndls.gpio_data_ports);
279 		return (DDI_FAILURE);
280 	}
281 
282 	/*
283 	 * Ppm uses GPIO bits in Bank 0.  Make sure Bank 0 is selected.
284 	 */
285 	data8 = SIO_CONFIG2_INDEX;
286 	XCPPM_SETGET8(unitp->hndls.gpio_bank_select,
287 	    unitp->regs.gpio_bank_sel_index, data8);
288 	data8 = XCPPM_GET8(unitp->hndls.gpio_bank_select,
289 	    unitp->regs.gpio_bank_sel_data);
290 
291 	data8 &= 0x7f;	/* Set Bit7 to zero */
292 	XCPPM_SETGET8(unitp->hndls.gpio_bank_select,
293 	    unitp->regs.gpio_bank_sel_data, data8);
294 
295 	return (DDI_SUCCESS);
296 }
297 
298 
299 static int
300 xcppm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
301 {
302 #ifdef DEBUG
303 	char *str = "xcppm_attach";
304 #endif
305 	xcppm_unit_t *unitp;
306 	ppm_domain_t **dompp;
307 	int retval;
308 
309 	DPRINTF(D_ATTACH, ("%s: attach cmd %d\n", str, cmd));
310 	retval = DDI_SUCCESS;
311 
312 	switch (cmd) {
313 	case DDI_ATTACH:
314 		if (ppm_inst != -1) {
315 			DPRINTF(D_ERROR,
316 			    ("%s: instance already attached\n", str));
317 			return (DDI_FAILURE);
318 		}
319 		ppm_inst = ddi_get_instance(dip);
320 
321 		/*
322 		 * Allocate and initialize soft state structure
323 		 */
324 		if (ddi_soft_state_zalloc(ppm_statep, ppm_inst) != 0)
325 			return (DDI_FAILURE);
326 		unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
327 		mutex_init(&unitp->unit_lock, NULL, MUTEX_DRIVER, NULL);
328 		mutex_init(&unitp->creator_lock, NULL, MUTEX_DRIVER, NULL);
329 
330 		if (ddi_create_minor_node(dip, "ppm", S_IFCHR,
331 		    ppm_inst, "ddi_ppm", 0) == DDI_FAILURE) {
332 			ddi_soft_state_free(ppm_statep, ppm_inst);
333 			DPRINTF(D_ERROR,
334 			    ("%s: Can't create minor for 0x%p\n", str, dip));
335 			return (DDI_FAILURE);
336 		}
337 		ddi_report_dev(dip);
338 		unitp->dip = dip;
339 
340 		if (retval = ppm_create_db(dip))
341 			return (retval);
342 
343 		/*
344 		 * Map all of the registers under the ppm node.
345 		 */
346 		if (xcppm_map_all_regs(dip) != DDI_SUCCESS)
347 			return (DDI_FAILURE);
348 
349 		if ((retval =
350 		    pm_register_ppm(ppm_claim_dev, dip)) != DDI_SUCCESS) {
351 			DPRINTF(D_ERROR,
352 			    ("%s: can't register ppm handler\n", str));
353 			return (retval);
354 		}
355 
356 		for (dompp = ppm_domains; *dompp; dompp++)
357 			mutex_init(&(*dompp)->lock, NULL, MUTEX_DRIVER, NULL);
358 
359 		break;
360 
361 	case DDI_RESUME:
362 		unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
363 		mutex_enter(&unitp->unit_lock);
364 		unitp->state &= ~XCPPM_ST_SUSPENDED;
365 		mutex_exit(&unitp->unit_lock);
366 		break;
367 
368 	default:
369 		cmn_err(CE_CONT, "xcppm_attach: unknown "
370 		    "attach command %d, dip 0x%p\n", cmd, dip);
371 		retval = DDI_FAILURE;
372 	}
373 
374 	return (retval);
375 }
376 
377 
378 /*
379  * set the front panel LED:
380  * PPM_LEDON turns it on, PPM_LEDOFF turns it off.
381  * In the GPIO register, 0x0 means led-on and 0x2 means led-off.
382  */
383 static void
384 xcppm_set_led(int action)
385 {
386 	xcppm_unit_t *unitp;
387 	uint8_t	reg;
388 
389 	ASSERT(action == PPM_LEDON || action == PPM_LEDOFF);
390 	DPRINTF(D_LED, ("xcppm_set_led: Turn LED %s\n",
391 	    (action == PPM_LEDON) ? "on" : "off"));
392 
393 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
394 	reg = XCPPM_GET8(unitp->hndls.gpio_data_ports,
395 	    unitp->regs.gpio_port1_data);
396 	if (action == PPM_LEDON)
397 		reg &= ~LED;
398 	else
399 		reg |= LED;
400 	XCPPM_SETGET8(unitp->hndls.gpio_data_ports,
401 	    unitp->regs.gpio_port1_data, reg);
402 }
403 
404 
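/*
 * Self-rearming timeout handler: each invocation toggles the LED and then
 * schedules the next toggle.  xcppm_freeze_led() stops the cycle by zeroing
 * led_tid under unit_lock, which is why this routine re-checks led_tid
 * before doing anything.
 */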
405 static void
406 xcppm_blink_led(void *action)
407 {
408 	xcppm_unit_t *unitp;
409 	int new_action;
410 	clock_t intvl;
411 
412 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
413 	mutex_enter(&unitp->unit_lock);
414 	if (unitp->led_tid == 0) {
415 		mutex_exit(&unitp->unit_lock);
416 		return;
417 	}
418 
419 	if ((int)(uintptr_t)action == PPM_LEDON) {
420 		new_action = PPM_LEDOFF;
421 		intvl = PPM_LEDOFF_INTERVAL;
422 	} else {
423 		ASSERT((int)(uintptr_t)action == PPM_LEDOFF);
424 		new_action = PPM_LEDON;
425 		intvl = PPM_LEDON_INTERVAL;
426 	}
427 
428 	xcppm_set_led(new_action);
429 	unitp->led_tid = timeout(xcppm_blink_led, (void *)(uintptr_t)new_action,
430 	    intvl);
431 	mutex_exit(&unitp->unit_lock);
432 }
433 
434 
435 static void
436 xcppm_freeze_led(void *action)
437 {
438 	xcppm_unit_t *unitp;
439 	timeout_id_t tid;
440 
441 	DPRINTF(D_LOWEST, ("xcppm_freeze_led: action %d\n",
442 	    (int)(uintptr_t)action));
443 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
444 	mutex_enter(&unitp->unit_lock);
445 	tid = unitp->led_tid;
446 	unitp->led_tid = 0;
447 	mutex_exit(&unitp->unit_lock);
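	/*
	 * unit_lock must be dropped before calling untimeout(): if the
	 * callout is already running, untimeout() waits for it to finish,
	 * and xcppm_blink_led() itself acquires unit_lock.
	 */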
448 	untimeout(tid);
449 	mutex_enter(&unitp->unit_lock);
450 	xcppm_set_led((int)(uintptr_t)action);
451 	mutex_exit(&unitp->unit_lock);
452 }
453 
454 
455 /* ARGSUSED */
456 static int
457 xcppm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
458 {
459 	xcppm_unit_t *unitp;
460 
461 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
462 	DPRINTF(D_DETACH, ("xcppm_detach: cmd %d\n", cmd));
463 
464 	switch (cmd) {
465 	case DDI_DETACH:
466 		return (DDI_FAILURE);
467 
468 	case DDI_SUSPEND:
469 		mutex_enter(&unitp->unit_lock);
470 		unitp->state |= XCPPM_ST_SUSPENDED;
471 		mutex_exit(&unitp->unit_lock);
472 
473 		/*
474 		 * Suspend requires that timeout callouts be canceled.
475 		 * Turning off the LED blinking will cancel the timeout.
476 		 */
477 		xcppm_freeze_led((void *)PPM_LEDON);
478 		return (DDI_SUCCESS);
479 
480 	default:
481 		return (DDI_FAILURE);
482 	}
483 }
484 
485 
486 /*
487  * Device we claimed has detached.  We must get rid of
488  * our state which was used to track this device.
489  */
490 static void
491 xcppm_detach_ctlop(dev_info_t *dip, power_req_t *reqp)
492 {
493 	ppm_dev_t *ppmd;
494 
495 	ppmd = PPM_GET_PRIVATE(dip);
496 	if (ppmd == NULL || reqp->req.ppm_config_req.result != DDI_SUCCESS)
497 		return;
498 
499 	ppm_rem_dev(dip);
500 }
501 
502 
503 /*
504  * The system is being resumed from a cpr suspend operation and this
505  * device's attach entry will be called shortly.  The driver will set
506  * the device's power to a conventional starting value, and we need to
507  * stay in sync and set our private copy to the same value.
508  */
509 /* ARGSUSED */
510 static void
511 xcppm_resume_ctlop(dev_info_t *dip, power_req_t *reqp)
512 {
513 	ppm_domain_t *domp;
514 	ppm_dev_t *ppmd;
515 	int powered;
516 
517 	ppmd = PPM_GET_PRIVATE(dip);
518 	if (ppmd == NULL)
519 		return;
520 
521 	/*
522 	 * Maintain correct powered count for domain which cares
523 	 */
524 	powered = 0;
525 	domp = ppmd->domp;
526 	mutex_enter(&domp->lock);
527 	if (domp == &xcppm_fet) {
528 		for (ppmd = domp->devlist; ppmd; ppmd = ppmd->next) {
529 			if (ppmd->dip == dip && ppmd->level)
530 				powered++;
531 		}
532 
533 		/*
534 		 * If this device was powered off when the system was
535 		 * suspended, this resume acts like a power-on transition,
536 		 * so we adjust the count.
537 		 */
538 		if (powered == 0)
539 			domp->pwr_cnt++;
540 	}
541 
542 	for (ppmd = domp->devlist; ppmd; ppmd = ppmd->next) {
543 		if (ppmd->dip == dip)
544 			ppmd->level = ppmd->rplvl = PM_LEVEL_UNKNOWN;
545 	}
546 	mutex_exit(&domp->lock);
547 }
548 
549 
550 /*
551  * Change the power level for a component of a device.  If the change
552  * arg is true, we call the framework to actually change the device's
553  * power; otherwise, we just update our own copy of the power level.
554  */
555 static int
556 xcppm_set_level(ppm_dev_t *ppmd, int cmpt, int level, boolean_t change)
557 {
558 #ifdef DEBUG
559 	char *str = "xcppm_set_level";
560 #endif
561 	int ret;
562 
563 	ret = DDI_SUCCESS;
564 	if (change)
565 		ret = pm_power(ppmd->dip, cmpt, level);
566 
567 	DPRINTF(D_SETLVL, ("%s: \"%s\" change=%d, old %d, new %d, ret %d\n",
568 	    str, ppmd->path, change, ppmd->level, level, ret));
569 
570 	if (ret == DDI_SUCCESS) {
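		/*
		 * The device is now at "level"; clearing rplvl marks any
		 * cached requested level as no longer pending.
		 */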
571 		ppmd->level = level;
572 		ppmd->rplvl = PM_LEVEL_UNKNOWN;
573 	}
574 
575 	return (ret);
576 }
577 
578 
579 static int
580 xcppm_change_power_level(ppm_dev_t *ppmd, int cmpt, int level)
581 {
582 	return (xcppm_set_level(ppmd, cmpt, level, B_TRUE));
583 }
584 
585 
586 static int
587 xcppm_record_level_change(ppm_dev_t *ppmd, int cmpt, int level)
588 {
589 	return (xcppm_set_level(ppmd, cmpt, level, B_FALSE));
590 }
591 
592 
593 static uint8_t
594 xcppm_gpio_port2(int action, uint8_t pos)
595 {
596 #ifdef DEBUG
597 	char *str = "xcppm_gpio_port2";
598 #endif
599 	xcppm_unit_t *unitp;
600 	uint8_t data8, buf8;
601 	uint8_t	ret;
602 
603 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
604 	mutex_enter(&unitp->gpio_lock);
605 
606 	data8 = buf8 = XCPPM_GET8(unitp->hndls.gpio_data_ports,
607 	    unitp->regs.gpio_port2_data);
608 
609 	switch (action) {
610 	case XCPPM_GETBIT:
611 		ret = data8 & pos;
612 		DPRINTF(D_GPIO, ("%s: READ: GPIO Bank2 value 0x%x\n",
613 		    str, buf8));
614 		break;
615 
616 	case XCPPM_SETBIT:
617 	case XCPPM_CLRBIT:
618 		if (action == XCPPM_SETBIT)
619 			data8 |= pos;
620 		else
621 			data8 &= ~pos;
622 		XCPPM_SETGET8(unitp->hndls.gpio_data_ports,
623 		    unitp->regs.gpio_port2_data, data8);
624 		ret = data8 & pos;
625 		DPRINTF(D_GPIO, ("%s: %s: GPIO Bank2 "
626 		    "bit 0x%x changed from 0x%x to 0x%x\n",
627 		    str, (action == XCPPM_SETBIT) ? "UP" : "DOWN",
628 		    pos, buf8, data8));
629 		break;
630 
631 	default:
632 		cmn_err(CE_PANIC, "xcalppm: unrecognized register "
633 		    "IO command %d\n", action);
634 		break;
635 	}
636 	mutex_exit(&unitp->gpio_lock);
637 
638 	return (ret);
639 }
640 
641 
642 /*
643  * Raise the power level of a subrange of cpus.  Used when cpu driver
644  * failed an attempt to lower the power of a cpu (probably because
645  * it got busy).  Need to revert the ones we already changed.
646  *
647  * ecpup = the ppm_dev_t for the cpu which failed to lower power
648  * level = power level to reset prior cpus to
649  */
650 static void
651 xcppm_revert_cpu_power(ppm_dev_t *ecpup, int level)
652 {
653 	ppm_dev_t *cpup;
654 
655 	for (cpup = xcppm_cpu.devlist; cpup != ecpup; cpup = cpup->next) {
656 		DPRINTF(D_CPU, ("xrcp: \"%s\", revert to level %d\n",
657 		    cpup->path, level));
658 		(void) xcppm_change_power_level(cpup, 0, level);
659 	}
660 }
661 
662 /*
663  * Switch the DC/DC converter.  Clearing the GPIO bit in SuperI/O puts
664  * the converter in low power mode and setting the bit puts it back in
665  * normal mode.
666  */
667 static void
668 xcppm_switch_dcdc_converter(int action)
669 {
670 	int tries = XCPPM_VCL_TRIES;
671 	uint_t spl;
672 	uint64_t stick_begin, stick_end;
673 	uint64_t tick_begin, tick_end;
674 	uint64_t cur_speed_ratio, full_speed_ratio;
675 	static int xcppm_dcdc_lpm;
676 
677 	switch (action) {
678 	case XCPPM_SETBIT:
679 		if (xcppm_dcdc_lpm) {
680 			DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: "
681 			    "switch to normal power mode.\n"));
682 			(void) xcppm_gpio_port2(action, HIGHPWR);
683 			xcppm_dcdc_lpm = 0;
684 		}
685 		break;
686 	case XCPPM_CLRBIT:
687 		/*
688 		 * In some fast CPU configurations, the DC/DC converter was
689 		 * put in low power mode before the CPUs had made the
690 		 * transition to 1/32 of clock speed.  In those cases the
691 		 * system was shut down by hardware for protection.  To avoid
692 		 * that problem, we make sure the CPUs have made the clock
693 		 * transition before the converter is put into low power mode.
694 		 */
695 		ASSERT(xcppm_dcdc_lpm == 0);
696 		kpreempt_disable();
697 		full_speed_ratio = cpunodes[CPU->cpu_id].clock_freq /
698 		    sys_tick_freq;
699 		while (tries) {
700 			spl = ddi_enter_critical();
701 			tick_begin = gettick_counter();
702 			stick_timestamp((int64_t *)&stick_begin);
703 			ddi_exit_critical(spl);
704 			drv_usecwait(XCPPM_VCL_DELAY);
705 			spl = ddi_enter_critical();
706 			tick_end = gettick_counter();
707 			stick_timestamp((int64_t *)&stick_end);
708 			ddi_exit_critical(spl);
709 			cur_speed_ratio = (tick_end - tick_begin) /
710 			    (stick_end - stick_begin);
711 
712 			/*
713 			 * tick/stick at current speed should at most be
714 			 * equal to full-speed tick/stick, adjusted with
715 			 * full/lowest clock speed ratio.  If not, speed
716 			 * transition has not happened yet.
717 			 */
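			/*
			 * Worked example (hypothetical numbers, assuming
			 * XCPPM_VCL_DIVISOR is 32 as the 1/32 comment above
			 * implies): with a 900MHz cpu clock and a 150MHz
			 * stick, full_speed_ratio is 6; after the switch to
			 * 1/32 clock the measured ratio is 0 and the test
			 * 0 <= (6 / 32) + 1 passes, while at full speed
			 * 6 <= 1 fails and we keep waiting.
			 */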
718 			if (cur_speed_ratio <= ((full_speed_ratio /
719 			    XCPPM_VCL_DIVISOR) + 1)) {
720 				DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: "
721 				    "switch to low power mode.\n"));
722 				(void) xcppm_gpio_port2(action, HIGHPWR);
723 				xcppm_dcdc_lpm = 1;
724 				break;
725 			}
726 			DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: CPU "
727 			    "has not made transition to lowest speed yet "
728 			    "(%d)\n", tries));
729 			tries--;
730 		}
731 		kpreempt_enable();
732 		break;
733 	}
734 }
735 
736 static void
737 xcppm_rio_mode(xcppm_unit_t *unitp, int mode)
738 {
739 	uint32_t data32, buf32;
740 
741 	mutex_enter(&unitp->gpio_lock);
742 	data32 = buf32 = XCPPM_GET32(unitp->hndls.rio_mode_auxio,
743 	    unitp->regs.rio_mode_auxio);
744 	if (mode == XCPPM_SETBIT)
745 		data32 |= RIO_BBC_ESTAR_MODE;
746 	else
747 		data32 &= ~RIO_BBC_ESTAR_MODE;
748 	XCPPM_SETGET32(unitp->hndls.rio_mode_auxio,
749 	    unitp->regs.rio_mode_auxio, data32);
750 	mutex_exit(&unitp->gpio_lock);
751 
752 	DPRINTF(D_CPU, ("xcppm_rio_mode: %s: change from 0x%x to 0x%x\n",
753 	    (mode == XCPPM_SETBIT) ? "DOWN" : "UP", buf32, data32));
754 }
755 
756 
757 /*
758  * change the power level of all cpus to the arg value;
759  * the caller needs to ensure that a legal transition is requested.
760  */
761 static int
762 xcppm_change_cpu_power(int newlevel)
763 {
764 #ifdef DEBUG
765 	char *str = "xcppm_ccp";
766 #endif
767 	int index, level, oldlevel;
768 	int lowest, highest;
769 	int undo_flag, ret;
770 	int speedup, incr;
771 	uint32_t data32;
772 	uint16_t data16;
773 	xcppm_unit_t *unitp;
774 	ppm_dev_t *cpup;
775 	dev_info_t *dip;
776 	char *chstr;
777 
778 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
779 	ASSERT(unitp);
780 	cpup = xcppm_cpu.devlist;
781 	lowest = cpup->lowest;
782 	highest = cpup->highest;
783 
784 	/*
785 	 * not all cpus may have transitioned to a known level by this time
786 	 */
787 	oldlevel = (cpup->level == PM_LEVEL_UNKNOWN) ? highest : cpup->level;
788 	dip = cpup->dip;
789 	ASSERT(dip);
790 
791 	DPRINTF(D_CPU, ("%s: old %d, new %d, highest %d, lowest %d\n",
792 	    str, oldlevel, newlevel, highest, lowest));
793 
794 	if (newlevel > oldlevel) {
795 		chstr = "UP";
796 		speedup = 1;
797 		incr = 1;
798 	} else if (newlevel < oldlevel) {
799 		chstr = "DOWN";
800 		speedup = 0;
801 		incr = -1;
802 	} else
803 		return (DDI_SUCCESS);
804 
805 	undo_flag = 0;
806 	if (speedup) {
807 		/*
808 		 * If coming up from lowest power level, set the E*
809 		 * mode bit in GPIO to make power supply efficient
810 		 * at normal power.
811 		 */
812 		if (oldlevel == cpup->lowest) {
813 			xcppm_switch_dcdc_converter(XCPPM_SETBIT);
814 			undo_flag = 1;
815 		}
816 	} else {
817 		/*
818 		 * set BBC Estar mode bit in RIO AUXIO register
819 		 */
820 		if (oldlevel == highest) {
821 			xcppm_rio_mode(unitp, XCPPM_SETBIT);
822 			undo_flag = 1;
823 		}
824 	}
825 
826 	/*
827 	 * this loop will execute 1x or 2x depending on
828 	 * number of times we need to change clock rates
829 	 */
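	/*
	 * For example (with the usual three levels), a change from level 3
	 * down to level 1 iterates at level 2 and then level 1; an
	 * adjacent-level change iterates once.
	 */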
830 	for (level = oldlevel+incr; level != newlevel+incr; level += incr) {
831 		for (cpup = xcppm_cpu.devlist; cpup; cpup = cpup->next) {
832 			if (cpup->level == level)
833 				continue;
834 			ret = xcppm_change_power_level(cpup, 0, level);
835 			DPRINTF(D_CPU, ("%s: \"%s\", %s to level %d, ret %d\n",
836 			    str, cpup->path, chstr, cpup->level, ret));
837 			if (ret == DDI_SUCCESS)
838 				continue;
839 
840 			/*
841 			 * if the driver was unable to lower cpu speed,
842 			 * the cpu probably got busy; set the previous
843 			 * cpus back to the original level
844 			 */
845 			if (speedup == 0)
846 				xcppm_revert_cpu_power(cpup, level + 1);
847 
848 			if (undo_flag) {
849 				if (speedup)
850 					xcppm_switch_dcdc_converter(
851 					    XCPPM_CLRBIT);
852 				else
853 					xcppm_rio_mode(unitp, XCPPM_CLRBIT);
854 			}
855 			return (ret);
856 		}
857 
858 		index = level - 1;
859 		spm_change_schizo_speed(index);
860 		DPRINTF(D_CPU, ("%s: safari config reg changed\n", str));
861 
862 		/*
863 		 * set the delay times for changing to this rate
864 		 */
865 		data32 = XCPPM_BBC_DELAY(index);
866 		XCPPM_SETGET32(unitp->hndls.bbc_estar_ctrl,
867 		    (caddr_t)unitp->regs.bbc_assert_change, data32);
868 		DPRINTF(D_CPU, ("%s: %s: Wrote E* Assert Change Time "
869 		    "(t1) = 0x%x\n", str, chstr, data32));
870 
871 		data32 = XCPPM_BBC_DELAY(index);
872 		XCPPM_SETGET32(unitp->hndls.bbc_estar_ctrl,
873 		    (caddr_t)unitp->regs.bbc_pll_settle, data32);
874 		DPRINTF(D_CPU, ("%s: %s: Wrote E* PLL Settle Time "
875 		    "(t4) = 0x%x\n", str, chstr, data32));
876 
877 		data16 = bbc_estar_control_masks[index];
878 		XCPPM_SETGET16(unitp->hndls.bbc_estar_ctrl,
879 		    (caddr_t)unitp->regs.bbc_estar_ctrl, data16);
880 		DPRINTF(D_CPU, ("%s: %s: Wrote BCC E* Control = 0x%x\n",
881 		    str, chstr, data16));
882 	}
883 
884 	/*
885 	 * clear RIO E* mode at full speed; put DC/DC in low power at lowest
886 	 */
887 	if (speedup) {
888 		if (newlevel == highest)
889 			xcppm_rio_mode(unitp, XCPPM_CLRBIT);
890 	} else {
891 		if (newlevel == lowest)
892 			xcppm_switch_dcdc_converter(XCPPM_CLRBIT);
893 	}
894 
895 	return (DDI_SUCCESS);
896 }
897 
898 
899 /*
900  * Process a request to change the power level of a cpu.  If all cpus
901  * don't want to be at the same power yet, or if we are currently
902  * refusing slowdown requests due to thermal stress, just cache the
903  * request.  Otherwise, make the change for all cpus.
904  */
905 /* ARGSUSED */
906 static int
907 xcppm_manage_cpus(dev_info_t *dip, power_req_t *reqp, int *result)
908 {
909 #ifdef DEBUG
910 	char *str = "xcppm_manage_cpus";
911 #endif
912 	int old, new, ret, kmflag;
913 	ppm_dev_t *ppmd;
914 	pm_ppm_devlist_t *devlist = NULL, *p;
915 	int		do_rescan = 0;
916 	dev_info_t	*rescan_dip;
917 
918 	*result = DDI_SUCCESS;
919 	switch (reqp->request_type) {
920 	case PMR_PPM_SET_POWER:
921 		break;
922 	case PMR_PPM_POWER_CHANGE_NOTIFY:
923 		/* cpu driver can't change cpu power level by itself */
924 	default:
925 		return (DDI_FAILURE);
926 	}
927 
928 	ppmd = PPM_GET_PRIVATE(dip);
929 	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
930 	old = reqp->req.ppm_set_power_req.old_level;
931 	new = reqp->req.ppm_set_power_req.new_level;
932 
933 	/*
934 	 * At power on, the cpus are at full speed.  There is no hardware
935 	 * transition needed for going from unknown to full.  However, the
936 	 * state of the pm framework and cpu driver needs to be adjusted.
937 	 */
938 	if (ppmd->level == PM_LEVEL_UNKNOWN && new == ppmd->highest) {
939 		*result = ret = xcppm_change_power_level(ppmd, 0, new);
940 		if (ret != DDI_SUCCESS) {
941 			DPRINTF(D_CPU, ("%s: Failed to change "
942 			    "power level to %d\n", str, new));
943 		}
944 		return (ret);
945 	}
946 
947 	if (new == ppmd->level) {
948 		DPRINTF(D_CPU, ("%s: already at power level %d\n", str, new));
949 		return (DDI_SUCCESS);
950 	}
951 
952 	ppmd->rplvl = new;
953 
954 	/*
955 	 * A lower-to-higher level request is granted and made effective
956 	 * on both cpus.  On a platform with more than two cpus, the code
957 	 * below would need to be modified to remember the remaining
958 	 * unsolicited cpus so they could be rescanned.
959 	 * A higher-to-lower request must be agreed to by all cpus.
960 	 */
961 	for (ppmd = xcppm_cpu.devlist; ppmd; ppmd = ppmd->next) {
962 		if (ppmd->rplvl == new)
963 			continue;
964 
965 		if (new < old) {
966 			DPRINTF(D_SOME, ("%s: not all cpus want to go down to "
967 			    "level %d yet\n", str, new));
968 			return (DDI_SUCCESS);
969 		}
970 
971 		/*
972 		 * If a single cpu requests power up, honor the request
973 		 * by powering up both cpus.
974 		 */
975 		if (new > old) {
976 			DPRINTF(D_SOME, ("%s: powering up device(%s@%s, %p) "
977 			    "because of request from dip(%s@%s, %p), "
978 			    "need pm_rescan\n", str, PM_NAME(ppmd->dip),
979 			    PM_ADDR(ppmd->dip), (void *)ppmd->dip,
980 			    PM_NAME(dip), PM_ADDR(dip), (void *)dip))
981 			do_rescan++;
982 			rescan_dip = ppmd->dip;
983 			break;
984 		}
985 	}
986 
987 	ret = xcppm_change_cpu_power(new);
988 	*result = ret;
989 
990 	if (ret == DDI_SUCCESS) {
991 		if (reqp->req.ppm_set_power_req.canblock == PM_CANBLOCK_BLOCK)
992 			kmflag = KM_SLEEP;
993 		else
994 			kmflag = KM_NOSLEEP;
995 
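		/*
		 * Build a list of the other cpus whose levels were changed
		 * as a side effect of this request; the framework picks this
		 * up through the cookie and brings its own notion of their
		 * power levels up to date (assumption based on how
		 * pm_ppm_devlist_t is consumed by the common pm code).
		 */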
996 		for (ppmd = xcppm_cpu.devlist; ppmd; ppmd = ppmd->next) {
997 			if (ppmd->dip == dip)
998 				continue;
999 
1000 			if ((p = kmem_zalloc(sizeof (pm_ppm_devlist_t),
1001 			    kmflag)) == NULL) {
1002 				break;
1003 			}
1004 			p->ppd_who = ppmd->dip;
1005 			p->ppd_cmpt = ppmd->cmpt;
1006 			p->ppd_old_level = old;
1007 			p->ppd_new_level = new;
1008 			p->ppd_next = devlist;
1009 
1010 			devlist = p;
1011 		}
1012 		reqp->req.ppm_set_power_req.cookie = (void *) devlist;
1013 
1014 		if (do_rescan > 0)
1015 			pm_rescan(rescan_dip);
1016 	}
1017 
1018 	return (ret);
1019 }
1020 
1021 
1022 /*
1023  * If powering off and all devices in this domain will now be off,
1024  * shut off common power.  If powering up and no devices up yet,
1025  * turn on common power.  Always make the requested power level
1026  * change for the target device.
1027  */
1028 static int
1029 xcppm_manage_fet(dev_info_t *dip, power_req_t *reqp, int *result)
1030 {
1031 #ifdef DEBUG
1032 	char *str = "xcppm_manage_fet";
1033 #endif
1034 	int (*pwr_func)(ppm_dev_t *, int, int);
1035 	int new, old, cmpt, incr = 0;
1036 	ppm_dev_t *ppmd;
1037 
1038 	ppmd = PPM_GET_PRIVATE(dip);
1039 	DPRINTF(D_FET, ("%s: \"%s\", req %s\n", str,
1040 	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));
1041 
1042 	*result = DDI_SUCCESS;	/* change later for failures */
1043 	switch (reqp->request_type) {
1044 	case PMR_PPM_SET_POWER:
1045 		pwr_func = xcppm_change_power_level;
1046 		old = reqp->req.ppm_set_power_req.old_level;
1047 		new = reqp->req.ppm_set_power_req.new_level;
1048 		cmpt = reqp->req.ppm_set_power_req.cmpt;
1049 		break;
1050 	case PMR_PPM_POWER_CHANGE_NOTIFY:
1051 		pwr_func = xcppm_record_level_change;
1052 		old = reqp->req.ppm_notify_level_req.old_level;
1053 		new = reqp->req.ppm_notify_level_req.new_level;
1054 		cmpt = reqp->req.ppm_notify_level_req.cmpt;
1055 		break;
1056 	default:
1057 		return (*result = DDI_FAILURE);
1058 
1059 	}
1060 
1061 	/* This is common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
1062 	DPRINTF(D_FET, ("%s: \"%s\", old %d, new %d\n",
1063 	    str, ppmd->path, old, new));
1064 
1065 	ASSERT(old == ppmd->level);
1066 	if (new == ppmd->level)
1067 		return (DDI_SUCCESS);
1068 
1069 	PPM_LOCK_DOMAIN(ppmd->domp);
1070 	/*
1071 	 * Devices in this domain are known to have 0 (off) as their
1072 	 * lowest power level.  We use this fact to simplify the logic.
1073 	 */
1074 	if (new > 0) {
1075 		if (ppmd->domp->pwr_cnt == 0)
1076 			(void) xcppm_gpio_port2(XCPPM_SETBIT, DRVON);
1077 		if (old == 0) {
1078 			ppmd->domp->pwr_cnt++;
1079 			incr = 1;
1080 			DPRINTF(D_FET, ("%s: UP cnt = %d\n",
1081 			    str, ppmd->domp->pwr_cnt));
1082 		}
1083 	}
1084 
1085 	PPM_UNLOCK_DOMAIN(ppmd->domp);
1086 
1087 	ASSERT(ppmd->domp->pwr_cnt > 0);
1088 
1089 	if ((*result = (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS) {
1090 		DPRINTF(D_FET, ("%s: \"%s\" power change failed \n",
1091 		    str, ppmd->path));
1092 	}
1093 
1094 	PPM_LOCK_DOMAIN(ppmd->domp);
1095 
1096 	/*
1097 	 * Decr the power count in two cases:
1098 	 *
1099 	 *   1) request was to power device down and was successful
1100 	 *   2) request was to power up (we pre-incremented count), but failed.
1101 	 */
1102 	if ((*result == DDI_SUCCESS && ppmd->level == 0) ||
1103 	    (*result != DDI_SUCCESS && incr)) {
1104 		ASSERT(ppmd->domp->pwr_cnt > 0);
1105 		ppmd->domp->pwr_cnt--;
1106 		DPRINTF(D_FET, ("%s: DN cnt = %d\n", str, ppmd->domp->pwr_cnt));
1107 		if (ppmd->domp->pwr_cnt == 0)
1108 			(void) xcppm_gpio_port2(XCPPM_CLRBIT, DRVON);
1109 	}
1110 
1111 	PPM_UNLOCK_DOMAIN(ppmd->domp);
1112 	ASSERT(ppmd->domp->pwr_cnt >= 0);
1113 	return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
1114 }
1115 
1116 
1117 /*
1118  * Since UPA64S relies on PCI B staying at nominal 33MHz in order to
1119  * have its interrupt pulse function properly, we ensure that:
1120  * - PCI B is lowered only if UPA64S is already at low power; otherwise
1121  *   the action is deferred and then performed right after UPA64S
1122  *   goes down;
1123  * - PCI B power is always raised prior to raising UPA64S power.
1124  *
1125  * UPA64S and PCI B are considered each other's dependency device
1126  * whenever an actual power transition is handled (PMR_PPM_SET_POWER).
1127  */
1128 static int
1129 xcppm_manage_pciupa(dev_info_t *dip, power_req_t *reqp, int *result)
1130 {
1131 #ifdef DEBUG
1132 	char *str = "xcppm_manage_pciupa";
1133 #endif
1134 	int (*pwr_func)(ppm_dev_t *, int, int);
1135 	uint_t flags = 0, co_flags = 0;
1136 	ppm_dev_t *ppmd, *codev;
1137 	int new, cmpt, retval;
1138 
1139 	ppmd = PPM_GET_PRIVATE(dip);
1140 	DPRINTF(D_PCIUPA, ("%s: \"%s\", req %s\n", str,
1141 	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));
1142 
1143 	*result = DDI_SUCCESS;
1144 
1145 	switch (reqp->request_type) {
1146 	case PMR_PPM_SET_POWER:
1147 		pwr_func = xcppm_change_power_level;
1148 		new = reqp->req.ppm_set_power_req.new_level;
1149 		cmpt = reqp->req.ppm_set_power_req.cmpt;
1150 		break;
1151 	case PMR_PPM_POWER_CHANGE_NOTIFY:
1152 		pwr_func = xcppm_record_level_change;
1153 		new = reqp->req.ppm_notify_level_req.new_level;
1154 		cmpt = reqp->req.ppm_notify_level_req.cmpt;
1155 		break;
1156 	default:
1157 		*result = DDI_FAILURE;
1158 		return (DDI_FAILURE);
1159 	}
1160 
1161 	/* Common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
1162 	ASSERT(ppmd);	/* since it should be locked already */
1163 
1164 	if (new == ppmd->level)
1165 		return (DDI_SUCCESS);
1166 
1167 	DPRINTF(D_PCIUPA, ("%s: \"%s\", levels: current %d, new %d\n",
1168 	    str, ppmd->path, ppmd->level, new));
1169 
1170 	/*
1171 	 * find power-wise co-related device
1172 	 */
1173 	flags =  ppmd->flags;
1174 
1175 #ifdef DEBUG
1176 	if (flags & ~(XCPPMF_PCIB|XCPPMF_UPA))
1177 		DPRINTF(D_ERROR, ("%s: invalid ppmd->flags value 0x%x\n", str,
1178 		    ppmd->flags));
1179 #endif
1180 
1181 	if (flags == XCPPMF_UPA)
1182 		co_flags = XCPPMF_PCIB;
1183 	else if (flags == XCPPMF_PCIB)
1184 		co_flags = XCPPMF_UPA;
1185 
1186 	for (codev = ppmd->domp->devlist; codev; codev = codev->next)
1187 		if ((codev->cmpt == 0) && (codev->flags == co_flags))
1188 			break;
1189 
1190 	if (new > ppmd->level) {
1191 		/*
1192 		 * Raise power level -
1193 		 * pre-raising: upa ensure pci is powered up.
1194 		 */
1195 		if ((flags == XCPPMF_UPA) && codev &&
1196 		    (codev->level != codev->highest)) {
1197 			if ((retval = xcppm_change_power_level(codev,
1198 			    0, codev->highest)) != DDI_SUCCESS &&
1199 			    codev->level != codev->highest) {
1200 				*result = retval;
1201 				return (DDI_FAILURE);
1202 			}
1203 		}
1204 		if ((retval = (*pwr_func)(ppmd, 0, new)) != DDI_SUCCESS) {
1205 			*result = retval;
1206 			return (DDI_FAILURE);
1207 		}
1208 	} else if (new < ppmd->level) {
1209 		/*
1210 		 * Lower power level
1211 		 *
1212 		 * If this is pci and upa (once attached) is not yet at its
1213 		 * lowest level, just record the request and defer it.
1214 		 * Otherwise lower this device; upa then applies pci's deferral.
1215 		 */
1216 		if ((flags == XCPPMF_PCIB) && codev &&
1217 		    (codev->level != codev->lowest)) {
1218 			ppmd->rplvl = new;
1219 			return (DDI_SUCCESS);
1220 		}
1221 		if ((retval = (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS &&
1222 		    ppmd->level != new) {
1223 			*result = retval;
1224 			return (DDI_FAILURE);
1225 		}
1226 
1227 		if (flags == XCPPMF_UPA) {
1228 			if (codev && (codev->rplvl != PM_LEVEL_UNKNOWN) &&
1229 			    (codev->rplvl < codev->level)) {
1230 				DPRINTF(D_PCIUPA, ("%s: codev \"%s\" "
1231 				    "rplvl %d level %d\n", str, codev->path,
1232 				    codev->rplvl, codev->level));
1233 				if ((retval = xcppm_change_power_level(
1234 				    codev, 0, codev->rplvl)) != DDI_SUCCESS) {
1235 					*result = retval;
1236 					return (DDI_FAILURE);
1237 				}
1238 			}
1239 		}
1240 	}
1241 
1242 	return (DDI_SUCCESS);
1243 }
1244 
1245 
1246 /*
1247  * When all of the children of the 1394 nexus are idle, a call will be
1248  * made to the nexus driver's own power entry point to lower power.  Ppm
1249  * intercepts this and kills 1394 cable power (since the driver doesn't
1250  * have access to the required register).  Similar logic applies when
1251  * coming up from the state where all the children were off.
1252  */
1253 static int
1254 xcppm_manage_1394(dev_info_t *dip, power_req_t *reqp, int *result)
1255 {
1256 #ifdef DEBUG
1257 	char *str = "xcppm_manage_1394";
1258 #endif
1259 	int (*pwr_func)(ppm_dev_t *, int, int);
1260 	int new, old, cmpt;
1261 	ppm_dev_t *ppmd;
1262 
1263 	ppmd = PPM_GET_PRIVATE(dip);
1264 	DPRINTF(D_1394, ("%s: \"%s\", req %s\n", str,
1265 	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));
1266 
1267 	switch (reqp->request_type) {
1268 	case PMR_PPM_SET_POWER:
1269 		pwr_func = xcppm_change_power_level;
1270 		old = reqp->req.ppm_set_power_req.old_level;
1271 		new = reqp->req.ppm_set_power_req.new_level;
1272 		cmpt = reqp->req.ppm_set_power_req.cmpt;
1273 		break;
1274 	case PMR_PPM_POWER_CHANGE_NOTIFY:
1275 		pwr_func = xcppm_record_level_change;
1276 		old = reqp->req.ppm_notify_level_req.old_level;
1277 		new = reqp->req.ppm_notify_level_req.new_level;
1278 		cmpt = reqp->req.ppm_notify_level_req.cmpt;
1279 		break;
1280 	default:
1281 		return (*result = DDI_FAILURE);
1282 	}
1283 
1284 
1285 	/* Common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
1286 	DPRINTF(D_1394, ("%s: dev %s@%s, old %d new %d\n", str,
1287 	    ddi_binding_name(dip), ddi_get_name_addr(dip), old, new));
1288 
1289 	ASSERT(ppmd);	/* since it must already be locked */
1290 	ASSERT(old == ppmd->level);
1291 
1292 	if (new == ppmd->level)
1293 		return (*result = DDI_SUCCESS);
1294 
1295 	/* the reduce power case */
1296 	if (cmpt == 0 && new < ppmd->level) {
1297 		if ((*result =
1298 		    (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS) {
1299 			return (DDI_FAILURE);
1300 		}
1301 		if (new == ppmd->lowest)
1302 			(void) xcppm_gpio_port2(XCPPM_CLRBIT, CPEN);
1303 		ppmd->level = new;
1304 		return (DDI_SUCCESS);
1305 	}
1306 
1307 	/* the increase power case */
1308 	if (cmpt == 0 && new > ppmd->level) {
1309 		if (ppmd->level == ppmd->lowest) {
1310 			(void) xcppm_gpio_port2(XCPPM_SETBIT, CPEN);
1311 			delay(1);
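			/*
			 * Assumption: the one-tick delay gives 1394 cable
			 * power time to come up before the driver is asked
			 * to raise the link's power.
			 */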
1312 		}
1313 		/*
1314 		 * Even if pwr_func fails we need to check current level again
1315 		 * because it could have been changed by an intervening
1316 		 * POWER_CHANGE_NOTIFY operation.
1317 		 */
1318 		if ((*result =
1319 		    (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS &&
1320 		    ppmd->level == ppmd->lowest) {
1321 			(void) xcppm_gpio_port2(XCPPM_CLRBIT, CPEN);
1322 		} else {
1323 			ppmd->level = new;
1324 		}
1325 
1326 		return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
1327 	}
1328 
1329 	/*
1330 	 * We get here if component was non-zero.  This is not what we
1331 	 * expect.  Let the device deal with it and just pass back the
1332 	 * result.
1333 	 */
1334 	*result = xcppm_change_power_level(ppmd, cmpt, new);
1335 	return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
1336 }
1337 
1338 
1339 /*
1340  * lock, unlock, or trylock for one power mutex
1341  */
1342 static void
1343 xcppm_lock_one(ppm_dev_t *ppmd, power_req_t *reqp, int *iresp)
1344 {
1345 	switch (reqp->request_type) {
1346 	case PMR_PPM_LOCK_POWER:
1347 		pm_lock_power_single(ppmd->dip,
1348 		    reqp->req.ppm_lock_power_req.circp);
1349 		break;
1350 
1351 	case PMR_PPM_UNLOCK_POWER:
1352 		pm_unlock_power_single(ppmd->dip,
1353 		    reqp->req.ppm_unlock_power_req.circ);
1354 		break;
1355 
1356 	case PMR_PPM_TRY_LOCK_POWER:
1357 		*iresp = pm_try_locking_power_single(ppmd->dip,
1358 		    reqp->req.ppm_lock_power_req.circp);
1359 		break;
1360 	}
1361 }
1362 
1363 
1364 /*
1365  * lock, unlock, or trylock all devices within a domain.
1366  */
1367 static void
1368 xcppm_lock_all(ppm_domain_t *domp, power_req_t *reqp, int *iresp)
1369 {
1370 	/*
1371 	 * To simplify the implementation we let all the devices
1372 	 * in the domain be represented by a single device (dip).
1373 	 * We use the first device in the domain's devlist.  This
1374 	 * is safe because we return with the domain lock held
1375 	 * which prevents the list from changing.
1376 	 */
1377 	if (reqp->request_type == PMR_PPM_LOCK_POWER) {
1378 		if (!MUTEX_HELD(&domp->lock))
1379 			mutex_enter(&domp->lock);
1380 		domp->refcnt++;
1381 		ASSERT(domp->devlist != NULL);
1382 		pm_lock_power_single(domp->devlist->dip,
1383 		    reqp->req.ppm_lock_power_req.circp);
1384 		/* domain lock remains held */
1385 		return;
1386 	} else if (reqp->request_type == PMR_PPM_UNLOCK_POWER) {
1387 		ASSERT(MUTEX_HELD(&domp->lock));
1388 		ASSERT(domp->devlist != NULL);
1389 		pm_unlock_power_single(domp->devlist->dip,
1390 		    reqp->req.ppm_unlock_power_req.circ);
1391 		if (--domp->refcnt == 0)
1392 			mutex_exit(&domp->lock);
1393 		return;
1394 	}
1395 
1396 	ASSERT(reqp->request_type == PMR_PPM_TRY_LOCK_POWER);
1397 	if (!MUTEX_HELD(&domp->lock))
1398 		if (!mutex_tryenter(&domp->lock)) {
1399 			*iresp = 0;
1400 			return;
1401 		}
1402 	*iresp = pm_try_locking_power_single(domp->devlist->dip,
1403 	    reqp->req.ppm_lock_power_req.circp);
1404 	if (*iresp)
1405 		domp->refcnt++;
1406 	else
1407 		mutex_exit(&domp->lock);
1408 }
1409 
1410 
1411 /*
1412  * The pm framework calls us here to manage power for a device.
1413  * We maintain state which tells us whether we need to turn off/on
1414  * system board power components based on the status of all the devices
1415  * sharing a component.
1416  *
1417  */
1418 /* ARGSUSED */
1419 static int
1420 xcppm_ctlops(dev_info_t *dip, dev_info_t *rdip,
1421     ddi_ctl_enum_t ctlop, void *arg, void *result)
1422 {
1423 	power_req_t *reqp = arg;
1424 	xcppm_unit_t *unitp;
1425 	ppm_domain_t *domp;
1426 	ppm_dev_t *ppmd;
1427 
1428 #ifdef DEBUG
1429 	char path[MAXPATHLEN], *ctlstr, *str = "xcppm_ctlops";
1430 	uint_t mask = ppm_debug & (D_CTLOPS1 | D_CTLOPS2);
1431 	if (mask && (ctlstr = ppm_get_ctlstr(reqp->request_type, mask))) {
1432 		prom_printf("%s: \"%s\", %s\n", str,
1433 		    ddi_pathname(rdip, path), ctlstr);
1434 	}
1435 #endif
1436 
1437 	if (ctlop != DDI_CTLOPS_POWER)
1438 		return (DDI_FAILURE);
1439 
1440 	switch (reqp->request_type) {
1441 	case PMR_PPM_UNMANAGE:
1442 	case PMR_PPM_PRE_PROBE:
1443 	case PMR_PPM_POST_PROBE:
1444 	case PMR_PPM_PRE_ATTACH:
1445 	case PMR_PPM_PRE_DETACH:
1446 		return (DDI_SUCCESS);
1447 
1448 	/*
1449 	 * No hardware configuration is required on this
1450 	 * platform prior to installing drivers.
1451 	 */
1452 	case PMR_PPM_INIT_CHILD:
1453 	case PMR_PPM_UNINIT_CHILD:
1454 		return (DDI_SUCCESS);
1455 
1456 	case PMR_PPM_ALL_LOWEST:
1457 		DPRINTF(D_LOWEST, ("%s: all devices at lowest power = %d\n",
1458 		    str, reqp->req.ppm_all_lowest_req.mode));
1459 		if (reqp->req.ppm_all_lowest_req.mode == PM_ALL_LOWEST) {
1460 			unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
1461 			mutex_enter(&unitp->unit_lock);
1462 			if (unitp->state & XCPPM_ST_SUSPENDED) {
1463 				mutex_exit(&unitp->unit_lock);
1464 				return (DDI_SUCCESS);
1465 			}
1466 
1467 			xcppm_set_led(PPM_LEDON);
1468 			unitp->led_tid = timeout(xcppm_blink_led,
1469 			    (void *)PPM_LEDON, PPM_LEDON_INTERVAL);
1470 			mutex_exit(&unitp->unit_lock);
1471 			DPRINTF(D_LOWEST, ("%s: LED blink started\n", str));
1472 		} else {
1473 			xcppm_freeze_led((void *)PPM_LEDON);
1474 			DPRINTF(D_LOWEST, ("%s: LED freeze ON\n", str));
1475 		}
1476 		return (DDI_SUCCESS);
1477 
1478 	case PMR_PPM_POST_ATTACH:
1479 		/*
1480 		 * After a successful attach, if we haven't already created
1481 		 * our private data structure for this device, ppm_get_dev()
1482 		 * will force it to be created.
1483 		 */
1484 		ppmd = PPM_GET_PRIVATE(rdip);
1485 		if (reqp->req.ppm_config_req.result != DDI_SUCCESS) {
1486 			if (ppmd)
1487 				ppm_rem_dev(rdip);
1488 		} else if (!ppmd) {
1489 			domp = ppm_lookup_dev(rdip);
1490 			ASSERT(domp);
1491 			(void) ppm_get_dev(rdip, domp);
1492 		}
1493 		return (DDI_SUCCESS);
1494 
1495 	case PMR_PPM_POST_DETACH:
1496 		xcppm_detach_ctlop(rdip, reqp);
1497 		*(int *)result = DDI_SUCCESS;
1498 		return (DDI_SUCCESS);
1499 
1500 	case PMR_PPM_PRE_RESUME:
1501 		xcppm_resume_ctlop(rdip, reqp);
1502 		return (DDI_SUCCESS);
1503 
1504 	case PMR_PPM_UNLOCK_POWER:
1505 	case PMR_PPM_TRY_LOCK_POWER:
1506 	case PMR_PPM_LOCK_POWER:
1507 		ppmd = PPM_GET_PRIVATE(rdip);
1508 		if (ppmd)
1509 			domp = ppmd->domp;
1510 		else if (reqp->request_type != PMR_PPM_UNLOCK_POWER) {
1511 			domp = ppm_lookup_dev(rdip);
1512 			ASSERT(domp);
1513 			ppmd = ppm_get_dev(rdip, domp);
1514 		}
1515 
1516 		ASSERT(domp->dflags == PPMD_LOCK_ALL ||
1517 		    domp->dflags == PPMD_LOCK_ONE);
1518 		DPRINTF(D_LOCKS, ("xcppm_lock_%s: \"%s\", %s\n",
1519 		    (domp->dflags == PPMD_LOCK_ALL) ? "all" : "one",
1520 		    ppmd->path, ppm_get_ctlstr(reqp->request_type, D_LOCKS)));
1521 
1522 		if (domp->dflags == PPMD_LOCK_ALL)
1523 			xcppm_lock_all(domp, reqp, result);
1524 		else
1525 			xcppm_lock_one(ppmd, reqp, result);
1526 		return (DDI_SUCCESS);
1527 
1528 	case PMR_PPM_POWER_LOCK_OWNER:
1529 		ASSERT(reqp->req.ppm_power_lock_owner_req.who == rdip);
1530 		ppmd = PPM_GET_PRIVATE(rdip);
1531 		if (ppmd)
1532 			domp = ppmd->domp;
1533 		else {
1534 			domp = ppm_lookup_dev(rdip);
1535 			ASSERT(domp);
1536 			ppmd = ppm_get_dev(rdip, domp);
1537 		}
1538 
1539 		/*
1540 		 * In the LOCK_ALL case, the effective owner of the power lock
1541 		 * is the owner of the domain lock.  Otherwise it is the thread
1542 		 * marked busy on the device (devi_busy_thread).
1543 		 */
1544 		if (domp->dflags & PPMD_LOCK_ALL)
1545 			reqp->req.ppm_power_lock_owner_req.owner =
1546 			    mutex_owner(&domp->lock);
1547 		else {
1548 			reqp->req.ppm_power_lock_owner_req.owner =
1549 			    DEVI(rdip)->devi_busy_thread;
1550 		}
1551 		return (DDI_SUCCESS);
1552 
1553 	default:
1554 		ppmd = PPM_GET_PRIVATE(rdip);
1555 		if (ppmd == NULL) {
1556 			domp = ppm_lookup_dev(rdip);
1557 			ASSERT(domp);
1558 			ppmd = ppm_get_dev(rdip, domp);
1559 		}
1560 
1561 #ifdef DEBUG
1562 		if ((reqp->request_type == PMR_PPM_SET_POWER) &&
1563 		    (ppm_debug & D_SETPWR)) {
1564 			prom_printf("%s: \"%s\", PMR_PPM_SET_POWER\n",
1565 			    str, ppmd->path);
1566 		}
1567 #endif
1568 
1569 		if (ppmd->domp == &xcppm_cpu)
1570 			return (xcppm_manage_cpus(rdip, reqp, result));
1571 		else if (ppmd->domp == &xcppm_fet)
1572 			return (xcppm_manage_fet(rdip, reqp, result));
1573 		else if (ppmd->domp == &xcppm_upa)
1574 			return (xcppm_manage_pciupa(rdip, reqp, result));
1575 		else {
1576 			ASSERT(ppmd->domp == &xcppm_1394);
1577 			return (xcppm_manage_1394(rdip, reqp, result));
1578 		}
1579 	}
1580 }
1581 
1582 
1583 /*
1584  * Initialize our private version of real power level
1585  * as well as lowest and highest levels the device supports;
1586  * see ppmf and ppm_add_dev
1587  */
1588 static void
1589 xcppm_dev_init(ppm_dev_t *ppmd)
1590 {
1591 	struct pm_component *dcomps;
1592 	struct pm_comp *pm_comp;
1593 	dev_info_t *dip;
1594 	int maxi;
1595 
1596 	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
1597 	ppmd->level = PM_LEVEL_UNKNOWN;
1598 	ppmd->rplvl = PM_LEVEL_UNKNOWN;
1599 
1600 	dip = ppmd->dip;
1601 	/*
1602 	 * ppm exists to handle power-manageable devices which require
1603 	 * special handling on the current platform.  However, a
1604 	 * driver for such a device may choose not to support power
1605 	 * management on a particular load/attach.  In this case we
1606 	 * create a structure to represent a single-component device
1607 	 * for which "level" = PM_LEVEL_UNKNOWN and "lowest" = 0
1608 	 * are effectively constant.
1609 	 */
1610 	if (PM_GET_PM_INFO(dip)) {
1611 		dcomps = DEVI(dip)->devi_pm_components;
1612 		pm_comp = &dcomps[ppmd->cmpt].pmc_comp;
1613 
1614 		ppmd->lowest = pm_comp->pmc_lvals[0];
1615 		ASSERT(ppmd->lowest >= 0);
1616 		maxi = pm_comp->pmc_numlevels - 1;
1617 		ppmd->highest = pm_comp->pmc_lvals[maxi];
1618 	}
1619 
1620 	/*
1621 	 * add any domain-specific initialization here
1622 	 */
1623 	if (ppmd->domp == &xcppm_fet) {
1624 		/*
1625 		 * when a new device is added to domain_powerfet,
1626 		 * it is counted here as being powered up.
1627 		 */
1628 		ppmd->domp->pwr_cnt++;
1629 		DPRINTF(D_FET, ("xcppm_dev_init: UP cnt = %d\n",
1630 		    ppmd->domp->pwr_cnt));
1631 	} else if (ppmd->domp == &xcppm_upa) {
1632 		/*
1633 		 * There may be a better way to determine the device type
1634 		 * instead of comparing to hard coded string names.
1635 		 */
1636 		if (strstr(ppmd->path, "pci@8,700000"))
1637 			ppmd->flags = XCPPMF_PCIB;
1638 		else if (strstr(ppmd->path, "upa@8,480000"))
1639 			ppmd->flags = XCPPMF_UPA;
1640 	}
1641 }
1642 
1643 
1644 /*
1645  * see ppmf and ppm_rem_dev
1646  */
1647 static void
1648 xcppm_dev_fini(ppm_dev_t *ppmd)
1649 {
1650 	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
1651 	if (ppmd->domp == &xcppm_fet) {
1652 		if (ppmd->level != ppmd->lowest) {
1653 			ppmd->domp->pwr_cnt--;
1654 			DPRINTF(D_FET, ("xcppm_dev_fini: DN cnt = %d\n",
1655 			    ppmd->domp->pwr_cnt));
1656 		}
1657 	}
1658 }
1659 
1660 
1661 /*
1662  * see ppmf and ppm_ioctl, PPMIOCSET
1663  */
1664 static void
1665 xcppm_iocset(uint8_t value)
1666 {
1667 	int action;
1668 
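	/*
	 * Assumption: value is one of PPM_IDEV_POWER_ON/PPM_IDEV_POWER_OFF
	 * (presumably vetted by the common ppm_ioctl() code); otherwise
	 * "action" would be used uninitialized below.
	 */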
1669 	if (value == PPM_IDEV_POWER_ON)
1670 		action = XCPPM_SETBIT;
1671 	else if (value == PPM_IDEV_POWER_OFF)
1672 		action = XCPPM_CLRBIT;
1673 	(void) xcppm_gpio_port2(action, DRVON);
1674 }
1675 
1676 
1677 /*
1678  * see ppmf and ppm_ioctl, PPMIOCGET
1679  */
1680 static uint8_t
1681 xcppm_iocget(void)
1682 {
1683 	uint8_t bit;
1684 
1685 	bit = xcppm_gpio_port2(XCPPM_GETBIT, DRVON);
1686 	return ((bit == DRVON) ? PPM_IDEV_POWER_ON : PPM_IDEV_POWER_OFF);
1687 }
1688