xref: /illumos-gate/usr/src/uts/sun4u/excalibur/io/xcalppm.c (revision 3fe80ca4a1f8a033d672a9a2e6e4babac651205a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Copyright 2023 Oxide Computer Company
28  */
29 
30 /*
31  * Platform Power Management driver for SUNW,Sun-Blade-1000
32  */
33 #include <sys/modctl.h>
34 #include <sys/conf.h>
35 #include <sys/ddi.h>
36 #include <sys/sunddi.h>
37 #include <sys/ddi_impldefs.h>
38 #include <sys/ppmvar.h>
39 #include <sys/ppmio.h>
40 #include <sys/xcalppm_reg.h>
41 #include <sys/xcalppm_var.h>
42 #include <sys/stat.h>
43 #include <sys/epm.h>
44 #include <sys/archsystm.h>
45 #include <sys/cpuvar.h>
46 #include <sys/cheetahregs.h>
47 #include <sys/us3_module.h>
48 
49 /*
50  * Locking Considerations
51  *
52  * To look at and/or modify xcppm_domain fields or elements of its list of
53  * xcppm_dev structures the domain_lock for the affected domain must be held.
54  *
55  * When the autopm framework needs to change the power of a component of a
56  * device, it needs to hold the associated power lock (see discussion at
57  * top of uts/common/os/sunpm.c).
58  *
59  * If the framework needs to lock a dev/cmpt for a device which this ppm
60  * has claimed, xcppm_ctlops will be called with PMR_PPM_LOCK_POWER.  Ppm
61  * needs to be involved because, due to platform constraints, changing the
62  * power of one device may require that other devices be changed in the same
63  * operation.
64  *
65  * In some domains (e.g., cpus) the power lock must be acquired for all the
66  * affected devices to avoid possible corruption of the power states.  The
67  * joint change must be an atomic operation.  Ppm handles this by acquiring
68  * the domain lock, then walking the list of affected devices and acquiring
69  * the power lock for each of them.  To unlock, the list is traversed and
70  * each of the power locks is freed, followed by freeing the domain lock.
71  *
72  * For other domains ppm will only be changing the power of a single device
73  * that is known to the framework.  In these cases, the locking is done by
74  * acquiring the domain lock and directly calling the framework routine for
75  * getting a single power lock.
76  */
77 
/* Local driver entry points and platform callbacks defined below */
static int	xcppm_attach(dev_info_t *, ddi_attach_cmd_t);
static int	xcppm_detach(dev_info_t *, ddi_detach_cmd_t);
static int	xcppm_ctlops(dev_info_t *, dev_info_t *,
		    ddi_ctl_enum_t, void *, void *);
static void	xcppm_dev_init(ppm_dev_t *);
static void	xcppm_dev_fini(ppm_dev_t *);
static void	xcppm_iocset(uint8_t);
static uint8_t	xcppm_iocget(void);

/*
 * Note: 1394 and pciupa were originally required to be LOCK_ALL domains.
 * However, the underlying nexus drivers aren't able to do power mgmt
 * (because of hw implementation issues).  The locking protocol for these
 * domains is changed to LOCK_ONE to simplify other code.  The domain
 * code itself will be removed in the future.
 */
static ppm_domain_t xcppm_1394 = { "domain_1394",	PPMD_LOCK_ONE };
static ppm_domain_t xcppm_cpu  = { "domain_cpu",	PPMD_LOCK_ALL };
static ppm_domain_t xcppm_fet  = { "domain_powerfet",	PPMD_LOCK_ONE };
static ppm_domain_t xcppm_upa  = { "domain_pciupa",	PPMD_LOCK_ONE };

/* NULL-terminated list of all domains, consumed by the common ppm code */
ppm_domain_t *ppm_domains[] = {
	&xcppm_1394,
	&xcppm_cpu,
	&xcppm_fet,
	&xcppm_upa,
	NULL
};


/* Platform-specific operations vector used by the common ppm driver */
struct ppm_funcs ppmf = {
	xcppm_dev_init,			/* dev_init */
	xcppm_dev_fini,			/* dev_fini */
	xcppm_iocset,			/* iocset */
	xcppm_iocget,			/* iocget */
};
114 
115 
/*
 * The order of entries must be from slowest to fastest and in
 * one-to-one correspondence with the cpu_level array.
 */
static const uint16_t bbc_estar_control_masks[] = {
	BBC_ESTAR_SLOW, BBC_ESTAR_MEDIUM, BBC_ESTAR_FAST
};

/* Tunable; consumers not visible in this chunk — presumably a BBC delay */
int bbc_delay = 10;			/* microsec */
125 
126 
/*
 * Configuration data structures
 */
static struct cb_ops xcppm_cb_ops = {
	ppm_open,		/* open */
	ppm_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	ppm_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* streamtab */
	D_MP | D_NEW,		/* driver compatibility flag */
	CB_REV,			/* cb_ops revision */
	nodev,			/* async read */
	nodev			/* async write */
};

/*
 * Nexus (bus) operations: all DMA entry points are stubbed out since this
 * pseudo-nexus only exists to intercept ctlops for power management.
 */
static struct bus_ops xcppm_bus_ops = {
	BUSO_REV,
	0,
	0,
	0,
	0,
	0,
	ddi_no_dma_map,
	ddi_no_dma_allochdl,
	ddi_no_dma_freehdl,
	ddi_no_dma_bindhdl,
	ddi_no_dma_unbindhdl,
	ddi_no_dma_flush,
	ddi_no_dma_win,
	ddi_no_dma_mctl,
	xcppm_ctlops,
	0,
	0,			/* (*bus_get_eventcookie)();	*/
	0,			/* (*bus_add_eventcall)();	*/
	0,			/* (*bus_remove_eventcall)();	*/
	0			/* (*bus_post_event)();		*/
};

static struct dev_ops xcppm_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	ppm_getinfo,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	xcppm_attach,		/* attach */
	xcppm_detach,		/* detach */
	nodev,			/* reset */
	&xcppm_cb_ops,		/* driver operations */
	&xcppm_bus_ops,		/* bus operations */
	NULL,			/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* type of module - driver */
	"platform pm driver",
	&xcppm_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
202 
203 
int
_init(void)
{
	/*
	 * The common ppm code performs all module installation work;
	 * it is told the size of our per-instance soft state and the
	 * "xc" platform tag.
	 */
	return (ppm_init(&modlinkage, sizeof (xcppm_unit_t), "xc"));
}
209 
210 
int
_fini(void)
{
	/* This driver can never be unloaded once it is loaded. */
	return (EBUSY);
}
216 
217 
int
_info(struct modinfo *modinfop)
{
	/* Standard module information entry point. */
	return (mod_info(&modlinkage, modinfop));
}
223 
224 
/*
 * Map the four register sets under the ppm node and record both the DDI
 * access handles and the derived register addresses in the unit soft
 * state.  If any mapping fails, every mapping that succeeded is freed and
 * DDI_FAILURE is returned.  On success, GPIO Bank 0 is selected in the
 * SuperI/O as a side effect (the rest of the driver depends on that).
 */
static int
xcppm_map_all_regs(dev_info_t *dip)
{
	ddi_device_acc_attr_t attr_be, attr_le;
	int rv0, rv1, rv2, rv3;
	xcppm_unit_t *unitp;
	caddr_t base_addr;
	uint8_t data8;

	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);

	/* BBC registers are big-endian; the SuperI/O ones little-endian */
	attr_be.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr_be.devacc_attr_endian_flags  = DDI_STRUCTURE_BE_ACC;
	attr_be.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	attr_le.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr_le.devacc_attr_endian_flags  = DDI_STRUCTURE_LE_ACC;
	attr_le.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* regset 0: BBC E* control and delay-time registers */
	rv0 = ddi_regs_map_setup(dip, 0, &base_addr, 0, 0, &attr_be,
	    &unitp->hndls.bbc_estar_ctrl);

	unitp->regs.bbc_estar_ctrl = (uint16_t *)(base_addr +
	    BBC_ESTAR_CTRL_OFFSET);
	unitp->regs.bbc_assert_change = (uint32_t *)(base_addr +
	    BBC_ASSERT_CHANGE_OFFSET);
	unitp->regs.bbc_pll_settle = (uint32_t *)(base_addr +
	    BBC_PLL_SETTLE_OFFSET);

	/* regset 1: RIO mode auxio register (mapped base used directly) */
	rv1 = ddi_regs_map_setup(dip, 1,
	    (caddr_t *)&unitp->regs.rio_mode_auxio,
	    0, 0, &attr_le, &unitp->hndls.rio_mode_auxio);

	/* regset 2: SuperI/O GPIO bank-select index/data register pair */
	rv2 = ddi_regs_map_setup(dip, 2, &base_addr,
	    0, 0, &attr_le, &unitp->hndls.gpio_bank_select);

	unitp->regs.gpio_bank_sel_index = (uint8_t *)(base_addr +
	    GPIO_BANK_SEL_INDEX_OFFSET);
	unitp->regs.gpio_bank_sel_data = (uint8_t *)(base_addr +
	    GPIO_BANK_SEL_DATA_OFFSET);

	/* regset 3: GPIO data ports 1 (LED) and 2 (FET/DC-DC control) */
	rv3 = ddi_regs_map_setup(dip, 3, &base_addr, 0, 0, &attr_le,
	    &unitp->hndls.gpio_data_ports);

	unitp->regs.gpio_port1_data = (uint8_t *)(base_addr +
	    GPIO_PORT1_DATA_OFFSET);
	unitp->regs.gpio_port2_data = (uint8_t *)(base_addr +
	    GPIO_PORT2_DATA_OFFSET);

	/* on any failure, unmap exactly those regsets that succeeded */
	if (rv0 != DDI_SUCCESS || rv1 != DDI_SUCCESS ||
	    rv2 != DDI_SUCCESS || rv3 != DDI_SUCCESS) {
		if (rv0 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.bbc_estar_ctrl);
		if (rv1 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.rio_mode_auxio);
		if (rv2 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.gpio_bank_select);
		if (rv3 == DDI_SUCCESS)
			ddi_regs_map_free(&unitp->hndls.gpio_data_ports);
		return (DDI_FAILURE);
	}

	/*
	 * Ppm uses GPIO bits in Bank 0.  Make sure Bank 0 is selected.
	 */
	data8 = SIO_CONFIG2_INDEX;
	XCPPM_SETGET8(unitp->hndls.gpio_bank_select,
	    unitp->regs.gpio_bank_sel_index, data8);
	data8 = XCPPM_GET8(unitp->hndls.gpio_bank_select,
	    unitp->regs.gpio_bank_sel_data);

	data8 &= 0x7f;	/* Set Bit7 to zero */
	XCPPM_SETGET8(unitp->hndls.gpio_bank_select,
	    unitp->regs.gpio_bank_sel_data, data8);

	return (DDI_SUCCESS);
}
301 
302 
/*
 * Attach entry point.  DDI_ATTACH: allocate and initialize the single
 * xcppm unit, create the "ddi_ppm" minor node, map the hardware
 * registers, and register with the pm framework as the platform power
 * manager.  DDI_RESUME: clear the suspended flag set by detach.
 */
static int
xcppm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
#ifdef DEBUG
	char *str = "xcppm_attach";
#endif
	xcppm_unit_t *unitp;
	ppm_domain_t **dompp;
	int retval;

	DPRINTF(D_ATTACH, ("%s: attach cmd %d\n", str, cmd));
	retval = DDI_SUCCESS;

	switch (cmd) {
	case DDI_ATTACH:
		/* only one instance of this driver is supported */
		if (ppm_inst != -1) {
			DPRINTF(D_ERROR,
			    ("%s: instance already attached\n", str));
			return (DDI_FAILURE);
		}
		ppm_inst = ddi_get_instance(dip);

		/*
		 * Allocate and initialize soft state structure
		 */
		if (ddi_soft_state_zalloc(ppm_statep, ppm_inst) != 0)
			return (DDI_FAILURE);
		unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
		mutex_init(&unitp->unit_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&unitp->creator_lock, NULL, MUTEX_DRIVER, NULL);

		if (ddi_create_minor_node(dip, "ppm", S_IFCHR,
		    ppm_inst, "ddi_ppm", 0) == DDI_FAILURE) {
			ddi_soft_state_free(ppm_statep, ppm_inst);
			DPRINTF(D_ERROR,
			    ("%s: Can't create minor for 0x%p\n", str,
			    (void *)dip));
			return (DDI_FAILURE);
		}
		ddi_report_dev(dip);
		unitp->dip = dip;

		/*
		 * NOTE(review): failures from here on return without
		 * removing the minor node or freeing the soft state.
		 * Presumably tolerable because the driver never detaches
		 * (DDI_DETACH always fails below) — confirm.
		 */
		if (retval = ppm_create_db(dip))
			return (retval);

		/*
		 * Map all of the registers under the ppm node.
		 */
		if (xcppm_map_all_regs(dip) != DDI_SUCCESS)
			return (DDI_FAILURE);

		if ((retval =
		    pm_register_ppm(ppm_claim_dev, dip)) != DDI_SUCCESS) {
			DPRINTF(D_ERROR,
			    ("%s: can't register ppm handler\n", str));
			return (retval);
		}

		for (dompp = ppm_domains; *dompp; dompp++)
			mutex_init(&(*dompp)->lock, NULL, MUTEX_DRIVER, NULL);

		break;

	case DDI_RESUME:
		unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
		mutex_enter(&unitp->unit_lock);
		unitp->state &= ~XCPPM_ST_SUSPENDED;
		mutex_exit(&unitp->unit_lock);
		break;

	default:
		cmn_err(CE_CONT, "xcppm_attach: unknown "
		    "attach command %d, dip 0x%p\n", cmd, (void *)dip);
		retval = DDI_FAILURE;
	}

	return (retval);
}
381 
382 
383 /*
384  * set the front panel LED:
385  * PPM_LEDON turns it on, PPM_LEDOFF turns it off.
386  * for GPIO register: 0x0 means led-on, 0x2 means led-off.
387  */
388 static void
xcppm_set_led(int action)389 xcppm_set_led(int action)
390 {
391 	xcppm_unit_t *unitp;
392 	uint8_t	reg;
393 
394 	ASSERT(action == PPM_LEDON || action == PPM_LEDOFF);
395 	DPRINTF(D_LED, ("xcppm_set_led: Turn LED %s\n",
396 	    (action == PPM_LEDON) ? "on" : "off"));
397 
398 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
399 	reg = XCPPM_GET8(unitp->hndls.gpio_data_ports,
400 	    unitp->regs.gpio_port1_data);
401 	if (action == PPM_LEDON)
402 		reg &= ~LED;
403 	else
404 		reg |= LED;
405 	XCPPM_SETGET8(unitp->hndls.gpio_data_ports,
406 	    unitp->regs.gpio_port1_data, reg);
407 }
408 
409 
410 static void
xcppm_blink_led(void * action)411 xcppm_blink_led(void *action)
412 {
413 	xcppm_unit_t *unitp;
414 	int new_action;
415 	clock_t intvl;
416 
417 	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
418 	mutex_enter(&unitp->unit_lock);
419 	if (unitp->led_tid == 0) {
420 		mutex_exit(&unitp->unit_lock);
421 		return;
422 	}
423 
424 	if ((int)(uintptr_t)action == PPM_LEDON) {
425 		new_action = PPM_LEDOFF;
426 		intvl = PPM_LEDOFF_INTERVAL;
427 	} else {
428 		ASSERT((int)(uintptr_t)action == PPM_LEDOFF);
429 		new_action = PPM_LEDON;
430 		intvl = PPM_LEDON_INTERVAL;
431 	}
432 
433 	xcppm_set_led(new_action);
434 	unitp->led_tid = timeout(xcppm_blink_led, (void *)(uintptr_t)new_action,
435 	    intvl);
436 	mutex_exit(&unitp->unit_lock);
437 }
438 
439 
/*
 * Stop LED blinking and leave the LED in the state given by 'action'
 * (PPM_LEDON or PPM_LEDOFF).
 */
static void
xcppm_freeze_led(void *action)
{
	xcppm_unit_t *unitp;
	timeout_id_t tid;

	DPRINTF(D_LOWEST, ("xcppm_freeze_led: action %d\n",
	    (int)(uintptr_t)action));
	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);

	/*
	 * Clear led_tid under the lock so a concurrently-running
	 * xcppm_blink_led() callback sees 0 and bails out, then call
	 * untimeout() with the lock DROPPED: untimeout() may have to wait
	 * for an in-flight callback, and that callback takes unit_lock,
	 * so holding it here could deadlock.
	 */
	mutex_enter(&unitp->unit_lock);
	tid = unitp->led_tid;
	unitp->led_tid = 0;
	mutex_exit(&unitp->unit_lock);
	(void) untimeout(tid);
	mutex_enter(&unitp->unit_lock);
	xcppm_set_led((int)(uintptr_t)action);
	mutex_exit(&unitp->unit_lock);
}
458 
459 
/* ARGSUSED */
static int
xcppm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	xcppm_unit_t *unitp;

	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
	DPRINTF(D_DETACH, ("xcppm_detach: cmd %d\n", cmd));

	switch (cmd) {
	case DDI_DETACH:
		/* the platform power manager can never be detached */
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		mutex_enter(&unitp->unit_lock);
		unitp->state |= XCPPM_ST_SUSPENDED;
		mutex_exit(&unitp->unit_lock);

		/*
		 * Suspend requires that timeout callouts to be canceled.
		 * Turning off the LED blinking will cancel the timeout.
		 * (The LED itself is left on.)
		 */
		xcppm_freeze_led((void *)PPM_LEDON);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
489 
490 
491 /*
492  * Device we claimed has detached.  We must get rid of
493  * our state which was used to track this device.
494  */
495 static void
xcppm_detach_ctlop(dev_info_t * dip,power_req_t * reqp)496 xcppm_detach_ctlop(dev_info_t *dip, power_req_t *reqp)
497 {
498 	ppm_dev_t *ppmd;
499 
500 	ppmd = PPM_GET_PRIVATE(dip);
501 	if (ppmd == NULL || reqp->req.ppm_config_req.result != DDI_SUCCESS)
502 		return;
503 
504 	ppm_rem_dev(dip);
505 }
506 
507 
/*
 * The system is being resumed from a cpr suspend operation and this
 * device's attach entry will be called shortly.  The driver will set
 * the device's power to a conventional starting value, and we need to
 * stay in sync and set our private copy to the same value.
 */
/* ARGSUSED */
static void
xcppm_resume_ctlop(dev_info_t *dip, power_req_t *reqp)
{
	ppm_domain_t *domp;
	ppm_dev_t *ppmd;
	int powered;

	ppmd = PPM_GET_PRIVATE(dip);
	if (ppmd == NULL)
		return;

	/*
	 * Maintain correct powered count for domain which cares
	 */
	powered = 0;
	domp = ppmd->domp;
	mutex_enter(&domp->lock);
	if (domp == &xcppm_fet) {
		/* count this dip's components that are currently powered */
		for (ppmd = domp->devlist; ppmd; ppmd = ppmd->next) {
			if (ppmd->dip == dip && ppmd->level)
				powered++;
		}

		/*
		 * If this device was powered off when the system was
		 * suspended, this resume acts like a power-on transition,
		 * so we adjust the count.
		 */
		if (powered == 0)
			domp->pwr_cnt++;
	}

	/* forget the saved levels; attach will re-establish them */
	for (ppmd = domp->devlist; ppmd; ppmd = ppmd->next) {
		if (ppmd->dip == dip)
			ppmd->level = ppmd->rplvl = PM_LEVEL_UNKNOWN;
	}
	mutex_exit(&domp->lock);
}
553 
554 
555 /*
556  * Change the power level for a component of a device.  If the change
557  * arg is true, we call the framework to actually change the device's
558  * power; otherwise, we just update our own copy of the power level.
559  */
560 static int
xcppm_set_level(ppm_dev_t * ppmd,int cmpt,int level,boolean_t change)561 xcppm_set_level(ppm_dev_t *ppmd, int cmpt, int level, boolean_t change)
562 {
563 #ifdef DEBUG
564 	char *str = "xcppm_set_level";
565 #endif
566 	int ret;
567 
568 	ret = DDI_SUCCESS;
569 	if (change)
570 		ret = pm_power(ppmd->dip, cmpt, level);
571 
572 	DPRINTF(D_SETLVL, ("%s: \"%s\" change=%d, old %d, new %d, ret %d\n",
573 	    str, ppmd->path, change, ppmd->level, level, ret));
574 
575 	if (ret == DDI_SUCCESS) {
576 		ppmd->level = level;
577 		ppmd->rplvl = PM_LEVEL_UNKNOWN;
578 	}
579 
580 	return (ret);
581 }
582 
583 
/* Change a device component's power via the framework and track it. */
static int
xcppm_change_power_level(ppm_dev_t *ppmd, int cmpt, int level)
{
	return (xcppm_set_level(ppmd, cmpt, level, B_TRUE));
}
589 
590 
/* Record a power level change made elsewhere; no hardware access. */
static int
xcppm_record_level_change(ppm_dev_t *ppmd, int cmpt, int level)
{
	return (xcppm_set_level(ppmd, cmpt, level, B_FALSE));
}
596 
597 
/*
 * Read or read-modify-write GPIO data port 2 under gpio_lock.
 * 'action' is XCPPM_GETBIT, XCPPM_SETBIT or XCPPM_CLRBIT and 'pos' is
 * the bit mask to test or change.  Returns the (new) state of the
 * selected bit.  Any other action panics the system, so the
 * otherwise-uninitialized return value can never be reached.
 */
static uint8_t
xcppm_gpio_port2(int action, uint8_t pos)
{
#ifdef DEBUG
	char *str = "xcppm_gpio_port2";
#endif
	xcppm_unit_t *unitp;
	uint8_t data8, buf8;
	uint8_t	ret;

	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
	mutex_enter(&unitp->gpio_lock);

	/* buf8 keeps the original value for the debug trace */
	data8 = buf8 = XCPPM_GET8(unitp->hndls.gpio_data_ports,
	    unitp->regs.gpio_port2_data);

	switch (action) {
	case XCPPM_GETBIT:
		ret = data8 & pos;
		DPRINTF(D_GPIO, ("%s: READ: GPIO Bank2 value 0x%x\n",
		    str, buf8));
		break;

	case XCPPM_SETBIT:
	case XCPPM_CLRBIT:
		if (action == XCPPM_SETBIT)
			data8 |= pos;
		else
			data8 &= ~pos;
		XCPPM_SETGET8(unitp->hndls.gpio_data_ports,
		    unitp->regs.gpio_port2_data, data8);
		ret = data8 & pos;
		DPRINTF(D_GPIO, ("%s: %s: GPIO Bank2 "
		    "bit 0x%x changed from 0x%x to 0x%x\n",
		    str, (action == XCPPM_SETBIT) ? "UP" : "DOWN",
		    pos, buf8, data8));
		break;

	default:
		/* CE_PANIC does not return */
		cmn_err(CE_PANIC, "xcalppm: unrecognized register "
		    "IO command %d\n", action);
		break;
	}
	mutex_exit(&unitp->gpio_lock);

	return (ret);
}
645 
646 
647 /*
648  * Raise the power level of a subrange of cpus.  Used when cpu driver
649  * failed an attempt to lower the power of a cpu (probably because
650  * it got busy).  Need to revert the ones we already changed.
651  *
652  * ecpup = the ppm_dev_t for the cpu which failed to lower power
653  * level = power level to reset prior cpus to
654  */
655 static void
xcppm_revert_cpu_power(ppm_dev_t * ecpup,int level)656 xcppm_revert_cpu_power(ppm_dev_t *ecpup, int level)
657 {
658 	ppm_dev_t *cpup;
659 
660 	for (cpup = xcppm_cpu.devlist; cpup != ecpup; cpup = cpup->next) {
661 		DPRINTF(D_CPU, ("xrcp: \"%s\", revert to level %d\n",
662 		    cpup->path, level));
663 		(void) xcppm_change_power_level(cpup, 0, level);
664 	}
665 }
666 
/*
 * Switch the DC/DC converter.  Clearing the GPIO bit in SuperI/O puts
 * the converter in low power mode and setting the bit puts it back in
 * normal mode.
 */
static void
xcppm_switch_dcdc_converter(int action)
{
	int tries = XCPPM_VCL_TRIES;
	uint_t spl;
	uint64_t stick_begin, stick_end;
	uint64_t tick_begin, tick_end;
	uint64_t cur_speed_ratio, full_speed_ratio;
	/* remembers whether the converter is currently in low power mode */
	static int xcppm_dcdc_lpm;

	switch (action) {
	case XCPPM_SETBIT:
		if (xcppm_dcdc_lpm) {
			DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: "
			    "switch to normal power mode.\n"));
			(void) xcppm_gpio_port2(action, HIGHPWR);
			xcppm_dcdc_lpm = 0;
		}
		break;
	case XCPPM_CLRBIT:
		/*
		 * In some fast CPU configurations, DC/DC converter was
		 * put in low power mode before CPUs made the transition
		 * to 1/32 of clock speed.  In those cases, system was
		 * shut down by hardware for protection.  To resolve that
		 * problem, we make sure CPUs have made the clock transition
		 * before the DC/DC converter has been put to low power mode.
		 */
		ASSERT(xcppm_dcdc_lpm == 0);
		kpreempt_disable();
		full_speed_ratio = cpunodes[CPU->cpu_id].clock_freq /
		    sys_tick_freq;
		while (tries) {
			/*
			 * Sample tick (cpu clock) and stick (system clock)
			 * across a fixed delay; their ratio reveals the
			 * current cpu clock divisor.  The paired reads are
			 * taken inside a critical section so they are not
			 * separated by an interrupt.
			 */
			spl = ddi_enter_critical();
			tick_begin = gettick_counter();
			stick_timestamp((int64_t *)&stick_begin);
			ddi_exit_critical(spl);
			drv_usecwait(XCPPM_VCL_DELAY);
			spl = ddi_enter_critical();
			tick_end = gettick_counter();
			stick_timestamp((int64_t *)&stick_end);
			ddi_exit_critical(spl);
			cur_speed_ratio = (tick_end - tick_begin) /
			    (stick_end - stick_begin);

			/*
			 * tick/stick at current speed should at most be
			 * equal to full-speed tick/stick, adjusted with
			 * full/lowest clock speed ratio.  If not, speed
			 * transition has not happened yet.
			 */
			if (cur_speed_ratio <= ((full_speed_ratio /
			    XCPPM_VCL_DIVISOR) + 1)) {
				DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: "
				    "switch to low power mode.\n"));
				(void) xcppm_gpio_port2(action, HIGHPWR);
				xcppm_dcdc_lpm = 1;
				break;
			}
			DPRINTF(D_CPU, ("xcppm_switch_dcdc_converter: CPU "
			    "has not made transition to lowest speed yet "
			    "(%d)\n", tries));
			tries--;
		}
		kpreempt_enable();
		break;
	}
}
740 
741 static void
xcppm_rio_mode(xcppm_unit_t * unitp,int mode)742 xcppm_rio_mode(xcppm_unit_t *unitp, int mode)
743 {
744 	uint32_t data32, buf32;
745 
746 	mutex_enter(&unitp->gpio_lock);
747 	data32 = buf32 = XCPPM_GET32(unitp->hndls.rio_mode_auxio,
748 	    unitp->regs.rio_mode_auxio);
749 	if (mode == XCPPM_SETBIT)
750 		data32 |= RIO_BBC_ESTAR_MODE;
751 	else
752 		data32 &= ~RIO_BBC_ESTAR_MODE;
753 	XCPPM_SETGET32(unitp->hndls.rio_mode_auxio,
754 	    unitp->regs.rio_mode_auxio, data32);
755 	mutex_exit(&unitp->gpio_lock);
756 
757 	DPRINTF(D_CPU, ("xcppm_rio_mode: %s: change from 0x%x to 0x%x\n",
758 	    (mode == XCPPM_SETBIT) ? "DOWN" : "UP", buf32, data32));
759 }
760 
761 
/*
 * change the power level of all cpus to the arg value;
 * the caller needs to ensure that a legal transition is requested.
 *
 * The hardware sequence is order-critical: the DC/DC converter must be
 * in normal mode before speeding up, the RIO E* mode bit must be set
 * before slowing down, and the BBC delay/control registers must be
 * programmed for each intermediate speed step.
 */
static int
xcppm_change_cpu_power(int newlevel)
{
#ifdef DEBUG
	char *str = "xcppm_ccp";
#endif
	int index, level, oldlevel;
	int lowest, highest;
	int undo_flag, ret;
	int speedup, incr;
	uint32_t data32;
	uint16_t data16;
	xcppm_unit_t *unitp;
	ppm_dev_t *cpup;
	dev_info_t *dip;
	char *chstr;

	unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
	ASSERT(unitp);
	cpup = xcppm_cpu.devlist;
	lowest = cpup->lowest;
	highest = cpup->highest;

	/*
	 * not all cpus may have transitioned to a known level by this time
	 */
	oldlevel = (cpup->level == PM_LEVEL_UNKNOWN) ? highest : cpup->level;
	dip = cpup->dip;
	ASSERT(dip);

	DPRINTF(D_CPU, ("%s: old %d, new %d, highest %d, lowest %d\n",
	    str, oldlevel, newlevel, highest, lowest));

	/* decide direction and step; no-op if already at the target */
	if (newlevel > oldlevel) {
		chstr = "UP";
		speedup = 1;
		incr = 1;
	} else if (newlevel < oldlevel) {
		chstr = "DOWN";
		speedup = 0;
		incr = -1;
	} else
		return (DDI_SUCCESS);

	undo_flag = 0;
	if (speedup) {
		/*
		 * If coming up from lowest power level, set the E*
		 * mode bit in GPIO to make power supply efficient
		 * at normal power.
		 */
		if (oldlevel == cpup->lowest) {
			xcppm_switch_dcdc_converter(XCPPM_SETBIT);
			undo_flag = 1;
		}
	} else {
		/*
		 * set BBC Estar mode bit in RIO AUXIO register
		 */
		if (oldlevel == highest) {
			xcppm_rio_mode(unitp, XCPPM_SETBIT);
			undo_flag = 1;
		}
	}

	/*
	 * this loop will execute 1x or 2x depending on
	 * number of times we need to change clock rates
	 */
	for (level = oldlevel+incr; level != newlevel+incr; level += incr) {
		for (cpup = xcppm_cpu.devlist; cpup; cpup = cpup->next) {
			if (cpup->level == level)
				continue;
			ret = xcppm_change_power_level(cpup, 0, level);
			DPRINTF(D_CPU, ("%s: \"%s\", %s to level %d, ret %d\n",
			    str, cpup->path, chstr, cpup->level, ret));
			if (ret == DDI_SUCCESS)
				continue;

			/*
			 * if the driver was unable to lower cpu speed,
			 * the cpu probably got busy; set the previous
			 * cpus back to the original level
			 */
			if (speedup == 0)
				xcppm_revert_cpu_power(cpup, level + 1);

			/* undo the preparatory mode change made above */
			if (undo_flag) {
				if (speedup)
					xcppm_switch_dcdc_converter(
					    XCPPM_CLRBIT);
				else
					xcppm_rio_mode(unitp, XCPPM_CLRBIT);
			}
			return (ret);
		}

		index = level - 1;
		spm_change_schizo_speed(index);
		DPRINTF(D_CPU, ("%s: safari config reg changed\n", str));

		/*
		 * set the delay times for changing to this rate
		 */
		data32 = XCPPM_BBC_DELAY(index);
		XCPPM_SETGET32(unitp->hndls.bbc_estar_ctrl,
		    (caddr_t)unitp->regs.bbc_assert_change, data32);
		DPRINTF(D_CPU, ("%s: %s: Wrote E* Assert Change Time "
		    "(t1) = 0x%x\n", str, chstr, data32));

		data32 = XCPPM_BBC_DELAY(index);
		XCPPM_SETGET32(unitp->hndls.bbc_estar_ctrl,
		    (caddr_t)unitp->regs.bbc_pll_settle, data32);
		DPRINTF(D_CPU, ("%s: %s: Wrote E* PLL Settle Time "
		    "(t4) = 0x%x\n", str, chstr, data32));

		data16 = bbc_estar_control_masks[index];
		XCPPM_SETGET16(unitp->hndls.bbc_estar_ctrl,
		    (caddr_t)unitp->regs.bbc_estar_ctrl, data16);
		DPRINTF(D_CPU, ("%s: %s: Wrote BCC E* Control = 0x%x\n",
		    str, chstr, data16));
	}

	/*
	 * clear CPU Estar Mode bit in the gpio register
	 */
	if (speedup) {
		if (newlevel == highest)
			xcppm_rio_mode(unitp, XCPPM_CLRBIT);
	} else {
		if (newlevel == lowest)
			xcppm_switch_dcdc_converter(XCPPM_CLRBIT);
	}

	return (DDI_SUCCESS);
}
902 
903 
904 /*
905  * Process a request to change the power level of a cpu.  If all cpus
906  * don't want to be at the same power yet, or if we are currently
907  * refusing slowdown requests due to thermal stress, just cache the
908  * request.  Otherwise, make the change for all cpus.
909  */
910 /* ARGSUSED */
911 static int
xcppm_manage_cpus(dev_info_t * dip,power_req_t * reqp,int * result)912 xcppm_manage_cpus(dev_info_t *dip, power_req_t *reqp, int *result)
913 {
914 #ifdef DEBUG
915 	char *str = "xcppm_manage_cpus";
916 #endif
917 	int old, new, ret, kmflag;
918 	ppm_dev_t *ppmd;
919 	pm_ppm_devlist_t *devlist = NULL, *p;
920 	int		do_rescan = 0;
921 	dev_info_t	*rescan_dip;
922 
923 	*result = DDI_SUCCESS;
924 	switch (reqp->request_type) {
925 	case PMR_PPM_SET_POWER:
926 		break;
927 	case PMR_PPM_POWER_CHANGE_NOTIFY:
928 		/* cpu driver can`t change cpu power level by itself */
929 	default:
930 		return (DDI_FAILURE);
931 	}
932 
933 	ppmd = PPM_GET_PRIVATE(dip);
934 	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
935 	old = reqp->req.ppm_set_power_req.old_level;
936 	new = reqp->req.ppm_set_power_req.new_level;
937 
938 	/*
939 	 * At power on, the cpus are at full speed.  There is no hardware
940 	 * transition needed for going from unknown to full.  However, the
941 	 * state of the pm framework and cpu driver needs to be adjusted.
942 	 */
943 	if (ppmd->level == PM_LEVEL_UNKNOWN && new == ppmd->highest) {
944 		*result = ret = xcppm_change_power_level(ppmd, 0, new);
945 		if (ret != DDI_SUCCESS) {
946 			DPRINTF(D_CPU, ("%s: Failed to change "
947 			    "power level to %d\n", str, new));
948 		}
949 		return (ret);
950 	}
951 
952 	if (new == ppmd->level) {
953 		DPRINTF(D_CPU, ("%s: already at power level %d\n", str, new));
954 		return (DDI_SUCCESS);
955 	}
956 
957 	ppmd->rplvl = new;
958 
959 	/*
960 	 * A request from lower to higher level transition is granted and
961 	 * made effective on both cpus. For more than two cpu platform model,
962 	 * the following code needs to be modified to remember the rest of
963 	 * the unsoliciting cpus to be rescan'ed.
964 	 * A request from higher to lower must be agreed by all cpus.
965 	 */
966 	for (ppmd = xcppm_cpu.devlist; ppmd; ppmd = ppmd->next) {
967 		if (ppmd->rplvl == new)
968 			continue;
969 
970 		if (new < old) {
971 			DPRINTF(D_SOME, ("%s: not all cpus want to go down to "
972 			    "level %d yet\n", str, new));
973 			return (DDI_SUCCESS);
974 		}
975 
976 		/*
977 		 * If a single cpu requests power up, honor the request
978 		 * by powering up both cpus.
979 		 */
980 		if (new > old) {
981 			DPRINTF(D_SOME, ("%s: powering up device(%s@%s, %p) "
982 			    "because of request from dip(%s@%s, %p), "
983 			    "need pm_rescan\n", str, PM_NAME(ppmd->dip),
984 			    PM_ADDR(ppmd->dip), (void *)ppmd->dip,
985 			    PM_NAME(dip), PM_ADDR(dip), (void *)dip))
986 			do_rescan++;
987 			rescan_dip = ppmd->dip;
988 			break;
989 		}
990 	}
991 
992 	ret = xcppm_change_cpu_power(new);
993 	*result = ret;
994 
995 	if (ret == DDI_SUCCESS) {
996 		if (reqp->req.ppm_set_power_req.canblock == PM_CANBLOCK_BLOCK)
997 			kmflag = KM_SLEEP;
998 		else
999 			kmflag = KM_NOSLEEP;
1000 
1001 		for (ppmd = xcppm_cpu.devlist; ppmd; ppmd = ppmd->next) {
1002 			if (ppmd->dip == dip)
1003 				continue;
1004 
1005 			if ((p = kmem_zalloc(sizeof (pm_ppm_devlist_t),
1006 			    kmflag)) == NULL) {
1007 				break;
1008 			}
1009 			p->ppd_who = ppmd->dip;
1010 			p->ppd_cmpt = ppmd->cmpt;
1011 			p->ppd_old_level = old;
1012 			p->ppd_new_level = new;
1013 			p->ppd_next = devlist;
1014 
1015 			devlist = p;
1016 		}
1017 		reqp->req.ppm_set_power_req.cookie = (void *) devlist;
1018 
1019 		if (do_rescan > 0)
1020 			pm_rescan(rescan_dip);
1021 	}
1022 
1023 	return (ret);
1024 }
1025 
1026 
1027 /*
1028  * If powering off and all devices in this domain will now be off,
1029  * shut off common power.  If powering up and no devices up yet,
1030  * turn on common power.  Always make the requested power level
1031  * change for the target device.
1032  */
static int
xcppm_manage_fet(dev_info_t *dip, power_req_t *reqp, int *result)
{
#ifdef DEBUG
	char *str = "xcppm_manage_fet";
#endif
	/* pwr_func either changes hardware power or just records a level */
	int (*pwr_func)(ppm_dev_t *, int, int);
	int new, old, cmpt, incr = 0;
	ppm_dev_t *ppmd;

	ppmd = PPM_GET_PRIVATE(dip);
	DPRINTF(D_FET, ("%s: \"%s\", req %s\n", str,
	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));

	*result = DDI_SUCCESS;	/* change later for failures */
	switch (reqp->request_type) {
	case PMR_PPM_SET_POWER:
		pwr_func = xcppm_change_power_level;
		old = reqp->req.ppm_set_power_req.old_level;
		new = reqp->req.ppm_set_power_req.new_level;
		cmpt = reqp->req.ppm_set_power_req.cmpt;
		break;
	case PMR_PPM_POWER_CHANGE_NOTIFY:
		/* the device changed power itself; we only record the level */
		pwr_func = xcppm_record_level_change;
		old = reqp->req.ppm_notify_level_req.old_level;
		new = reqp->req.ppm_notify_level_req.new_level;
		cmpt = reqp->req.ppm_notify_level_req.cmpt;
		break;
	default:
		return (*result = DDI_FAILURE);

	}

	/* This is common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
	DPRINTF(D_FET, ("%s: \"%s\", old %d, new %d\n",
	    str, ppmd->path, old, new));

	ASSERT(old == ppmd->level);
	/* nothing to do when the device is already at the requested level */
	if (new == ppmd->level)
		return (DDI_SUCCESS);

	PPM_LOCK_DOMAIN(ppmd->domp);
	/*
	 * Devices in this domain are known to have 0 (off) as their
	 * lowest power level.  We use this fact to simplify the logic.
	 */
	if (new > 0) {
		/* the first device to power up also enables common power */
		if (ppmd->domp->pwr_cnt == 0)
			(void) xcppm_gpio_port2(XCPPM_SETBIT, DRVON);
		if (old == 0) {
			/*
			 * Pre-increment the powered-up count; if the power
			 * change below fails, the "incr" flag lets us back
			 * this increment out again.
			 */
			ppmd->domp->pwr_cnt++;
			incr = 1;
			DPRINTF(D_FET, ("%s: UP cnt = %d\n",
			    str, ppmd->domp->pwr_cnt));
		}
	}

	PPM_UNLOCK_DOMAIN(ppmd->domp);

	/*
	 * NOTE(review): pwr_cnt is read here after dropping the domain
	 * lock; the assertion relies on no concurrent updater -- confirm.
	 */
	ASSERT(ppmd->domp->pwr_cnt > 0);

	if ((*result = (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS) {
		DPRINTF(D_FET, ("%s: \"%s\" power change failed \n",
		    str, ppmd->path));
	}

	PPM_LOCK_DOMAIN(ppmd->domp);

	/*
	 * Decr the power count in two cases:
	 *
	 *   1) request was to power device down and was successful
	 *   2) request was to power up (we pre-incremented count), but failed.
	 */
	if ((*result == DDI_SUCCESS && ppmd->level == 0) ||
	    (*result != DDI_SUCCESS && incr)) {
		ASSERT(ppmd->domp->pwr_cnt > 0);
		ppmd->domp->pwr_cnt--;
		DPRINTF(D_FET, ("%s: DN cnt = %d\n", str, ppmd->domp->pwr_cnt));
		/* the last device to power down also removes common power */
		if (ppmd->domp->pwr_cnt == 0)
			(void) xcppm_gpio_port2(XCPPM_CLRBIT, DRVON);
	}

	PPM_UNLOCK_DOMAIN(ppmd->domp);
	ASSERT(ppmd->domp->pwr_cnt >= 0);
	return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}
1120 
1121 
1122 /*
1123  * Since UPA64S relies on PCI B staying at nominal 33MHz in order to
1124  * have its interrupt pulse function properly, we ensure
1125  * - Lowering PCI B only if UPA64S is at low power, otherwise defer
1126  *   the action until UPA64S goes down; hence right after UPA64S goes
1127  *   down, perform the deferred action for PCI B;
1128  * - Always raise PCI B power prior to raising UPA64S power.
1129  *
1130  * Both UPA64S and PCI B devices are considered each other's dependency
1131  * device whenever actual power transition is handled (PMR_PPM_SET_POWER).
1132  */
static int
xcppm_manage_pciupa(dev_info_t *dip, power_req_t *reqp, int *result)
{
#ifdef DEBUG
	char *str = "xcppm_manage_pciupa";
#endif
	/* pwr_func either changes hardware power or just records a level */
	int (*pwr_func)(ppm_dev_t *, int, int);
	uint_t flags = 0, co_flags = 0;
	ppm_dev_t *ppmd, *codev;
	int new, cmpt, retval;

	ppmd = PPM_GET_PRIVATE(dip);
	DPRINTF(D_PCIUPA, ("%s: \"%s\", req %s\n", str,
	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));

	*result = DDI_SUCCESS;

	switch (reqp->request_type) {
	case PMR_PPM_SET_POWER:
		pwr_func = xcppm_change_power_level;
		new = reqp->req.ppm_set_power_req.new_level;
		cmpt = reqp->req.ppm_set_power_req.cmpt;
		break;
	case PMR_PPM_POWER_CHANGE_NOTIFY:
		/* the device changed power itself; we only record the level */
		pwr_func = xcppm_record_level_change;
		new = reqp->req.ppm_notify_level_req.new_level;
		cmpt = reqp->req.ppm_notify_level_req.cmpt;
		break;
	default:
		*result = DDI_FAILURE;
		return (DDI_FAILURE);
	}

	/* Common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
	ASSERT(ppmd);	/* since it should be locked already */

	if (new == ppmd->level)
		return (DDI_SUCCESS);

	DPRINTF(D_PCIUPA, ("%s: \"%s\", levels: current %d, new %d\n",
	    str, ppmd->path, ppmd->level, new));

	/*
	 * find power-wise co-related device
	 */
	flags =  ppmd->flags;

#ifdef DEBUG
	if (flags & ~(XCPPMF_PCIB|XCPPMF_UPA))
		DPRINTF(D_ERROR, ("%s: invalid ppmd->flags value 0x%x\n", str,
		    ppmd->flags));
#endif

	/* the co-device is the other member of the UPA64S/PCI B pair */
	if (flags == XCPPMF_UPA)
		co_flags = XCPPMF_PCIB;
	else if (flags == XCPPMF_PCIB)
		co_flags = XCPPMF_UPA;

	/* scan the domain for component 0 of the co-related device */
	for (codev = ppmd->domp->devlist; codev; codev = codev->next)
		if ((codev->cmpt == 0) && (codev->flags == co_flags))
			break;

	if (new > ppmd->level) {
		/*
		 * Raise power level -
		 * pre-raising: upa ensure pci is powered up.
		 */
		if ((flags == XCPPMF_UPA) && codev &&
		    (codev->level != codev->highest)) {
			if ((retval = xcppm_change_power_level(codev,
			    0, codev->highest)) != DDI_SUCCESS &&
			    codev->level != codev->highest) {
				*result = retval;
				return (DDI_FAILURE);
			}
		}
		if ((retval = (*pwr_func)(ppmd, 0, new)) != DDI_SUCCESS) {
			*result = retval;
			return (DDI_FAILURE);
		}
	} else if (new < ppmd->level) {
		/*
		 * Lower power level
		 *
		 * once upa is attached, pci checks upa level:
		 * if upa is at high level, defer the request and return.
		 * otherwise, set power level then check and lower pci level.
		 */
		if ((flags == XCPPMF_PCIB) && codev &&
		    (codev->level != codev->lowest)) {
			/* record the requested level for the deferred action */
			ppmd->rplvl = new;
			return (DDI_SUCCESS);
		}
		if ((retval = (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS &&
		    ppmd->level != new) {
			*result = retval;
			return (DDI_FAILURE);
		}

		/* upa went down: perform any deferred lowering of pci */
		if (flags == XCPPMF_UPA) {
			if (codev && (codev->rplvl != PM_LEVEL_UNKNOWN) &&
			    (codev->rplvl < codev->level)) {
				DPRINTF(D_PCIUPA, ("%s: codev \"%s\" "
				    "rplvl %d level %d\n", str, codev->path,
				    codev->rplvl, codev->level));
				if ((retval = xcppm_change_power_level(
				    codev, 0, codev->rplvl)) != DDI_SUCCESS) {
					*result = retval;
					return (DDI_FAILURE);
				}
			}
		}
	}

	return (DDI_SUCCESS);
}
1249 
1250 
1251 /*
1252  * When all of the children of the 1394 nexus are idle, a call will be
1253  * made to the nexus driver's own power entry point to lower power.  Ppm
1254  * intercepts this and kills 1394 cable power (since the driver doesn't
1255  * have access to the required register).  Similar logic applies when
1256  * coming up from the state where all the children were off.
1257  */
static int
xcppm_manage_1394(dev_info_t *dip, power_req_t *reqp, int *result)
{
#ifdef DEBUG
	char *str = "xcppm_manage_1394";
#endif
	/* pwr_func either changes hardware power or just records a level */
	int (*pwr_func)(ppm_dev_t *, int, int);
	int new, old, cmpt;
	ppm_dev_t *ppmd;

	ppmd = PPM_GET_PRIVATE(dip);
	DPRINTF(D_1394, ("%s: \"%s\", req %s\n", str,
	    ppmd->path, ppm_get_ctlstr(reqp->request_type, ~0)));

	switch (reqp->request_type) {
	case PMR_PPM_SET_POWER:
		pwr_func = xcppm_change_power_level;
		old = reqp->req.ppm_set_power_req.old_level;
		new = reqp->req.ppm_set_power_req.new_level;
		cmpt = reqp->req.ppm_set_power_req.cmpt;
		break;
	case PMR_PPM_POWER_CHANGE_NOTIFY:
		/* the device changed power itself; we only record the level */
		pwr_func = xcppm_record_level_change;
		old = reqp->req.ppm_notify_level_req.old_level;
		new = reqp->req.ppm_notify_level_req.new_level;
		cmpt = reqp->req.ppm_notify_level_req.cmpt;
		break;
	default:
		return (*result = DDI_FAILURE);
	}


	/* Common code for SET_POWER and POWER_CHANGE_NOTIFY cases */
	DPRINTF(D_1394, ("%s: dev %s@%s, old %d new %d\n", str,
	    ddi_binding_name(dip), ddi_get_name_addr(dip), old, new));

	ASSERT(ppmd);	/* since it must already be locked */
	ASSERT(old == ppmd->level);

	if (new == ppmd->level)
		return (*result = DDI_SUCCESS);

	/* the reduce power case */
	if (cmpt == 0 && new < ppmd->level) {
		if ((*result =
		    (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
		/* dropping to the lowest level also kills cable power */
		if (new == ppmd->lowest)
			(void) xcppm_gpio_port2(XCPPM_CLRBIT, CPEN);
		ppmd->level = new;
		return (DDI_SUCCESS);
	}

	/* the increase power case */
	if (cmpt == 0 && new > ppmd->level) {
		if (ppmd->level == ppmd->lowest) {
			/* restore cable power before raising device power */
			(void) xcppm_gpio_port2(XCPPM_SETBIT, CPEN);
			delay(1);
		}
		/*
		 * Even if pwr_func fails we need to check current level again
		 * because it could have been changed by an intervening
		 * POWER_CHANGE_NOTIFY operation.
		 */
		if ((*result =
		    (*pwr_func)(ppmd, cmpt, new)) != DDI_SUCCESS &&
		    ppmd->level == ppmd->lowest) {
			/* power-up failed and we are still lowest: back out */
			(void) xcppm_gpio_port2(XCPPM_CLRBIT, CPEN);
		} else {
			ppmd->level = new;
		}

		return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
	}

	/*
	 * We get here if component was non-zero.  This is not what we
	 * expect.  Let the device deal with it and just pass back the
	 * result.
	 */
	*result = xcppm_change_power_level(ppmd, cmpt, new);
	return (*result == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
}
1342 
1343 
1344 /*
1345  * lock, unlock, or trylock for one power mutex
1346  */
1347 static void
xcppm_lock_one(ppm_dev_t * ppmd,power_req_t * reqp,int * iresp)1348 xcppm_lock_one(ppm_dev_t *ppmd, power_req_t *reqp, int *iresp)
1349 {
1350 	switch (reqp->request_type) {
1351 	case PMR_PPM_LOCK_POWER:
1352 		pm_lock_power_single(ppmd->dip);
1353 		break;
1354 
1355 	case PMR_PPM_UNLOCK_POWER:
1356 		pm_unlock_power_single(ppmd->dip);
1357 		break;
1358 
1359 	case PMR_PPM_TRY_LOCK_POWER:
1360 		*iresp = pm_try_locking_power_single(ppmd->dip);
1361 		break;
1362 	}
1363 }
1364 
1365 
1366 /*
1367  * lock, unlock, or trylock all devices within a domain.
1368  */
static void
xcppm_lock_all(ppm_domain_t *domp, power_req_t *reqp, int *iresp)
{
	/*
	 * To simplify the implementation we let all the devices
	 * in the domain be represented by a single device (dip).
	 * We use the first device in the domain's devlist.  This
	 * is safe because we return with the domain lock held
	 * which prevents the list from changing.
	 */
	if (reqp->request_type == PMR_PPM_LOCK_POWER) {
		/* nested lock by the same thread only bumps the refcnt */
		if (!MUTEX_HELD(&domp->lock))
			mutex_enter(&domp->lock);
		domp->refcnt++;
		ASSERT(domp->devlist != NULL);
		pm_lock_power_single(domp->devlist->dip);
		/* domain lock remains held */
		return;
	} else if (reqp->request_type == PMR_PPM_UNLOCK_POWER) {
		ASSERT(MUTEX_HELD(&domp->lock));
		ASSERT(domp->devlist != NULL);
		pm_unlock_power_single(domp->devlist->dip);
		/* drop the domain lock only on the outermost unlock */
		if (--domp->refcnt == 0)
			mutex_exit(&domp->lock);
		return;
	}

	ASSERT(reqp->request_type == PMR_PPM_TRY_LOCK_POWER);
	if (!MUTEX_HELD(&domp->lock))
		if (!mutex_tryenter(&domp->lock)) {
			/* domain lock unavailable: report trylock failure */
			*iresp = 0;
			return;
		}
	*iresp = pm_try_locking_power_single(domp->devlist->dip);
	if (*iresp)
		domp->refcnt++;
	else
		mutex_exit(&domp->lock);
}
1408 
1409 
1410 /*
1411  * The pm framework calls us here to manage power for a device.
1412  * We maintain state which tells us whether we need to turn off/on
1413  * system board power components based on the status of all the devices
1414  * sharing a component.
1415  *
1416  */
/* ARGSUSED */
static int
xcppm_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	power_req_t *reqp = arg;
	xcppm_unit_t *unitp;
	ppm_domain_t *domp;
	ppm_dev_t *ppmd;

#ifdef DEBUG
	char path[MAXPATHLEN], *ctlstr, *str = "xcppm_ctlops";
	uint_t mask = ppm_debug & (D_CTLOPS1 | D_CTLOPS2);
	if (mask && (ctlstr = ppm_get_ctlstr(reqp->request_type, mask))) {
		prom_printf("%s: \"%s\", %s\n", str,
		    ddi_pathname(rdip, path), ctlstr);
	}
#endif

	/* this bus_ctl entry point only handles power requests */
	if (ctlop != DDI_CTLOPS_POWER)
		return (DDI_FAILURE);

	switch (reqp->request_type) {
	case PMR_PPM_UNMANAGE:
	case PMR_PPM_PRE_PROBE:
	case PMR_PPM_POST_PROBE:
	case PMR_PPM_PRE_ATTACH:
	case PMR_PPM_PRE_DETACH:
		return (DDI_SUCCESS);

	/*
	 * There is no hardware configuration required to be done on this
	 * platform prior to installing drivers.
	 */
	case PMR_PPM_INIT_CHILD:
	case PMR_PPM_UNINIT_CHILD:
		return (DDI_SUCCESS);

	case PMR_PPM_ALL_LOWEST:
		DPRINTF(D_LOWEST, ("%s: all devices at lowest power = %d\n",
		    str, reqp->req.ppm_all_lowest_req.mode));
		if (reqp->req.ppm_all_lowest_req.mode == PM_ALL_LOWEST) {
			unitp = ddi_get_soft_state(ppm_statep, ppm_inst);
			mutex_enter(&unitp->unit_lock);
			/* don't touch the LED while suspended */
			if (unitp->state & XCPPM_ST_SUSPENDED) {
				mutex_exit(&unitp->unit_lock);
				return (DDI_SUCCESS);
			}

			/* start blinking the LED to show the lowest state */
			xcppm_set_led(PPM_LEDON);
			unitp->led_tid = timeout(xcppm_blink_led,
			    (void *)PPM_LEDON, PPM_LEDON_INTERVAL);
			mutex_exit(&unitp->unit_lock);
			DPRINTF(D_LOWEST, ("%s: LED blink started\n", str));
		} else {
			xcppm_freeze_led((void *)PPM_LEDON);
			DPRINTF(D_LOWEST, ("%s: LED freeze ON\n", str));
		}
		return (DDI_SUCCESS);

	case PMR_PPM_POST_ATTACH:
		/*
		 * After a successful attach, if we haven't already created
		 * our private data structure for this device, ppm_get_dev()
		 * will force it to be created.
		 */
		ppmd = PPM_GET_PRIVATE(rdip);
		if (reqp->req.ppm_config_req.result != DDI_SUCCESS) {
			/* attach failed: discard any private data */
			if (ppmd)
				ppm_rem_dev(rdip);
		} else if (!ppmd) {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			(void) ppm_get_dev(rdip, domp);
		}
		return (DDI_SUCCESS);

	case PMR_PPM_POST_DETACH:
		xcppm_detach_ctlop(rdip, reqp);
		*(int *)result = DDI_SUCCESS;
		return (DDI_SUCCESS);

	case PMR_PPM_PRE_RESUME:
		xcppm_resume_ctlop(rdip, reqp);
		return (DDI_SUCCESS);

	case PMR_PPM_UNLOCK_POWER:
	case PMR_PPM_TRY_LOCK_POWER:
	case PMR_PPM_LOCK_POWER:
		ppmd = PPM_GET_PRIVATE(rdip);
		if (ppmd)
			domp = ppmd->domp;
		else if (reqp->request_type != PMR_PPM_UNLOCK_POWER) {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			ppmd = ppm_get_dev(rdip, domp);
		}

		/*
		 * NOTE(review): if ppmd is NULL on an UNLOCK_POWER request,
		 * neither branch above sets domp before it is dereferenced
		 * below.  Presumably an unlock implies a prior lock created
		 * ppmd -- confirm that invariant holds for all callers.
		 */
		ASSERT(domp->dflags == PPMD_LOCK_ALL ||
		    domp->dflags == PPMD_LOCK_ONE);
		DPRINTF(D_LOCKS, ("xcppm_lock_%s: \"%s\", %s\n",
		    (domp->dflags == PPMD_LOCK_ALL) ? "all" : "one",
		    ppmd->path, ppm_get_ctlstr(reqp->request_type, D_LOCKS)));

		if (domp->dflags == PPMD_LOCK_ALL)
			xcppm_lock_all(domp, reqp, result);
		else
			xcppm_lock_one(ppmd, reqp, result);
		return (DDI_SUCCESS);

	case PMR_PPM_POWER_LOCK_OWNER:
		ASSERT(reqp->req.ppm_power_lock_owner_req.who == rdip);
		ppmd = PPM_GET_PRIVATE(rdip);
		if (ppmd)
			domp = ppmd->domp;
		else {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			ppmd = ppm_get_dev(rdip, domp);
		}

		/*
		 * In case of LOCK_ALL, effective owner of the power lock
		 * is the owner of the domain lock. otherwise, it is the owner
		 * of the power lock.
		 */
		if (domp->dflags & PPMD_LOCK_ALL)
			reqp->req.ppm_power_lock_owner_req.owner =
			    mutex_owner(&domp->lock);
		else {
			reqp->req.ppm_power_lock_owner_req.owner =
			    DEVI(rdip)->devi_busy_thread;
		}
		return (DDI_SUCCESS);

	default:
		/* power-change requests: dispatch to the domain handler */
		ppmd = PPM_GET_PRIVATE(rdip);
		if (ppmd == NULL) {
			domp = ppm_lookup_dev(rdip);
			ASSERT(domp);
			ppmd = ppm_get_dev(rdip, domp);
		}

#ifdef DEBUG
		if ((reqp->request_type == PMR_PPM_SET_POWER) &&
		    (ppm_debug & D_SETPWR)) {
			prom_printf("%s: \"%s\", PMR_PPM_SET_POWER\n",
			    str, ppmd->path);
		}
#endif

		if (ppmd->domp == &xcppm_cpu)
			return (xcppm_manage_cpus(rdip, reqp, result));
		else if (ppmd->domp == &xcppm_fet)
			return (xcppm_manage_fet(rdip, reqp, result));
		else if (ppmd->domp == &xcppm_upa)
			return (xcppm_manage_pciupa(rdip, reqp, result));
		else {
			ASSERT(ppmd->domp == &xcppm_1394);
			return (xcppm_manage_1394(rdip, reqp, result));
		}
	}
}
1580 
1581 
1582 /*
1583  * Initialize our private version of real power level
1584  * as well as lowest and highest levels the device supports;
1585  * see ppmf and ppm_add_dev
1586  */
static void
xcppm_dev_init(ppm_dev_t *ppmd)
{
	struct pm_component *dcomps;
	struct pm_comp *pm_comp;
	dev_info_t *dip;
	int maxi;

	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
	ppmd->level = PM_LEVEL_UNKNOWN;
	ppmd->rplvl = PM_LEVEL_UNKNOWN;

	dip = ppmd->dip;
	/*
	 * ppm exists to handle power-manageable devices which require
	 * special handling on the current platform.  However, a
	 * driver for such a device may choose not to support power
	 * management on a particular load/attach.  In this case we
	 * create a structure to represent a single-component device
	 * for which "level" = PM_LEVEL_UNKNOWN and "lowest" = 0
	 * are effectively constant.
	 */
	if (PM_GET_PM_INFO(dip)) {
		/* device is power managed; copy its level range */
		dcomps = DEVI(dip)->devi_pm_components;
		pm_comp = &dcomps[ppmd->cmpt].pmc_comp;

		ppmd->lowest = pm_comp->pmc_lvals[0];
		ASSERT(ppmd->lowest >= 0);
		maxi = pm_comp->pmc_numlevels - 1;
		ppmd->highest = pm_comp->pmc_lvals[maxi];
	}

	/*
	 * add any domain-specific initialization here
	 */
	if (ppmd->domp == &xcppm_fet) {
		/*
		 * when a new device is added to the powerfet domain
		 * it is counted here as being powered up.
		 */
		ppmd->domp->pwr_cnt++;
		DPRINTF(D_FET, ("xcppm_dev_init: UP cnt = %d\n",
		    ppmd->domp->pwr_cnt));
	} else if (ppmd->domp == &xcppm_upa) {
		/*
		 * There may be a better way to determine the device type
		 * instead of comparing to hard coded string names.
		 */
		if (strstr(ppmd->path, "pci@8,700000"))
			ppmd->flags = XCPPMF_PCIB;
		else if (strstr(ppmd->path, "upa@8,480000"))
			ppmd->flags = XCPPMF_UPA;
	}
}
1641 
1642 
1643 /*
1644  * see ppmf and ppm_rem_dev
1645  */
1646 static void
xcppm_dev_fini(ppm_dev_t * ppmd)1647 xcppm_dev_fini(ppm_dev_t *ppmd)
1648 {
1649 	ASSERT(MUTEX_HELD(&ppmd->domp->lock));
1650 	if (ppmd->domp == &xcppm_fet) {
1651 		if (ppmd->level != ppmd->lowest) {
1652 			ppmd->domp->pwr_cnt--;
1653 			DPRINTF(D_FET, ("xcppm_dev_fini: DN cnt = %d\n",
1654 			    ppmd->domp->pwr_cnt));
1655 		};
1656 	}
1657 }
1658 
1659 
1660 /*
1661  * see ppmf and ppm_ioctl, PPMIOCSET
1662  */
1663 static void
xcppm_iocset(uint8_t value)1664 xcppm_iocset(uint8_t value)
1665 {
1666 	int action;
1667 
1668 	if (value == PPM_IDEV_POWER_ON)
1669 		action = XCPPM_SETBIT;
1670 	else if (value == PPM_IDEV_POWER_OFF)
1671 		action = XCPPM_CLRBIT;
1672 	(void) xcppm_gpio_port2(action, DRVON);
1673 }
1674 
1675 
1676 /*
1677  * see ppmf and ppm_ioctl, PPMIOCGET
1678  */
1679 static uint8_t
xcppm_iocget(void)1680 xcppm_iocget(void)
1681 {
1682 	uint8_t bit;
1683 
1684 	bit = xcppm_gpio_port2(XCPPM_GETBIT, DRVON);
1685 	return ((bit == DRVON) ? PPM_IDEV_POWER_ON : PPM_IDEV_POWER_OFF);
1686 }
1687