/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_implfuncs.h>
#include <sys/pci/pci_obj.h>
#include <sys/pci/pci_pwr.h>
#include <sys/pci.h>

static void pci_pwr_update_comp(pci_pwr_t *pwr_p, pci_pwr_chld_t *p, int comp,
	int lvl);

#ifdef DEBUG
static char *pci_pwr_bus_label[] = {"PM_LEVEL_B3", "PM_LEVEL_B2",
	"PM_LEVEL_B1", "PM_LEVEL_B0"};
#endif

/*LINTLIBRARY*/

/*
 * Retrieve the pci_pwr_chld_t structure for a given devinfo node.
 */
pci_pwr_chld_t *
pci_pwr_get_info(pci_pwr_t *pwr_p, dev_info_t *dip)
{
	pci_pwr_chld_t *p;

	ASSERT(PM_CAPABLE(pwr_p));
	ASSERT(MUTEX_HELD(&pwr_p->pwr_mutex));

	for (p = pwr_p->pwr_info; p != NULL; p = p->next) {
		if (p->dip == dip) {

			return (p);
		}
	}

	cmn_err(CE_PANIC, "unable to find pwr info data for %s@%s",
	    ddi_node_name(dip), ddi_get_name_addr(dip));

	/*NOTREACHED*/
	return (NULL);
}

/*
 * Create a pci_pwr_chld_t structure for a given devinfo node.
 */
void
pci_pwr_create_info(pci_pwr_t *pwr_p, dev_info_t *dip)
{
	pci_pwr_chld_t *p;

	ASSERT(PM_CAPABLE(pwr_p));

	DEBUG2(DBG_PWR, ddi_get_parent(dip), "ADDING NEW PWR_INFO %s@%s\n",
	    ddi_node_name(dip), ddi_get_name_addr(dip));

	p = kmem_zalloc(sizeof (struct pci_pwr_chld), KM_SLEEP);
	p->dip = dip;

	mutex_enter(&pwr_p->pwr_mutex);

	/*
	 * Until components are created for this device, bus
	 * should be at full power since power of child device
	 * is unknown.  Increment # children requiring "full power"
	 */
	p->flags |= PWR_FP_HOLD;
	pwr_p->pwr_fp++;

	p->next = pwr_p->pwr_info;
	pwr_p->pwr_info = p;

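	/*
	 * The new child starts with an fp hold (pwr_fp was just
	 * incremented), so this recomputation drives the bus to B0.
	 */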
	pci_pwr_change(pwr_p, pwr_p->current_lvl, pci_pwr_new_lvl(pwr_p));

	mutex_exit(&pwr_p->pwr_mutex);
}

void
pci_pwr_rm_info(pci_pwr_t *pwr_p, dev_info_t *cdip)
{
	pci_pwr_chld_t **prev_infop;
	pci_pwr_chld_t *infop = NULL;
	int i;

	ASSERT(PM_CAPABLE(pwr_p));

	mutex_enter(&pwr_p->pwr_mutex);

	for (prev_infop = &pwr_p->pwr_info; *prev_infop != NULL;
	    prev_infop = &((*prev_infop)->next)) {
		if ((*prev_infop)->dip == cdip) {
			infop = *prev_infop;
			break;
		}
	}

	if (infop == NULL) {

		mutex_exit(&pwr_p->pwr_mutex);
		return;
	}

	*prev_infop = infop->next;

	/*
	 * Remove any reference counts for this child.
	 */
	if (infop->comp_pwr != NULL) {
		for (i = 0; i < infop->num_comps; i++) {
			pci_pwr_update_comp(pwr_p, infop, i, PM_LEVEL_NOLEVEL);
		}

		kmem_free(infop->comp_pwr, sizeof (int) * infop->num_comps);
	}

	if (infop->flags & PWR_FP_HOLD) {
		pwr_p->pwr_fp--;
	}

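	/*
	 * With the departing child's counts and fp hold removed, the bus
	 * power level may be able to drop; recompute it.
	 */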
	pci_pwr_change(pwr_p, pwr_p->current_lvl, pci_pwr_new_lvl(pwr_p));
	mutex_exit(&pwr_p->pwr_mutex);
	kmem_free(infop, sizeof (struct pci_pwr_chld));
}

/*
 * Allocate space for component state information in pci_pwr_chld_t
 */
void
pci_pwr_add_components(pci_pwr_t *pwr_p, dev_info_t *cdip, pci_pwr_chld_t *p)
{
	int num_comps = PM_NUMCMPTS(cdip);
	int i;

	ASSERT(MUTEX_HELD(&pwr_p->pwr_mutex));
	/*
	 * Assume the power level of a component is UNKNOWN until
	 * notified otherwise.
	 */
	if (num_comps > 0) {
		p->comp_pwr =
		    kmem_alloc(sizeof (int) * num_comps, KM_SLEEP);
		p->num_comps = num_comps;

		DEBUG3(DBG_PWR, ddi_get_parent(cdip),
		    "ADDING %d COMPONENTS FOR %s@%s\n", num_comps,
		    ddi_node_name(cdip), ddi_get_name_addr(cdip));
	} else {
		cmn_err(CE_WARN, "%s%d device has %d components",
		    ddi_driver_name(cdip), ddi_get_instance(cdip),
		    num_comps);

		return;
	}

	/*
	 * Release the fp hold that was made when the device
	 * was created.
	 */
	ASSERT((p->flags & PWR_FP_HOLD) == PWR_FP_HOLD);
	p->flags &= ~PWR_FP_HOLD;
	pwr_p->pwr_fp--;

	for (i = 0; i < num_comps; i++) {
		/*
		 * Initialize the component lvl so that the
		 * state reference counts will be updated correctly.
		 */
		p->comp_pwr[i] = PM_LEVEL_NOLEVEL;
		pci_pwr_update_comp(pwr_p, p, i, PM_LEVEL_UNKNOWN);
	}
}

/*
 * Update the current power level for a component, then adjust the
 * bus reference counts for the old and new states.
 */
static void
pci_pwr_update_comp(pci_pwr_t *pwr_p, pci_pwr_chld_t *p, int comp,
			int lvl)
{
	ASSERT(MUTEX_HELD(&pwr_p->pwr_mutex));

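	/*
	 * pwr_uk and pwr_d0..pwr_d3 are bus-wide counts of child components
	 * currently at each power level; u01 is this child's own count of
	 * components at UNKNOWN, D0 or D1.
	 */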
	/*
	 * Remove old pwr state count for old PM level.
	 */
	switch (p->comp_pwr[comp]) {
	case PM_LEVEL_UNKNOWN:
		pwr_p->pwr_uk--;
		p->u01--;
		ASSERT(pwr_p->pwr_uk >= 0);
		break;
	case PM_LEVEL_D0:
		pwr_p->pwr_d0--;
		p->u01--;
		ASSERT(pwr_p->pwr_d0 >= 0);
		break;
	case PM_LEVEL_D1:
		pwr_p->pwr_d1--;
		p->u01--;
		ASSERT(pwr_p->pwr_d1 >= 0);
		break;
	case PM_LEVEL_D2:
		pwr_p->pwr_d2--;
		ASSERT(pwr_p->pwr_d2 >= 0);
		break;
	case PM_LEVEL_D3:
		pwr_p->pwr_d3--;
		ASSERT(pwr_p->pwr_d3 >= 0);
		break;
	default:
		break;
	}

	p->comp_pwr[comp] = lvl;
	/*
	 * Add new pwr state count for the new PM level.
	 */
	switch (lvl) {
	case PM_LEVEL_UNKNOWN:
		pwr_p->pwr_uk++;
		p->u01++;
		break;
	case PM_LEVEL_D0:
		pwr_p->pwr_d0++;
		p->u01++;
		break;
	case PM_LEVEL_D1:
		pwr_p->pwr_d1++;
		p->u01++;
		break;
	case PM_LEVEL_D2:
		pwr_p->pwr_d2++;
		break;
	case PM_LEVEL_D3:
		pwr_p->pwr_d3++;
		break;
	default:
		break;
	}
}

/*
 * Knowing the current state of all devices on the bus, return the
 * appropriate supported bus power level.
 */
int
pci_pwr_new_lvl(pci_pwr_t *pwr_p)
{
	int b_lvl;

	ASSERT(MUTEX_HELD(&pwr_p->pwr_mutex));

	if (pwr_p->pwr_fp > 0) {
		DEBUG1(DBG_PWR, pwr_p->pwr_dip, "new_lvl: "
		    "returning PM_LEVEL_B0 pwr_fp = %d\n", pwr_p->pwr_fp);

		return (PM_LEVEL_B0);
	}

	/*
	 * If any components are at unknown power levels, the
	 * highest power level has to be assumed for the device (D0).
	 */
	if (pwr_p->pwr_uk > 0) {
		DEBUG1(DBG_PWR, pwr_p->pwr_dip, "new_lvl: unknown "
		    "count is %d. returning PM_LEVEL_B0\n", pwr_p->pwr_uk);

		return (PM_LEVEL_B0);
	}

	/*
	 * Find the lowest theoretical level
	 * the bus can operate at.
	 */
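	/*
	 * Each device D-state maps to the shallowest bus state that can
	 * support it: D0 -> B0, D1 -> B1, D2 -> B2, D3 -> B3.  The bus must
	 * stay at the level required by the most active child component.
	 */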
	if (pwr_p->pwr_d0 > 0) {
		b_lvl = PM_LEVEL_B0;
		DEBUG1(DBG_PWR, pwr_p->pwr_dip,
		    "new_lvl: PM_LEVEL_B0 d0 count = %d\n",
		    pwr_p->pwr_d0);
	} else if (pwr_p->pwr_d1 > 0) {
		b_lvl = PM_LEVEL_B1;
		DEBUG1(DBG_PWR, pwr_p->pwr_dip,
		    "new_lvl: PM_LEVEL_B1 d1 count = %d\n",
		    pwr_p->pwr_d1);
	} else if (pwr_p->pwr_d2 > 0) {
		b_lvl = PM_LEVEL_B2;
		DEBUG1(DBG_PWR, pwr_p->pwr_dip,
		    "new_lvl: PM_LEVEL_B2 d2 count = %d\n",
		    pwr_p->pwr_d2);
	} else if (pwr_p->pwr_d3 > 0) {
		b_lvl = PM_LEVEL_B3;
		DEBUG1(DBG_PWR, pwr_p->pwr_dip,
		    "new_lvl: PM_LEVEL_B3 d3 count = %d\n",
		    pwr_p->pwr_d3);
	} else {
		DEBUG0(DBG_PWR, pwr_p->pwr_dip,
		    "new_lvl: PM_LEVEL_B3: all counts are 0\n");
		b_lvl = PM_LEVEL_B3;
	}

	/*
	 * Now find the closest supported level available.
	 * If the level isn't available, have to find the
	 * next highest power level (or lowest in B# terms).
	 */
	switch (b_lvl) {
	case PM_LEVEL_B3:
		if (pwr_p->pwr_flags & PCI_PWR_B3_CAPABLE) {
			break;
		}
		/*FALLTHROUGH*/
	case PM_LEVEL_B2:
		if (pwr_p->pwr_flags & PCI_PWR_B2_CAPABLE) {
			b_lvl = PM_LEVEL_B2;
			break;
		}
		/*FALLTHROUGH*/
	case PM_LEVEL_B1:
		if (pwr_p->pwr_flags & PCI_PWR_B1_CAPABLE) {
			b_lvl = PM_LEVEL_B1;
			break;
		}
		/*FALLTHROUGH*/
	case PM_LEVEL_B0:
		/*
		 * This level is always supported.
		 */
		b_lvl = PM_LEVEL_B0;
		break;
	}
	DEBUG1(DBG_PWR, pwr_p->pwr_dip,
	    "new_lvl: Adjusted Level is %s\n",
	    pci_pwr_bus_label[b_lvl]);

	return (b_lvl);
}

int
pci_raise_power(pci_pwr_t *pwr_p, int current, int new, void *impl_arg,
    pm_bp_nexus_pwrup_t bpn)
{
	int ret = DDI_SUCCESS, pwrup_res;

	ASSERT(MUTEX_HELD(&pwr_p->pwr_mutex));

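	/*
	 * Mark the bus component busy so it is not powered down while the
	 * raise is pending, then drop pwr_mutex: the power-up request goes
	 * through the PM framework, which can call back into this nexus'
	 * own power routines, and those take pwr_mutex.
	 */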
	pci_pwr_component_busy(pwr_p);
	mutex_exit(&pwr_p->pwr_mutex);
	ret = pm_busop_bus_power(pwr_p->pwr_dip, impl_arg,
	    BUS_POWER_NEXUS_PWRUP, (void *) &bpn,
	    (void *) &pwrup_res);
	if (ret != DDI_SUCCESS || pwrup_res != DDI_SUCCESS) {
		mutex_enter(&pwr_p->pwr_mutex);
		pci_pwr_component_idle(pwr_p);
		mutex_exit(&pwr_p->pwr_mutex);
		cmn_err(CE_WARN, "%s%d pci_raise_power failed",
		    ddi_driver_name(pwr_p->pwr_dip),
		    ddi_get_instance(pwr_p->pwr_dip));
	}

	return (ret);
}

int
pci_pwr_ops(pci_pwr_t *pwr_p, dev_info_t *dip, void *impl_arg,
    pm_bus_power_op_t op, void *arg, void *result)
{
	pci_pwr_chld_t *p_chld;
	pm_bp_nexus_pwrup_t bpn;
	pm_bp_child_pwrchg_t *bpc = (pm_bp_child_pwrchg_t *)arg;
	dev_info_t *rdip = bpc->bpc_dip;
	int new_level, *res = (int *)result, ret = DDI_SUCCESS;

	mutex_enter(&pwr_p->pwr_mutex);
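	/*
	 * PRE_NOTIFICATION takes an fp hold and raises the bus to B0 before
	 * a child's power() routine runs.  POST_NOTIFICATION drops that
	 * hold; it and HAS_CHANGED record the child's new component level
	 * and then recompute the bus level.  Anything else is passed
	 * through to the PM framework.
	 */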
	switch (op) {
	case BUS_POWER_HAS_CHANGED:
		p_chld = pci_pwr_get_info(pwr_p, rdip);
		DEBUG5(DBG_PWR, dip, "%s@%s CHANGED_POWER cmp = %d "
		    "old = %d new = %d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    bpc->bpc_comp, bpc->bpc_olevel, bpc->bpc_nlevel);

		if (*res == DDI_FAILURE) {
			DEBUG0(DBG_PWR, rdip, "changed_power_req FAILED\n");
			break;
		} else {

			/*
			 * pci_pwr_add_components must be called here if
			 * comp_pwr hasn't been set up yet.  It has to be done
			 * here rather than in post-attach, since it is possible
			 * for power() of child to get called before attach
			 * completes.
			 */
			if (p_chld->comp_pwr == NULL)
				pci_pwr_add_components(pwr_p, rdip, p_chld);

			pci_pwr_update_comp(pwr_p, p_chld,
			    bpc->bpc_comp, bpc->bpc_nlevel);
		}

		new_level = pci_pwr_new_lvl(pwr_p);
		bpn.bpn_dip = pwr_p->pwr_dip;
		bpn.bpn_comp = PCI_PM_COMP_0;
		bpn.bpn_level = new_level;
		bpn.bpn_private = bpc->bpc_private;

		if (new_level > pwr_p->current_lvl)
			return (pci_raise_power(pwr_p, pwr_p->current_lvl,
			    new_level, impl_arg, bpn));
		else
			pci_pwr_change(pwr_p, pwr_p->current_lvl,
			    new_level);
		break;

	case BUS_POWER_PRE_NOTIFICATION:
		DEBUG5(DBG_PWR, dip, "PRE %s@%s cmp = %d old = %d "
		    "new = %d. TEMP FULL POWER\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    bpc->bpc_comp, bpc->bpc_olevel, bpc->bpc_nlevel);

		/*
		 * Any state changes require that the bus be at full
		 * power (B0) so that the device configuration
		 * registers can be accessed.  Make a fp hold here
		 * so device remains at full power during power
		 * configuration.
		 */

		pwr_p->pwr_fp++;
		DEBUG1(DBG_PWR, pwr_p->pwr_dip,
		    "incremented fp is %d in PRE_NOTE\n\n", pwr_p->pwr_fp);

		bpn.bpn_dip = pwr_p->pwr_dip;
		bpn.bpn_comp = PCI_PM_COMP_0;
		bpn.bpn_level = PM_LEVEL_B0;
		bpn.bpn_private = bpc->bpc_private;

		if (PM_LEVEL_B0 > pwr_p->current_lvl)
			return (pci_raise_power(pwr_p, pwr_p->current_lvl,
			    PM_LEVEL_B0, impl_arg, bpn));

		break;

	case BUS_POWER_POST_NOTIFICATION:
		p_chld = pci_pwr_get_info(pwr_p, rdip);
		DEBUG5(DBG_PWR, dip, "POST %s@%s cmp = %d old = %d new = %d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    bpc->bpc_comp, bpc->bpc_olevel, bpc->bpc_nlevel);

		if (*res == DDI_FAILURE) {
			DEBUG0(DBG_PWR, rdip, "child's power routine FAILED\n");
		} else {

			/*
			 * pci_pwr_add_components must be called here if
			 * comp_pwr hasn't been set up yet.  It has to be done
			 * here rather than in post-attach, since it is possible
			 * for power() of child to get called before attach
			 * completes.
			 */
			if (p_chld->comp_pwr == NULL)
				pci_pwr_add_components(pwr_p, rdip, p_chld);

			pci_pwr_update_comp(pwr_p, p_chld,
			    bpc->bpc_comp, bpc->bpc_nlevel);

		}

		pwr_p->pwr_fp--;
		DEBUG1(DBG_PWR, pwr_p->pwr_dip,
		    "decremented fp is %d in POST_NOTE\n\n", pwr_p->pwr_fp);

		new_level = pci_pwr_new_lvl(pwr_p);
		bpn.bpn_dip = pwr_p->pwr_dip;
		bpn.bpn_comp = PCI_PM_COMP_0;
		bpn.bpn_level = new_level;
		bpn.bpn_private = bpc->bpc_private;

		if (new_level > pwr_p->current_lvl)
			return (pci_raise_power(pwr_p, pwr_p->current_lvl,
			    new_level, impl_arg, bpn));
		else
			pci_pwr_change(pwr_p, pwr_p->current_lvl,
			    new_level);

		break;
	default:
		mutex_exit(&pwr_p->pwr_mutex);
		return (pm_busop_bus_power(dip, impl_arg, op, arg, result));
	}

	mutex_exit(&pwr_p->pwr_mutex);

	return (ret);
}

void
pci_pwr_resume(dev_info_t *dip, pci_pwr_t *pwr_p)
{
	dev_info_t *cdip;

	/*
	 * Inform the PM framework of the current state of the device.
	 * (it is unknown to PM framework at this point).
	 */
	if (PM_CAPABLE(pwr_p)) {
		pwr_p->current_lvl = pci_pwr_current_lvl(pwr_p);
		pm_power_has_changed(dip, PCI_PM_COMP_0,
		    pwr_p->current_lvl);
	}

	/*
	 * Restore config registers for children that did not save
	 * their own registers.  Children's pwr states are UNKNOWN after
	 * a resume, since it is possible for the PM framework to call
	 * resume without an actual power cycle (i.e., if suspend fails).
	 */
	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		/*
		 * Not interested in children who are not already
		 * init'ed.  They will be set up by init_child().
		 */
		if (i_ddi_node_state(cdip) < DS_INITIALIZED) {
			DEBUG2(DBG_DETACH, dip,
			    "DDI_RESUME: skipping %s%d not in CF1\n",
			    ddi_driver_name(cdip), ddi_get_instance(cdip));

			continue;
		}

		/*
		 * Only restore config registers if saved by nexus.
		 */
		if (ddi_prop_exists(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    NEXUS_SAVED) == 1) {
			(void) pci_restore_config_regs(cdip);

			DEBUG2(DBG_PWR, dip,
			    "DDI_RESUME: nexus restoring %s%d config regs\n",
			    ddi_driver_name(cdip), ddi_get_instance(cdip));

			if (ndi_prop_remove(DDI_DEV_T_NONE, cdip,
			    NEXUS_SAVED) != DDI_PROP_SUCCESS) {
				cmn_err(CE_WARN, "%s%d can't remove prop %s",
				    ddi_driver_name(cdip),
				    ddi_get_instance(cdip),
				    NEXUS_SAVED);
			}
		}
	}
}

void
pci_pwr_suspend(dev_info_t *dip, pci_pwr_t *pwr_p)
{
	dev_info_t *cdip;

	/*
	 * Save the state of the configuration headers of child
	 * nodes.
	 */

	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {
		pci_pwr_chld_t *p;
		int i;
		int num_comps;
		int ret;
		/*
		 * Not interested in children who are not already
		 * init'ed.  They will be set up in init_child().
		 */
		if (i_ddi_node_state(cdip) < DS_INITIALIZED) {
			DEBUG2(DBG_DETACH, dip, "DDI_SUSPEND: skipping "
			    "%s%d not in CF1\n", ddi_driver_name(cdip),
			    ddi_get_instance(cdip));

			continue;
		}

		/*
		 * Only save config registers if not already saved by child.
		 */
		if (ddi_prop_exists(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    SAVED_CONFIG_REGS) == 1) {

			continue;
		}

		/*
		 * The nexus needs to save config registers.  Create a property
		 * so it knows to restore on resume.
		 */
		ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, cdip,
		    NEXUS_SAVED);

		if (ret != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d can't update prop %s",
			    ddi_driver_name(cdip), ddi_get_instance(cdip),
			    NEXUS_SAVED);
		}

		if (!PM_CAPABLE(pwr_p)) {
			(void) pci_save_config_regs(cdip);

			continue;
		}

		mutex_enter(&pwr_p->pwr_mutex);
		p = pci_pwr_get_info(pwr_p, cdip);
		num_comps = p->num_comps;

		/*
		 * If a device has components, reset the power level
		 * to unknown.  This will ensure that the bus is at full
		 * power, so that saving registers won't panic (if
		 * the device is already powered off, the child should
		 * have already done the save, but an incorrect driver
		 * may have forgotten).  If resetting power levels
		 * to unknown isn't done here, it would have to be done
		 * in resume, since the pci driver has no way of knowing
		 * the actual state of the HW (a power cycle may not have
		 * occurred, and it was decided that poking into a
		 * child's config space should be avoided unless
		 * absolutely necessary).
		 */
		if (p->comp_pwr == NULL) {
			(void) pci_save_config_regs(cdip);
		} else {

			for (i = 0; i < num_comps; i++) {
				pci_pwr_update_comp(pwr_p, p, i,
				    PM_LEVEL_UNKNOWN);
			}
			/*
			 * ensure bus power is on before saving
			 * config regs.
			 */
			pci_pwr_change(pwr_p, pwr_p->current_lvl,
			    pci_pwr_new_lvl(pwr_p));

			(void) pci_save_config_regs(cdip);
		}
		mutex_exit(&pwr_p->pwr_mutex);
	}
}

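/*
 * The PCI_PWR_COMP_BUSY flag keeps at most one outstanding busy hold on
 * the bus component, so the pm_busy_component()/pm_idle_component()
 * calls below stay balanced.
 */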
void
pci_pwr_component_busy(pci_pwr_t *p)
{
	ASSERT(MUTEX_HELD(&p->pwr_mutex));
	if ((p->pwr_flags & PCI_PWR_COMP_BUSY) == 0) {
		if (pm_busy_component(p->pwr_dip, PCI_PM_COMP_0) ==
		    DDI_FAILURE) {
			cmn_err(CE_WARN,
			    "%s%d pm_busy_component failed",
			    ddi_driver_name(p->pwr_dip),
			    ddi_get_instance(p->pwr_dip));
		} else {
			DEBUG0(DBG_PWR, p->pwr_dip,
			    "called PM_BUSY_COMPONENT().  BUSY BIT SET\n");
			p->pwr_flags |= PCI_PWR_COMP_BUSY;
		}
	} else {
		DEBUG0(DBG_PWR, p->pwr_dip, "BUSY BIT ALREADY SET\n");
	}
}

void
pci_pwr_component_idle(pci_pwr_t *p)
{
	ASSERT(MUTEX_HELD(&p->pwr_mutex));
	if (p->pwr_flags & PCI_PWR_COMP_BUSY) {
		if (pm_idle_component(p->pwr_dip, PCI_PM_COMP_0) ==
		    DDI_FAILURE) {
			cmn_err(CE_WARN,
			    "%s%d pm_idle_component failed",
			    ddi_driver_name(p->pwr_dip),
			    ddi_get_instance(p->pwr_dip));
		} else {
			DEBUG0(DBG_PWR, p->pwr_dip,
			    "called PM_IDLE_COMPONENT() BUSY BIT CLEARED\n");
			p->pwr_flags &= ~PCI_PWR_COMP_BUSY;
		}
	} else {
		DEBUG0(DBG_PWR, p->pwr_dip, "BUSY BIT ALREADY CLEARED\n");
	}
}

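/*
 * Move the bus toward the level computed by pci_pwr_new_lvl().  Raising
 * the level is done synchronously via pm_raise_power(); lowering is left
 * to the PM framework by simply marking the bus component idle.
 */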
void
pci_pwr_change(pci_pwr_t *pwr_p, int current, int new)
{
	ASSERT(MUTEX_HELD(&pwr_p->pwr_mutex));
	if (current == new) {
		DEBUG2(DBG_PWR, pwr_p->pwr_dip,
		    "No change in power required. Should be "
		    "busy. (current=%d) == (new=%d)\n",
		    current, new);
		pci_pwr_component_busy(pwr_p);

		return;
	}

	if (new < current) {
		DEBUG2(DBG_PWR, pwr_p->pwr_dip,
		    "should be idle (new=%d) < (current=%d)\n",
		    new, current);
		pci_pwr_component_idle(pwr_p);

		return;
	}

	if (new > current) {
		DEBUG2(DBG_PWR, pwr_p->pwr_dip, "pwr_change: "
		    "pm_raise_power() and should be busy. "
		    "(new=%d) > (current=%d)\n", new, current);
		pci_pwr_component_busy(pwr_p);
		mutex_exit(&pwr_p->pwr_mutex);
		if (pm_raise_power(pwr_p->pwr_dip, PCI_PM_COMP_0,
		    new) == DDI_FAILURE) {
			cmn_err(CE_WARN, "%s%d pm_raise_power failed",
			    ddi_driver_name(pwr_p->pwr_dip),
			    ddi_get_instance(pwr_p->pwr_dip));
		}
		mutex_enter(&pwr_p->pwr_mutex);

		return;
	}
}
767