1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * PCI Express nexus driver interface
31  */
32 
33 #include <sys/types.h>
34 #include <sys/conf.h>		/* nulldev */
35 #include <sys/stat.h>		/* devctl */
36 #include <sys/kmem.h>
37 #include <sys/sunddi.h>
38 #include <sys/sunndi.h>
39 #include <sys/hotplug/pci/pcihp.h>
40 #include <sys/ddi_impldefs.h>
41 #include <sys/ddi_subrdefs.h>
42 #include <sys/spl.h>
43 #include <sys/epm.h>
44 #include <sys/iommutsb.h>
46 #include <sys/hotplug/pci/pciehpc.h>
47 #include "px_obj.h"
48 #include <sys/pci_tools.h>
49 #include "px_tools_ext.h"
50 #include "pcie_pwr.h"
51 
52 /*LINTLIBRARY*/
53 
54 /*
55  * function prototypes for dev ops routines:
56  */
57 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
58 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
59 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
60 	void *arg, void **result);
61 static int px_pwr_setup(dev_info_t *dip);
62 static void px_pwr_teardown(dev_info_t *dip);
63 
64 /*
65  * function prototypes for hotplug routines:
66  */
67 static uint_t px_init_hotplug(px_t *px_p);
68 static uint_t px_uninit_hotplug(dev_info_t *dip);
69 
70 /*
71  * bus ops and dev ops structures:
72  */
73 static struct bus_ops px_bus_ops = {
74 	BUSO_REV,
75 	px_map,			/* (*bus_map)(); */
76 	0,			/* (*bus_get_intrspec)(); */
77 	0,			/* (*bus_add_intrspec)(); */
78 	0,			/* (*bus_remove_intrspec)(); */
79 	i_ddi_map_fault,	/* (*bus_map_fault)(); */
80 	px_dma_setup,		/* (*bus_dma_map)(); */
81 	px_dma_allochdl,	/* (*bus_dma_allochdl)(); */
82 	px_dma_freehdl,		/* (*bus_dma_freehdl)(); */
83 	px_dma_bindhdl,		/* (*bus_dma_bindhdl)(); */
84 	px_dma_unbindhdl,	/* (*bus_dma_unbindhdl)(); */
85 	px_lib_dma_sync,	/* (*bus_dma_flush)(); */
86 	px_dma_win,		/* (*bus_dma_win)(); */
87 	px_dma_ctlops,		/* (*bus_dma_ctl)(); */
88 	px_ctlops,		/* (*bus_ctl)(); */
89 	ddi_bus_prop_op,	/* (*bus_prop_op)(); */
90 	ndi_busop_get_eventcookie,
91 	ndi_busop_add_eventcall,
92 	ndi_busop_remove_eventcall,
93 	ndi_post_event,
94 	NULL,			/* (*bus_intr_ctl)(); */
95 	NULL,			/* (*bus_config)(); */
96 	NULL,			/* (*bus_unconfig)(); */
97 	px_fm_init_child,	/* (*bus_fm_init)(); */
98 	NULL,			/* (*bus_fm_fini)(); */
99 	px_bus_enter,		/* (*bus_fm_access_enter)(); */
100 	px_bus_exit,		/* (*bus_fm_access_fini)(); */
101 	pcie_bus_power,		/* (*bus_power)(); */
102 	px_intr_ops		/* (*bus_intr_op)(); */
103 };
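
/*
 * Note that the map, DMA, ctlops and interrupt entry points above are
 * implemented later in this file, while event and property requests
 * are passed through to the generic ndi_busop_*() and
 * ddi_bus_prop_op() implementations.
 */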
104 
105 extern struct cb_ops px_cb_ops;
106 
107 static struct dev_ops px_ops = {
108 	DEVO_REV,
109 	0,			/* devo_refcnt */
110 	px_info,		/* devo_getinfo */
111 	nulldev,		/* devo_identify */
112 	0,			/* devo_probe */
113 	px_attach,		/* devo_attach */
114 	px_detach,		/* devo_detach */
115 	nodev,			/* devo_reset */
116 	&px_cb_ops,		/* devo_cb_ops */
117 	&px_bus_ops,		/* devo_bus_ops */
118 	nulldev			/* devo_power */
119 };
120 
121 /*
122  * module definitions:
123  */
124 #include <sys/modctl.h>
125 extern struct mod_ops mod_driverops;
126 
127 static struct modldrv modldrv = {
128 	&mod_driverops, 		/* Type of module - driver */
129 	"PCI Express nexus driver %I%",	/* Name of module. */
130 	&px_ops,			/* driver ops */
131 };
132 
133 static struct modlinkage modlinkage = {
134 	MODREV_1, (void *)&modldrv, NULL
135 };
136 
137 /* driver soft state */
138 void *px_state_p;
139 
140 int
141 _init(void)
142 {
143 	int e;
144 
145 	/*
146 	 * Initialize per-px bus soft state pointer.
147 	 */
148 	e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1);
149 	if (e != DDI_SUCCESS)
150 		return (e);
151 
152 	/*
153 	 * Install the module.
154 	 */
155 	e = mod_install(&modlinkage);
156 	if (e != DDI_SUCCESS)
157 		ddi_soft_state_fini(&px_state_p);
158 	return (e);
159 }
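
/*
 * Note that the soft state list must exist before mod_install()
 * returns, since px_attach() can be called as soon as the module is
 * installed; _fini() below tears the two down in the reverse order.
 */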
160 
161 int
162 _fini(void)
163 {
164 	int e;
165 
166 	/*
167 	 * Remove the module.
168 	 */
169 	e = mod_remove(&modlinkage);
170 	if (e != DDI_SUCCESS)
171 		return (e);
172 
173 	/* Free px soft state */
174 	ddi_soft_state_fini(&px_state_p);
175 
176 	return (e);
177 }
178 
179 int
180 _info(struct modinfo *modinfop)
181 {
182 	return (mod_info(&modlinkage, modinfop));
183 }
184 
185 /* ARGSUSED */
186 static int
187 px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
188 {
189 	int	instance = getminor((dev_t)arg);
190 	px_t	*px_p = INST_TO_STATE(instance);
191 
192 	/*
193 	 * Allow the hotplug framework to deal with the
194 	 * instances it manages.
195 	 */
196 	if (px_p && (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE))
197 		return (pcihp_info(dip, infocmd, arg, result));
198 
199 	/* non-hotplug or not attached */
200 	switch (infocmd) {
201 	case DDI_INFO_DEVT2INSTANCE:
202 		*result = (void *)(intptr_t)instance;
203 		return (DDI_SUCCESS);
204 
205 	case DDI_INFO_DEVT2DEVINFO:
206 		if (px_p == NULL)
207 			return (DDI_FAILURE);
208 		*result = (void *)px_p->px_dip;
209 		return (DDI_SUCCESS);
210 
211 	default:
212 		return (DDI_FAILURE);
213 	}
214 }
215 
216 /* device driver entry points */
217 /*
218  * attach entry point:
219  */
220 /*ARGSUSED*/
221 static int
222 px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
223 {
224 	px_t		*px_p;	/* per bus state pointer */
225 	int		instance = DIP_TO_INST(dip);
226 	int		ret = DDI_SUCCESS;
227 	devhandle_t	dev_hdl = NULL;
228 
229 	switch (cmd) {
230 	case DDI_ATTACH:
231 		DBG(DBG_ATTACH, dip, "DDI_ATTACH\n");
232 
233 		/*
234 		 * Allocate and get the per-px soft state structure.
235 		 */
236 		if (ddi_soft_state_zalloc(px_state_p, instance)
237 		    != DDI_SUCCESS) {
238 			cmn_err(CE_WARN, "%s%d: can't allocate px state",
239 				ddi_driver_name(dip), instance);
240 			goto err_bad_px_softstate;
241 		}
242 		px_p = INST_TO_STATE(instance);
243 		px_p->px_dip = dip;
244 		mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL);
245 		px_p->px_soft_state = PX_SOFT_STATE_CLOSED;
246 		px_p->px_open_count = 0;
247 
248 		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
249 				"device_type", "pciex");
250 		/*
251 		 * Get key properties of the pci bridge node and
252 	 * determine its type (psycho, schizo, etc ...).
253 		 */
254 		if (px_get_props(px_p, dip) == DDI_FAILURE)
255 			goto err_bad_px_prop;
256 
257 		if ((px_fm_attach(px_p)) != DDI_SUCCESS)
258 			goto err_bad_fm;
259 
260 		if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS)
261 			goto err_bad_dev_init;
262 
263 		/* Initialize device handle */
264 		px_p->px_dev_hdl = dev_hdl;
265 
266 		/*
267 		 * Initialize interrupt block.  Note that this
268 		 * initializes error handling for the PEC as well.
269 		 */
270 		if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS)
271 			goto err_bad_ib;
272 
273 		if (px_cb_attach(px_p) != DDI_SUCCESS)
274 			goto err_bad_cb;
275 
276 		/*
277 		 * Start creating the modules.
278 		 * Note that attach() routines should
279 		 * register and enable their own interrupts.
280 		 */
281 
282 		if ((px_mmu_attach(px_p)) != DDI_SUCCESS)
283 			goto err_bad_mmu;
284 
285 		if ((px_msiq_attach(px_p)) != DDI_SUCCESS)
286 			goto err_bad_msiq;
287 
288 		if ((px_msi_attach(px_p)) != DDI_SUCCESS)
289 			goto err_bad_msi;
290 
291 		if ((px_pec_attach(px_p)) != DDI_SUCCESS)
292 			goto err_bad_pec;
293 
294 		if ((px_dma_attach(px_p)) != DDI_SUCCESS)
295 			goto err_bad_pec; /* nothing to uninitialize on DMA */
296 
297 		/*
298 		 * All of the error handlers have been registered
299 		 * by now so it's time to activate the interrupt.
300 		 */
301 		if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS)
302 			goto err_bad_pec_add_intr;
303 
304 		(void) px_init_hotplug(px_p);
305 
306 		/*
307 		 * Create the "devctl" node for hotplug and pcitool support.
308 		 * For a non-hotplug bus, we still need ":devctl" to
309 		 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls.
310 		 */
311 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
312 		    PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR),
313 		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
314 			goto err_bad_devctl_node;
315 		}
316 
317 		if (pxtool_init(dip) != DDI_SUCCESS)
318 			goto err_bad_pcitool_node;
319 
320 		/*
321 		 * Power management setup. Even if it fails, attach will
322 		 * succeed, as this is an optional feature. Since we are
323 		 * always at full power, this is not critical.
324 		 */
325 		if (pwr_common_setup(dip) != DDI_SUCCESS) {
326 			DBG(DBG_PWR, dip, "pwr_common_setup failed\n");
327 		} else if (px_pwr_setup(dip) != DDI_SUCCESS) {
328 			DBG(DBG_PWR, dip, "px_pwr_setup failed\n");
329 			pwr_common_teardown(dip);
330 		}
331 
332 		/*
333 		 * add cpr callback
334 		 */
335 		px_cpr_add_callb(px_p);
336 
337 		ddi_report_dev(dip);
338 
339 		px_p->px_state = PX_ATTACHED;
340 		DBG(DBG_ATTACH, dip, "attach success\n");
341 		break;
342 
343 err_bad_pcitool_node:
344 		ddi_remove_minor_node(dip, "devctl");
345 err_bad_devctl_node:
346 		px_err_rem_intr(&px_p->px_fault);
347 err_bad_pec_add_intr:
348 		px_pec_detach(px_p);
349 err_bad_pec:
350 		px_msi_detach(px_p);
351 err_bad_msi:
352 		px_msiq_detach(px_p);
353 err_bad_msiq:
354 		px_mmu_detach(px_p);
355 err_bad_mmu:
356 		px_cb_detach(px_p);
357 err_bad_cb:
358 		px_ib_detach(px_p);
359 err_bad_ib:
360 		(void) px_lib_dev_fini(dip);
361 err_bad_dev_init:
362 		px_fm_detach(px_p);
363 err_bad_fm:
364 		px_free_props(px_p);
365 err_bad_px_prop:
366 		mutex_destroy(&px_p->px_mutex);
367 		ddi_soft_state_free(px_state_p, instance);
368 err_bad_px_softstate:
369 		ret = DDI_FAILURE;
370 		break;
371 
372 	case DDI_RESUME:
373 		DBG(DBG_ATTACH, dip, "DDI_RESUME\n");
374 
375 		px_p = INST_TO_STATE(instance);
376 
377 		mutex_enter(&px_p->px_mutex);
378 
379 		/* suspend might not have succeeded */
380 		if (px_p->px_state != PX_SUSPENDED) {
381 			DBG(DBG_ATTACH, px_p->px_dip,
382 			    "instance NOT suspended\n");
383 			ret = DDI_FAILURE;
384 			mutex_exit(&px_p->px_mutex);
385 			break;
386 		}
386 
387 		px_lib_resume(dip);
388 		(void) pcie_pwr_resume(dip);
389 		px_p->px_state = PX_ATTACHED;
390 
391 		mutex_exit(&px_p->px_mutex);
392 
393 		break;
394 	default:
395 		DBG(DBG_ATTACH, dip, "unsupported attach op\n");
396 		ret = DDI_FAILURE;
397 		break;
398 	}
399 
400 	return (ret);
401 }
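
/*
 * Note that the error labels in px_attach() unwind in exact reverse
 * order of the attach sequence above, so each goto target releases
 * only the state that was successfully initialized before the
 * failure.
 */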
402 
403 /*
404  * detach entry point:
405  */
406 /*ARGSUSED*/
407 static int
408 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
409 {
410 	int instance = ddi_get_instance(dip);
411 	px_t *px_p = INST_TO_STATE(instance);
412 	int ret;
413 
414 	/*
415 	 * Make sure we are currently attached
416 	 */
417 	if (px_p->px_state != PX_ATTACHED) {
418 		DBG(DBG_DETACH, dip, "failed - instance not attached\n");
419 		return (DDI_FAILURE);
420 	}
421 
422 	mutex_enter(&px_p->px_mutex);
423 
424 	switch (cmd) {
425 	case DDI_DETACH:
426 		DBG(DBG_DETACH, dip, "DDI_DETACH\n");
427 
428 		/*
429 		 * remove cpr callback
430 		 */
431 		px_cpr_rem_callb(px_p);
432 
433 		if (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE)
434 			if (px_uninit_hotplug(dip) != DDI_SUCCESS) {
435 				mutex_exit(&px_p->px_mutex);
436 				return (DDI_FAILURE);
437 			}
438 
439 		/*
440 		 * things which used to be done in obj_destroy
441 		 * are now in-lined here.
442 		 */
443 
444 		px_p->px_state = PX_DETACHED;
445 
446 		pxtool_uninit(dip);
447 
448 		ddi_remove_minor_node(dip, "devctl");
449 		px_err_rem_intr(&px_p->px_fault);
450 		px_pec_detach(px_p);
451 		px_pwr_teardown(dip);
452 		pwr_common_teardown(dip);
453 		px_msi_detach(px_p);
454 		px_msiq_detach(px_p);
455 		px_mmu_detach(px_p);
456 		px_cb_detach(px_p);
457 		px_ib_detach(px_p);
458 		(void) px_lib_dev_fini(dip);
459 		px_fm_detach(px_p);
460 
461 		/*
462 		 * Free the px soft state structure and the rest of the
463 		 * resources it's using.
464 		 */
465 		px_free_props(px_p);
466 		mutex_exit(&px_p->px_mutex);
467 		mutex_destroy(&px_p->px_mutex);
468 
469 		/* Free the interrupt-priorities prop if we created it. */ {
470 			int len;
471 
472 			if (ddi_getproplen(DDI_DEV_T_ANY, dip,
473 			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
474 			    "interrupt-priorities", &len) == DDI_PROP_SUCCESS)
475 				(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
476 				    "interrupt-priorities");
477 		}
478 
479 		px_p->px_dev_hdl = NULL;
480 		ddi_soft_state_free(px_state_p, instance);
481 
482 		return (DDI_SUCCESS);
483 
484 	case DDI_SUSPEND:
485 		if (pcie_pwr_suspend(dip) != DDI_SUCCESS) {
486 			mutex_exit(&px_p->px_mutex);
487 			return (DDI_FAILURE);
488 		}
489 		if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS)
490 			px_p->px_state = PX_SUSPENDED;
491 		mutex_exit(&px_p->px_mutex);
492 
493 		return (ret);
494 
495 	default:
496 		DBG(DBG_DETACH, dip, "unsupported detach op\n");
497 		mutex_exit(&px_p->px_mutex);
498 		return (DDI_FAILURE);
499 	}
500 }
501 
502 /*
503  * power management related initialization specific to px
504  * called by px_attach()
505  */
506 static int
507 px_pwr_setup(dev_info_t *dip)
508 {
509 	pcie_pwr_t *pwr_p;
510 	int instance = ddi_get_instance(dip);
511 	px_t *px_p = INST_TO_STATE(instance);
512 	ddi_intr_handle_impl_t hdl;
513 
514 	ASSERT(PCIE_PMINFO(dip));
515 	pwr_p = PCIE_NEXUS_PMINFO(dip);
516 	ASSERT(pwr_p);
517 
518 	/*
519 	 * Indicate support for LDI (Layered Driver Interface):
520 	 * create the property if it is not already there.
521 	 */
522 	if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
523 	    DDI_KERNEL_IOCTL)) {
524 		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
525 		    DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) {
526 			DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n");
527 			return (DDI_FAILURE);
528 		}
529 	}
530 	/* No support for device PM. We are always at full power */
531 	pwr_p->pwr_func_lvl = PM_LEVEL_D0;
532 
533 	mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER,
534 	    DDI_INTR_PRI(px_pwr_pil));
535 	cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL);
536 
539 	/* Initialize handle */
540 	hdl.ih_cb_arg1 = px_p;
541 	hdl.ih_cb_arg2 = NULL;
542 	hdl.ih_ver = DDI_INTR_VERSION;
543 	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
544 	hdl.ih_dip = dip;
545 	hdl.ih_inum = 0;
546 	hdl.ih_pri = px_pwr_pil;
547 
548 	/* Add PME_TO_ACK message handler */
549 	hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr;
550 	if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC,
551 	    (msgcode_t)PCIE_PME_ACK_MSG, &px_p->px_pm_msiq_id) != DDI_SUCCESS) {
552 		DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add "
553 		    "PME_TO_ACK intr\n");
554 		goto pwr_setup_err1;
555 	}
556 	px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id);
557 	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID);
558 
559 	if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
560 	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id),
561 	    PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) {
562 		DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt"
563 		    " state failed\n");
564 		goto px_pwrsetup_err_state;
565 	}
566 
567 	return (DDI_SUCCESS);
568 
569 px_pwrsetup_err_state:
570 	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
571 	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
572 	    px_p->px_pm_msiq_id);
573 pwr_setup_err1:
574 	mutex_destroy(&px_p->px_l23ready_lock);
575 	cv_destroy(&px_p->px_l23ready_cv);
576 
577 	return (DDI_FAILURE);
578 }
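
/*
 * px_pwr_setup() above binds the PCIe PME_TO_ACK message to its own
 * MSIQ and enables the corresponding interrupt state so that
 * px_pmeq_intr() runs when the acknowledgement message arrives;
 * px_pwr_teardown() below undoes those steps in reverse order.
 */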
579 
580 /*
581  * Undo whatever is done in px_pwr_setup(); called by px_detach().
582  */
583 static void
584 px_pwr_teardown(dev_info_t *dip)
585 {
586 	int instance = ddi_get_instance(dip);
587 	px_t *px_p = INST_TO_STATE(instance);
588 	ddi_intr_handle_impl_t	hdl;
589 
590 	if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip))
591 		return;
592 
593 	/* Initialize handle */
594 	hdl.ih_ver = DDI_INTR_VERSION;
595 	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
596 	hdl.ih_dip = dip;
597 	hdl.ih_inum = 0;
598 
599 	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
600 	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
601 	    px_p->px_pm_msiq_id);
602 
603 	(void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
604 	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id),
605 	    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG);
606 
607 	px_p->px_pm_msiq_id = -1;
608 
609 	cv_destroy(&px_p->px_l23ready_cv);
610 	mutex_destroy(&px_p->px_l23ready_lock);
611 }
612 
613 /* bus driver entry points */
614 
615 /*
616  * bus map entry point:
617  *
618  * 	if map request is for an rnumber
619  *		get the corresponding regspec from device node
620  * 	build a new regspec in our parent's format
621  *	build a new map_req with the new regspec
622  *	call up the tree to complete the mapping
623  */
624 int
625 px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
626 	off_t off, off_t len, caddr_t *addrp)
627 {
628 	px_t *px_p = DIP_TO_STATE(dip);
629 	struct regspec p_regspec;
630 	ddi_map_req_t p_mapreq;
631 	int reglen, rval, r_no;
632 	pci_regspec_t reloc_reg, *rp = &reloc_reg;
633 
634 	DBG(DBG_MAP, dip, "rdip=%s%d:",
635 		ddi_driver_name(rdip), ddi_get_instance(rdip));
636 
637 	if (mp->map_flags & DDI_MF_USER_MAPPING)
638 		return (DDI_ME_UNIMPLEMENTED);
639 
640 	switch (mp->map_type) {
641 	case DDI_MT_REGSPEC:
642 		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
643 		break;
644 
645 	case DDI_MT_RNUMBER:
646 		r_no = mp->map_obj.rnumber;
647 		DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);
648 
649 		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
650 			"reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
651 				return (DDI_ME_RNUMBER_RANGE);
652 
653 		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
654 			kmem_free(rp, reglen);
655 			return (DDI_ME_RNUMBER_RANGE);
656 		}
657 		rp += r_no;
658 		break;
659 
660 	default:
661 		return (DDI_ME_INVAL);
662 	}
663 	DBG(DBG_MAP | DBG_CONT, dip, "\n");
664 
665 	if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) {
666 		/*
667 		 * There may be a need to differentiate between PCI
668 		 * and PCI-Ex devices so the following range check is
669 		 * done correctly, depending on the implementation of
670 		 * px_pci bridge nexus driver.
671 		 */
672 		if ((off >= PCIE_CONF_HDR_SIZE) ||
673 				(len > PCIE_CONF_HDR_SIZE) ||
674 				(off + len > PCIE_CONF_HDR_SIZE))
675 			return (DDI_ME_INVAL);
676 		/*
677 		 * A DDI_FAILURE return from the following call means
678 		 * that no virtual config space access service is
679 		 * defined in this layer. Otherwise the service is used
680 		 * right here and we return.
681 		 */
682 		rval = px_lib_map_vconfig(dip, mp, off, rp, addrp);
683 		if (rval == DDI_SUCCESS)
684 			goto done;
685 	}
686 
687 	/*
688 	 * There are no virtual config space services, or we are mapping
689 	 * a region of memory-mapped config/IO/memory space; in either
690 	 * case, proceed to the parent.
691 	 */
692 
693 	/* relocate within 64-bit pci space through "assigned-addresses" */
694 	if (rval = px_reloc_reg(dip, rdip, px_p, rp))
695 		goto done;
696 
697 	if (len)	/* adjust regspec according to mapping request */
698 		rp->pci_size_low = len;	/* MIN ? */
699 	rp->pci_phys_low += off;
700 
701 	/* translate relocated pci regspec into parent space through "ranges" */
702 	if (rval = px_xlate_reg(px_p, rp, &p_regspec))
703 		goto done;
704 
705 	p_mapreq = *mp;		/* dup the whole structure */
706 	p_mapreq.map_type = DDI_MT_REGSPEC;
707 	p_mapreq.map_obj.rp = &p_regspec;
708 	px_lib_map_attr_check(&p_mapreq);
709 	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);
710 
711 	if (rval == DDI_SUCCESS) {
712 		/*
713 		 * Set up access functions for FM access-error-capable drivers.
714 		 */
715 		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
716 		    mp->map_handlep->ah_acc.devacc_attr_access !=
717 		    DDI_DEFAULT_ACC)
718 			px_fm_acc_setup(mp, rdip);
719 	}
720 
721 done:
722 	if (mp->map_type == DDI_MT_RNUMBER)
723 		kmem_free(rp - r_no, reglen);
724 
725 	return (rval);
726 }
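
/*
 * Illustrative sketch (not compiled into this driver): how a request
 * reaches px_map().  A hypothetical leaf driver below this nexus maps
 * its register set 0 with ddi_regs_map_setup(9F); the DDI framework
 * turns that into a DDI_MT_RNUMBER map request and walks it up the
 * tree to the bus_map entry point above.  Names prefixed "example_"
 * are assumptions for illustration only.
 */
#if 0
static int
example_map_regs(dev_info_t *dip)
{
	ddi_device_acc_attr_t attr;
	ddi_acc_handle_t hdl;
	caddr_t regs;

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* register set 0, whole set: offset 0, length 0 */
	if (ddi_regs_map_setup(dip, 0, &regs, 0, 0, &attr,
	    &hdl) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* ... access registers with ddi_get32(hdl, ...) ... */

	ddi_regs_map_free(&hdl);
	return (DDI_SUCCESS);
}
#endif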
727 
728 /*
729  * bus dma map entry point
730  * return value:
731  *	DDI_DMA_PARTIAL_MAP	 1
732  *	DDI_DMA_MAPOK		 0
733  *	DDI_DMA_MAPPED		 0
734  *	DDI_DMA_NORESOURCES	-1
735  *	DDI_DMA_NOMAPPING	-2
736  *	DDI_DMA_TOOBIG		-3
737  */
738 int
739 px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
740 	ddi_dma_handle_t *handlep)
741 {
742 	px_t *px_p = DIP_TO_STATE(dip);
743 	px_mmu_t *mmu_p = px_p->px_mmu_p;
744 	ddi_dma_impl_t *mp;
745 	int ret;
746 
747 	DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
748 		ddi_driver_name(rdip), ddi_get_instance(rdip),
749 		handlep ? "alloc" : "advisory");
750 
751 	if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq)))
752 		return (DDI_DMA_NORESOURCES);
753 	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
754 		return (DDI_DMA_NOMAPPING);
755 	if (ret = px_dma_type(px_p, dmareq, mp))
756 		goto freehandle;
757 	if (ret = px_dma_pfn(px_p, dmareq, mp))
758 		goto freehandle;
759 
760 	switch (PX_DMA_TYPE(mp)) {
761 	case PX_DMAI_FLAGS_DVMA:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
762 		if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep)
763 			goto freehandle;
764 		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
765 			if (PX_DMA_CANFAST(mp)) {
766 				if (!px_dvma_map_fast(mmu_p, mp))
767 					break;
768 			/* LINTED E_NOP_ELSE_STMT */
769 			} else {
770 				PX_DVMA_FASTTRAK_PROF(mp);
771 			}
772 		}
773 		if (ret = px_dvma_map(mp, dmareq, mmu_p))
774 			goto freehandle;
775 		break;
776 	case PX_DMAI_FLAGS_PTP:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
777 		if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep)
778 			goto freehandle;
779 		break;
780 	case PX_DMAI_FLAGS_BYPASS:
781 	default:
782 		cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x",
783 			ddi_driver_name(rdip), ddi_get_instance(rdip),
784 			PX_DMA_TYPE(mp));
785 		/*NOTREACHED*/
786 	}
787 	*handlep = (ddi_dma_handle_t)mp;
788 	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;
789 	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);
790 
791 	return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
792 freehandle:
793 	if (ret == DDI_DMA_NORESOURCES)
794 		px_dma_freemp(mp); /* don't run_callback() */
795 	else
796 		(void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
797 	return (ret);
798 }
799 
800 
801 /*
802  * bus dma alloc handle entry point:
803  */
804 int
805 px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
806 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
807 {
808 	px_t *px_p = DIP_TO_STATE(dip);
809 	ddi_dma_impl_t *mp;
810 	int rval;
811 
812 	DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
813 		ddi_driver_name(rdip), ddi_get_instance(rdip));
814 
815 	if (attrp->dma_attr_version != DMA_ATTR_V0)
816 		return (DDI_DMA_BADATTR);
817 
818 	if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg)))
819 		return (DDI_DMA_NORESOURCES);
820 
821 	/*
822 	 * Save requestor's information
823 	 */
824 	mp->dmai_attr	= *attrp; /* whole object - augmented later  */
825 	*PX_DEV_ATTR(mp)	= *attrp; /* whole object - device orig attr */
826 	DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);
827 
828 	/* check and convert dma attributes to handle parameters */
829 	if (rval = px_dma_attr2hdl(px_p, mp)) {
830 		px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
831 		*handlep = NULL;
832 		return (rval);
833 	}
834 	*handlep = (ddi_dma_handle_t)mp;
835 	return (DDI_SUCCESS);
836 }
837 
838 
839 /*
840  * bus dma free handle entry point:
841  */
842 /*ARGSUSED*/
843 int
844 px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
845 {
846 	DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
847 		ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
848 	px_dma_freemp((ddi_dma_impl_t *)handle);
849 
850 	if (px_kmem_clid) {
851 		DBG(DBG_DMA_FREEH, dip, "run handle callback\n");
852 		ddi_run_callback(&px_kmem_clid);
853 	}
854 	return (DDI_SUCCESS);
855 }
856 
857 
858 /*
859  * bus dma bind handle entry point:
860  */
861 int
862 px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
863 	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
864 	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
865 {
866 	px_t *px_p = DIP_TO_STATE(dip);
867 	px_mmu_t *mmu_p = px_p->px_mmu_p;
868 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
869 	int ret;
870 
871 	DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
872 		ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);
873 
874 	if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE)
875 		return (DDI_DMA_INUSE);
876 
877 	ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0);
878 	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;
879 
880 	if (ret = px_dma_type(px_p, dmareq, mp))
881 		goto err;
882 	if (ret = px_dma_pfn(px_p, dmareq, mp))
883 		goto err;
884 
885 	switch (PX_DMA_TYPE(mp)) {
886 	case PX_DMAI_FLAGS_DVMA:
887 		if (ret = px_dvma_win(px_p, dmareq, mp))
888 			goto map_err;
889 		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
890 			if (PX_DMA_CANFAST(mp)) {
891 				if (!px_dvma_map_fast(mmu_p, mp))
892 					goto mapped; /*LINTED E_NOP_ELSE_STMT*/
893 			} else {
894 				PX_DVMA_FASTTRAK_PROF(mp);
895 			}
896 		}
897 		if (ret = px_dvma_map(mp, dmareq, mmu_p))
898 			goto map_err;
899 mapped:
900 		*ccountp = 1;
901 		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
902 		break;
903 	case PX_DMAI_FLAGS_BYPASS:
904 	case PX_DMAI_FLAGS_PTP:
905 		if (ret = px_dma_physwin(px_p, dmareq, mp))
906 			goto map_err;
907 		*ccountp = PX_WINLST(mp)->win_ncookies;
908 		*cookiep =
909 		    *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */
910 		break;
911 	default:
912 		cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type",
913 			ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
914 		/*NOTREACHED*/
915 	}
916 	DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n",
917 		cookiep->dmac_address, cookiep->dmac_size);
918 	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);
919 
920 	/* insert dma handle into FMA cache */
921 	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR)
922 		(void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL);
923 
924 	return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
925 map_err:
926 	px_dma_freepfn(mp);
927 err:
928 	mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE;
929 	return (ret);
930 }
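
/*
 * Illustrative sketch (not compiled into this driver): the leaf-level
 * calls that funnel into px_dma_allochdl()/px_dma_bindhdl() above.  A
 * hypothetical child binds a kernel buffer for device reads; names
 * prefixed "example_" are assumptions for illustration only.
 */
#if 0
static int
example_bind_buffer(dev_info_t *dip, ddi_dma_attr_t *attrp,
    caddr_t buf, size_t len)
{
	ddi_dma_handle_t dh;
	ddi_dma_cookie_t cookie;
	uint_t ccount, i;

	/* becomes a px_dma_allochdl() call on this nexus */
	if (ddi_dma_alloc_handle(dip, attrp, DDI_DMA_SLEEP, NULL,
	    &dh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* becomes a px_dma_bindhdl() call on this nexus */
	if (ddi_dma_addr_bind_handle(dh, NULL, buf, len,
	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &cookie, &ccount) != DDI_DMA_MAPPED) {
		ddi_dma_free_handle(&dh);
		return (DDI_FAILURE);
	}

	/* walk the remaining cookies; a real driver programs the device */
	for (i = 1; i < ccount; i++)
		ddi_dma_nextcookie(dh, &cookie);

	(void) ddi_dma_unbind_handle(dh);
	ddi_dma_free_handle(&dh);
	return (DDI_SUCCESS);
}
#endif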
931 
932 
933 /*
934  * bus dma unbind handle entry point:
935  */
936 /*ARGSUSED*/
937 int
938 px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
939 {
940 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
941 	px_t *px_p = DIP_TO_STATE(dip);
942 	px_mmu_t *mmu_p = px_p->px_mmu_p;
943 
944 	DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
945 		ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
946 	if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) {
947 		DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n");
948 		return (DDI_FAILURE);
949 	}
950 
951 	/* remove dma handle from FMA cache */
952 	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
953 		if (DEVI(rdip)->devi_fmhdl != NULL &&
954 		    DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) {
955 			(void) ndi_fmc_remove(rdip, DMA_HANDLE, mp);
956 		}
957 	}
958 
959 	/*
960 	 * If the handle is using the iommu, unload all of its
961 	 * translations.
962 	 */
963 	switch (PX_DMA_TYPE(mp)) {
964 	case PX_DMAI_FLAGS_DVMA:
965 		px_mmu_unmap_window(mmu_p, mp);
966 		px_dvma_unmap(mmu_p, mp);
967 		px_dma_freepfn(mp);
968 		break;
969 	case PX_DMAI_FLAGS_BYPASS:
970 	case PX_DMAI_FLAGS_PTP:
971 		px_dma_freewin(mp);
972 		break;
973 	default:
974 		cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p",
975 			ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
976 		/*NOTREACHED*/
977 	}
978 	if (mmu_p->mmu_dvma_clid != 0) {
979 		DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n");
980 		ddi_run_callback(&mmu_p->mmu_dvma_clid);
981 	}
982 	if (px_kmem_clid) {
983 		DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n");
984 		ddi_run_callback(&px_kmem_clid);
985 	}
986 	mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE;
987 
988 	return (DDI_SUCCESS);
989 }
990 
991 /*
992  * bus dma win entry point:
993  */
994 int
995 px_dma_win(dev_info_t *dip, dev_info_t *rdip,
996 	ddi_dma_handle_t handle, uint_t win, off_t *offp,
997 	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
998 {
999 	ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)handle;
1000 	int		ret;
1001 
1002 	DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n",
1003 		ddi_driver_name(rdip), ddi_get_instance(rdip));
1004 
1005 	px_dump_dma_handle(DBG_DMA_WIN, dip, mp);
1006 	if (win >= mp->dmai_nwin) {
1007 		DBG(DBG_DMA_WIN, dip, "%x out of range\n", win);
1008 		return (DDI_FAILURE);
1009 	}
1010 
1011 	switch (PX_DMA_TYPE(mp)) {
1012 	case PX_DMAI_FLAGS_DVMA:
1013 		if (win != PX_DMA_CURWIN(mp)) {
1014 			px_t *px_p = DIP_TO_STATE(dip);
1015 			px_mmu_t *mmu_p = px_p->px_mmu_p;
1016 			px_mmu_unmap_window(mmu_p, mp);
1017 
1018 			/* map_window sets dmai_mapping/size/offset */
1019 			if ((ret = px_mmu_map_window(mmu_p,
1020 			    mp, win)) != DDI_SUCCESS)
1021 				return (ret);
1023 		}
1024 		if (cookiep)
1025 			MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping,
1026 				mp->dmai_size);
1027 		if (ccountp)
1028 			*ccountp = 1;
1029 		break;
1030 	case PX_DMAI_FLAGS_PTP:
1031 	case PX_DMAI_FLAGS_BYPASS: {
1032 		int i;
1033 		ddi_dma_cookie_t *ck_p;
1034 		px_dma_win_t *win_p = mp->dmai_winlst;
1035 
1036 		for (i = 0; i < win; win_p = win_p->win_next, i++);	/* advance to window */
1037 		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
1038 		*cookiep = *ck_p;
1039 		mp->dmai_offset = win_p->win_offset;
1040 		mp->dmai_size   = win_p->win_size;
1041 		mp->dmai_mapping = ck_p->dmac_laddress;
1042 		mp->dmai_cookie = ck_p + 1;
1043 		win_p->win_curseg = 0;
1044 		if (ccountp)
1045 			*ccountp = win_p->win_ncookies;
1046 		}
1047 		break;
1048 	default:
1049 		cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x",
1050 			ddi_driver_name(rdip), ddi_get_instance(rdip),
1051 			PX_DMA_TYPE(mp));
1052 		return (DDI_FAILURE);
1053 	}
1054 	if (cookiep)
1055 		DBG(DBG_DMA_WIN, dip,
1056 			"cookie - dmac_address=%x dmac_size=%x\n",
1057 			cookiep->dmac_address, cookiep->dmac_size);
1058 	if (offp)
1059 		*offp = (off_t)mp->dmai_offset;
1060 	if (lenp)
1061 		*lenp = mp->dmai_size;
1062 	return (DDI_SUCCESS);
1063 }
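
/*
 * Illustrative sketch (not compiled into this driver): a leaf driver
 * that bound with DDI_DMA_PARTIAL walks the windows px_dma_win()
 * serves, via ddi_dma_getwin(9F).  The "example_" name is an
 * assumption for illustration only.
 */
#if 0
static void
example_walk_windows(ddi_dma_handle_t dh, uint_t nwin)
{
	ddi_dma_cookie_t cookie;
	uint_t win, ccount;
	off_t off;
	size_t len;

	for (win = 0; win < nwin; win++) {
		/* reaches px_dma_win() through the nexus bus_ops */
		if (ddi_dma_getwin(dh, win, &off, &len, &cookie,
		    &ccount) != DDI_SUCCESS)
			break;
		/* ... program the device with this window ... */
	}
}
#endif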
1064 
1065 #ifdef	DEBUG
1066 static char *px_dmactl_str[] = {
1067 	"DDI_DMA_FREE",
1068 	"DDI_DMA_SYNC",
1069 	"DDI_DMA_HTOC",
1070 	"DDI_DMA_KVADDR",
1071 	"DDI_DMA_MOVWIN",
1072 	"DDI_DMA_REPWIN",
1073 	"DDI_DMA_GETERR",
1074 	"DDI_DMA_COFF",
1075 	"DDI_DMA_NEXTWIN",
1076 	"DDI_DMA_NEXTSEG",
1077 	"DDI_DMA_SEGTOC",
1078 	"DDI_DMA_RESERVE",
1079 	"DDI_DMA_RELEASE",
1080 	"DDI_DMA_RESETH",
1081 	"DDI_DMA_CKSYNC",
1082 	"DDI_DMA_IOPB_ALLOC",
1083 	"DDI_DMA_IOPB_FREE",
1084 	"DDI_DMA_SMEM_ALLOC",
1085 	"DDI_DMA_SMEM_FREE",
1086 	"DDI_DMA_SET_SBUS64"
1087 };
1088 #endif	/* DEBUG */
1089 
1090 /*
1091  * bus dma control entry point:
1092  */
1093 /*ARGSUSED*/
1094 int
1095 px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
1096 	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
1097 	uint_t cache_flags)
1098 {
1099 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1100 
1101 #ifdef	DEBUG
1102 	DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd],
1103 		ddi_driver_name(rdip), ddi_get_instance(rdip));
1104 #endif	/* DEBUG */
1105 
1106 	switch (cmd) {
1107 	case DDI_DMA_FREE:
1108 		(void) px_dma_unbindhdl(dip, rdip, handle);
1109 		(void) px_dma_freehdl(dip, rdip, handle);
1110 		return (DDI_SUCCESS);
1111 	case DDI_DMA_RESERVE: {
1112 		px_t *px_p = DIP_TO_STATE(dip);
1113 		return (px_fdvma_reserve(dip, rdip, px_p,
1114 			(ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
1115 		}
1116 	case DDI_DMA_RELEASE: {
1117 		px_t *px_p = DIP_TO_STATE(dip);
1118 		return (px_fdvma_release(dip, px_p, mp));
1119 		}
1120 	default:
1121 		break;
1122 	}
1123 
1124 	switch (PX_DMA_TYPE(mp)) {
1125 	case PX_DMAI_FLAGS_DVMA:
1126 		return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
1127 			cache_flags));
1128 	case PX_DMAI_FLAGS_PTP:
1129 	case PX_DMAI_FLAGS_BYPASS:
1130 		return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
1131 			cache_flags));
1132 	default:
1133 		cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x",
1134 			ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
1135 			mp->dmai_flags);
1136 		/*NOTREACHED*/
1137 	}
1138 	return (0);
1139 }
1140 
1141 /*
1142  * control ops entry point:
1143  *
1144  * Requests handled completely:
1145  *	DDI_CTLOPS_INITCHILD	see init_child() for details
1146  *	DDI_CTLOPS_UNINITCHILD
1147  *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
1148  *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
1149  *	DDI_CTLOPS_REGSIZE
1150  *	DDI_CTLOPS_NREGS
1151  *	DDI_CTLOPS_DVMAPAGESIZE
1152  *	DDI_CTLOPS_POKE
1153  *	DDI_CTLOPS_PEEK
1154  *
1155  * All others passed to parent.
1156  */
1157 int
1158 px_ctlops(dev_info_t *dip, dev_info_t *rdip,
1159 	ddi_ctl_enum_t op, void *arg, void *result)
1160 {
1161 	px_t *px_p = DIP_TO_STATE(dip);
1162 	struct detachspec *ds;
1163 	struct attachspec *as;
1164 
1165 	switch (op) {
1166 	case DDI_CTLOPS_INITCHILD:
1167 		return (px_init_child(px_p, (dev_info_t *)arg));
1168 
1169 	case DDI_CTLOPS_UNINITCHILD:
1170 		return (px_uninit_child(px_p, (dev_info_t *)arg));
1171 
1172 	case DDI_CTLOPS_ATTACH:
1173 		as = (struct attachspec *)arg;
1174 		switch (as->when) {
1175 		case DDI_PRE:
1176 			if (as->cmd == DDI_ATTACH) {
1177 				DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n",
1178 				    ddi_driver_name(rdip),
1179 				    ddi_get_instance(rdip));
1180 				return (pcie_pm_hold(dip));
1181 			}
1182 			if (as->cmd == DDI_RESUME) {
1183 				ddi_acc_handle_t	config_handle;
1184 				DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n",
1185 				    ddi_driver_name(rdip),
1186 				    ddi_get_instance(rdip));
1187 
1188 				if (pci_config_setup(rdip, &config_handle) ==
1189 				    DDI_SUCCESS) {
1190 					pcie_clear_errors(rdip, config_handle);
1191 					pci_config_teardown(&config_handle);
1192 				}
1193 			}
1194 			return (DDI_SUCCESS);
1195 
1196 		case DDI_POST:
1197 			DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n",
1198 			    ddi_driver_name(rdip), ddi_get_instance(rdip));
1199 			if (as->cmd == DDI_ATTACH && as->result != DDI_SUCCESS)
1200 				pcie_pm_release(dip);
1201 			return (DDI_SUCCESS);
1202 		default:
1203 			break;
1204 		}
1205 		break;
1206 
1207 	case DDI_CTLOPS_DETACH:
1208 		ds = (struct detachspec *)arg;
1209 		switch (ds->when) {
1210 		case DDI_POST:
1211 			if (ds->cmd == DDI_DETACH &&
1212 			    ds->result == DDI_SUCCESS) {
1213 				DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n",
1214 				    ddi_driver_name(rdip),
1215 				    ddi_get_instance(rdip));
1216 				return (pcie_pm_remove_child(dip, rdip));
1217 			}
1218 			return (DDI_SUCCESS);
1219 		default:
1220 			break;
1221 		}
1222 		break;
1223 
1224 	case DDI_CTLOPS_REPORTDEV:
1225 		return (px_report_dev(rdip));
1226 
1227 	case DDI_CTLOPS_IOMIN:
1228 		return (DDI_SUCCESS);
1229 
1230 	case DDI_CTLOPS_REGSIZE:
1231 		*((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg));
1232 		return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS);
1233 
1234 	case DDI_CTLOPS_NREGS:
1235 		*((uint_t *)result) = px_get_nreg_set(rdip);
1236 		return (DDI_SUCCESS);
1237 
1238 	case DDI_CTLOPS_DVMAPAGESIZE:
1239 		*((ulong_t *)result) = MMU_PAGE_SIZE;
1240 		return (DDI_SUCCESS);
1241 
1242 	case DDI_CTLOPS_POKE:	/* platform dependent implementation. */
1243 		return (px_lib_ctlops_poke(dip, rdip,
1244 		    (peekpoke_ctlops_t *)arg));
1245 
1246 	case DDI_CTLOPS_PEEK:	/* platform dependent implementation. */
1247 		return (px_lib_ctlops_peek(dip, rdip,
1248 		    (peekpoke_ctlops_t *)arg, result));
1249 
1250 	case DDI_CTLOPS_POWER:
1251 	default:
1252 		break;
1253 	}
1254 
1255 	/*
1256 	 * Now pass the request up to our parent.
1257 	 */
1258 	DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n",
1259 		ddi_driver_name(rdip), ddi_get_instance(rdip));
1260 	return (ddi_ctlops(dip, rdip, op, arg, result));
1261 }
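
/*
 * Illustrative sketch (not compiled into this driver): two of the
 * requests handled above, as seen from a child driver.
 * ddi_dev_nregs(9F) and ddi_dev_regsize(9F) arrive at px_ctlops() as
 * DDI_CTLOPS_NREGS and DDI_CTLOPS_REGSIZE respectively.  The
 * "example_" name is an assumption for illustration only.
 */
#if 0
static int
example_query_regs(dev_info_t *dip)
{
	off_t size;
	int nregs;

	if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS || nregs == 0)
		return (DDI_FAILURE);

	if (ddi_dev_regsize(dip, 0, &size) != DDI_SUCCESS)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}
#endif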
1262 
1263 /* ARGSUSED */
1264 int
1265 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
1266     ddi_intr_handle_impl_t *hdlp, void *result)
1267 {
1268 	int	intr_types, ret = DDI_SUCCESS;
1269 
1270 	DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n",
1271 	    ddi_driver_name(rdip), ddi_get_instance(rdip));
1272 
1273 	/* Process DDI_INTROP_SUPPORTED_TYPES request here */
1274 	if (intr_op == DDI_INTROP_SUPPORTED_TYPES) {
1275 		*(int *)result = i_ddi_get_nintrs(rdip) ?
1276 		    DDI_INTR_TYPE_FIXED : 0;
1277 
1278 		if ((pci_msi_get_supported_type(rdip,
1279 		    &intr_types)) == DDI_SUCCESS) {
1280 			/*
1281 			 * Double check supported interrupt types vs.
1282 			 * what the host bridge supports.
1283 			 *
1284 			 * NOTE:
1285 			 * Currently MSI-X is disabled since the px
1286 			 * driver doesn't fully support this feature.
1287 			 */
1288 			*(int *)result |= (intr_types & DDI_INTR_TYPE_MSI);
1289 		}
1290 
1291 		return (ret);
1292 	}
1293 
1294 	/*
1295 	 * The PCI-E nexus driver supports fixed, MSI and MSI-X interrupts.
1296 	 * Return failure if interrupt type is not supported.
1297 	 */
1298 	switch (hdlp->ih_type) {
1299 	case DDI_INTR_TYPE_FIXED:
1300 		ret = px_intx_ops(dip, rdip, intr_op, hdlp, result);
1301 		break;
1302 	case DDI_INTR_TYPE_MSI:
1303 	case DDI_INTR_TYPE_MSIX:
1304 		ret = px_msix_ops(dip, rdip, intr_op, hdlp, result);
1305 		break;
1306 	default:
1307 		ret = DDI_ENOTSUP;
1308 		break;
1309 	}
1310 
1311 	return (ret);
1312 }
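
/*
 * Illustrative sketch (not compiled into this driver): the leaf-side
 * interrupt allocation that px_intr_ops() services.  The supported-
 * types query is answered directly above; the remaining operations
 * are routed to px_intx_ops() or px_msix_ops() by interrupt type.
 * Names prefixed "example_" are assumptions for illustration only.
 */
#if 0
static int
example_add_msi(dev_info_t *dip, ddi_intr_handler_t *handler, void *arg)
{
	ddi_intr_handle_t ih;
	int types, actual;

	/* answered by the DDI_INTROP_SUPPORTED_TYPES case above */
	if (ddi_intr_get_supported_types(dip, &types) != DDI_SUCCESS ||
	    !(types & DDI_INTR_TYPE_MSI))
		return (DDI_FAILURE);

	if (ddi_intr_alloc(dip, &ih, DDI_INTR_TYPE_MSI, 0, 1, &actual,
	    DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS || actual != 1)
		return (DDI_FAILURE);

	if (ddi_intr_add_handler(ih, handler, arg, NULL) != DDI_SUCCESS) {
		(void) ddi_intr_free(ih);
		return (DDI_FAILURE);
	}

	if (ddi_intr_enable(ih) != DDI_SUCCESS) {
		(void) ddi_intr_remove_handler(ih);
		(void) ddi_intr_free(ih);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
#endif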
1313 
1314 static uint_t
1315 px_init_hotplug(px_t *px_p)
1316 {
1317 	px_bus_range_t bus_range;
1318 	dev_info_t *dip;
1319 	pciehpc_regops_t regops;
1320 
1321 	dip = px_p->px_dip;
1322 
1323 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1324 	    "hotplug-capable") == 0)
1325 		return (DDI_FAILURE);
1326 
1327 	/*
1328 	 * Before initializing hotplug, open up the bus range.  The busra
1329 	 * module will initialize its pool of bus numbers from this.
1330 	 * "busra" will be the agent that keeps track of them during
1331 	 * hotplug.  Also note that busra will remove any bus numbers
1332 	 * already in use from boot time.
1333 	 */
1334 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1335 	    "bus-range") == 0) {
1336 		cmn_err(CE_WARN, "%s%d: bus-range not found\n",
1337 		    ddi_driver_name(dip), ddi_get_instance(dip));
1338 #ifdef	DEBUG
1339 		bus_range.lo = 0x0;
1340 		bus_range.hi = 0xff;
1341 
1342 		if (ndi_prop_update_int_array(DDI_DEV_T_NONE,
1343 		    dip, "bus-range", (int *)&bus_range, 2)
1344 		    != DDI_PROP_SUCCESS) {
1345 			return (DDI_FAILURE);
1346 		}
1347 #else
1348 		return (DDI_FAILURE);
1349 #endif
1350 	}
1351 
1352 	if (px_lib_hotplug_init(dip, (void *)&regops) != DDI_SUCCESS)
1353 		return (DDI_FAILURE);
1354 
1355 	if (pciehpc_init(dip, &regops) != DDI_SUCCESS) {
1356 		px_lib_hotplug_uninit(dip);
1357 		return (DDI_FAILURE);
1358 	}
1359 
1360 	if (pcihp_init(dip) != DDI_SUCCESS) {
1361 		(void) pciehpc_uninit(dip);
1362 		px_lib_hotplug_uninit(dip);
1363 		return (DDI_FAILURE);
1364 	}
1365 
1366 	if (pcihp_get_cb_ops() != NULL) {
1367 		DBG(DBG_ATTACH, dip, "%s%d hotplug enabled\n",
1368 		    ddi_driver_name(dip), ddi_get_instance(dip));
1369 		px_p->px_dev_caps |= PX_HOTPLUG_CAPABLE;
1370 	}
1371 
1372 	return (DDI_SUCCESS);
1373 }
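
/*
 * Note that px_init_hotplug() layers three initializations: the
 * platform hook (px_lib_hotplug_init), the PCI Express hotplug
 * controller (pciehpc_init) and the hotplug framework (pcihp_init),
 * unwinding the earlier layers when a later one fails;
 * px_uninit_hotplug() below reverses the same sequence.
 */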
1374 
1375 static uint_t
1376 px_uninit_hotplug(dev_info_t *dip)
1377 {
1378 	if (pcihp_uninit(dip) != DDI_SUCCESS)
1379 		return (DDI_FAILURE);
1380 
1381 	if (pciehpc_uninit(dip) != DDI_SUCCESS)
1382 		return (DDI_FAILURE);
1383 
1384 	px_lib_hotplug_uninit(dip);
1385 
1386 	return (DDI_SUCCESS);
1387 }
1388