xref: /illumos-gate/usr/src/uts/sun4/io/px/px.c (revision b00044a2eb43864b8718585d21949611a2ee59ef)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * PCI Express nexus driver interface
28  */
29 
30 #include <sys/types.h>
31 #include <sys/conf.h>		/* nulldev */
32 #include <sys/stat.h>		/* devctl */
33 #include <sys/kmem.h>
34 #include <sys/sunddi.h>
35 #include <sys/sunndi.h>
36 #include <sys/ddi_impldefs.h>
37 #include <sys/ddi_subrdefs.h>
38 #include <sys/spl.h>
39 #include <sys/epm.h>
40 #include <sys/iommutsb.h>
41 #include <sys/hotplug/pci/pcihp.h>
42 #include <sys/hotplug/pci/pciehpc.h>
43 #include "px_obj.h"
44 #include <sys/pci_tools.h>
45 #include "px_tools_ext.h"
46 #include "pcie_pwr.h"
47 
48 /*LINTLIBRARY*/
49 
50 /*
51  * function prototypes for dev ops routines:
52  */
53 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
54 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
55 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
56 	void *arg, void **result);
57 static int px_cb_attach(px_t *);
58 static void px_cb_detach(px_t *);
59 static int px_pwr_setup(dev_info_t *dip);
60 static void px_pwr_teardown(dev_info_t *dip);
61 
62 static void px_set_mps(px_t *px_p);
63 
64 extern int pcie_max_mps;
65 
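/*
 * pci_target_queue is a target error queue that is shared rather than
 * per-instance (it is only declared here, defined elsewhere); because it
 * is shared, it is destroyed in _fini() at module unload rather than in
 * detach.  This is a descriptive note, not part of the original source.
 */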
66 extern errorq_t *pci_target_queue;
67 
68 /*
69  * function prototypes for hotplug routines:
70  */
71 static int px_init_hotplug(px_t *px_p);
72 static int px_uninit_hotplug(dev_info_t *dip);
73 
74 /*
75  * bus ops and dev ops structures:
76  */
77 static struct bus_ops px_bus_ops = {
78 	BUSO_REV,
79 	px_map,
80 	0,
81 	0,
82 	0,
83 	i_ddi_map_fault,
84 	px_dma_setup,
85 	px_dma_allochdl,
86 	px_dma_freehdl,
87 	px_dma_bindhdl,
88 	px_dma_unbindhdl,
89 	px_lib_dma_sync,
90 	px_dma_win,
91 	px_dma_ctlops,
92 	px_ctlops,
93 	ddi_bus_prop_op,
94 	ndi_busop_get_eventcookie,
95 	ndi_busop_add_eventcall,
96 	ndi_busop_remove_eventcall,
97 	ndi_post_event,
98 	NULL,
99 	NULL,			/* (*bus_config)(); */
100 	NULL,			/* (*bus_unconfig)(); */
101 	px_fm_init_child,	/* (*bus_fm_init)(); */
102 	NULL,			/* (*bus_fm_fini)(); */
103 	px_bus_enter,		/* (*bus_fm_access_enter)(); */
104 	px_bus_exit,		/* (*bus_fm_access_fini)(); */
105 	pcie_bus_power,		/* (*bus_power)(); */
106 	px_intr_ops		/* (*bus_intr_op)(); */
107 };
108 
109 extern struct cb_ops px_cb_ops;
110 
111 static struct dev_ops px_ops = {
112 	DEVO_REV,
113 	0,
114 	px_info,
115 	nulldev,
116 	0,
117 	px_attach,
118 	px_detach,
119 	nodev,
120 	&px_cb_ops,
121 	&px_bus_ops,
122 	nulldev
123 };
124 
125 /*
126  * module definitions:
127  */
128 #include <sys/modctl.h>
129 extern struct mod_ops mod_driverops;
130 
131 static struct modldrv modldrv = {
132 	&mod_driverops, 		/* Type of module - driver */
133 	"PCI Express nexus driver",	/* Name of module. */
134 	&px_ops,			/* driver ops */
135 };
136 
137 static struct modlinkage modlinkage = {
138 	MODREV_1, (void *)&modldrv, NULL
139 };
140 
141 /* driver soft state */
142 void *px_state_p;
143 
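/*
 * Loadable module entry points: _init() sets up the per-instance soft
 * state anchor before registering the driver, and _fini() reverses this
 * at module unload.
 */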
144 int
145 _init(void)
146 {
147 	int e;
148 
149 	/*
150 	 * Initialize per-px bus soft state pointer.
151 	 */
152 	e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1);
153 	if (e != DDI_SUCCESS)
154 		return (e);
155 
156 	/*
157 	 * Install the module.
158 	 */
159 	e = mod_install(&modlinkage);
160 	if (e != DDI_SUCCESS)
161 		ddi_soft_state_fini(&px_state_p);
162 	return (e);
163 }
164 
165 int
166 _fini(void)
167 {
168 	int e;
169 
170 	/*
171 	 * Remove the module.
172 	 */
173 	e = mod_remove(&modlinkage);
174 	if (e != DDI_SUCCESS)
175 		return (e);
176 	/*
177 	 * Destroy pci_target_queue, and set it to NULL.
178 	 */
179 	if (pci_target_queue)
180 		errorq_destroy(pci_target_queue);
181 
182 	pci_target_queue = NULL;
183 
184 	/* Free px soft state */
185 	ddi_soft_state_fini(&px_state_p);
186 
187 	return (e);
188 }
189 
190 int
191 _info(struct modinfo *modinfop)
192 {
193 	return (mod_info(&modlinkage, modinfop));
194 }
195 
196 /* ARGSUSED */
197 static int
198 px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
199 {
200 	int	instance = getminor((dev_t)arg);
201 	px_t	*px_p = INST_TO_STATE(instance);
202 
203 	/*
204 	 * Allow the hotplug framework (pcihp) to deal with the
205 	 * instances it manages; hotplug support is set up in px_attach().
206 	 */
207 	if (px_p && (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE))
208 		return (pcihp_info(dip, infocmd, arg, result));
209 
210 	/* non-hotplug or not attached */
211 	switch (infocmd) {
212 	case DDI_INFO_DEVT2INSTANCE:
213 		*result = (void *)(intptr_t)instance;
214 		return (DDI_SUCCESS);
215 
216 	case DDI_INFO_DEVT2DEVINFO:
217 		if (px_p == NULL)
218 			return (DDI_FAILURE);
219 		*result = (void *)px_p->px_dip;
220 		return (DDI_SUCCESS);
221 
222 	default:
223 		return (DDI_FAILURE);
224 	}
225 }
226 
227 /* device driver entry points */
228 /*
229  * attach entry point:
230  */
231 /*ARGSUSED*/
232 static int
233 px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
234 {
235 	px_t		*px_p;	/* per bus state pointer */
236 	int		instance = DIP_TO_INST(dip);
237 	int		ret = DDI_SUCCESS;
238 	devhandle_t	dev_hdl = NULL;
239 
240 	switch (cmd) {
241 	case DDI_ATTACH:
242 		DBG(DBG_ATTACH, dip, "DDI_ATTACH\n");
243 
244 		/*
245 		 * Allocate and get the per-px soft state structure.
246 		 */
247 		if (ddi_soft_state_zalloc(px_state_p, instance)
248 		    != DDI_SUCCESS) {
249 			cmn_err(CE_WARN, "%s%d: can't allocate px state",
250 			    ddi_driver_name(dip), instance);
251 			goto err_bad_px_softstate;
252 		}
253 		px_p = INST_TO_STATE(instance);
254 		px_p->px_dip = dip;
255 		mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL);
256 		px_p->px_soft_state = PX_SOFT_STATE_CLOSED;
257 		px_p->px_open_count = 0;
258 
259 		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
260 		    "device_type", "pciex");
261 
262 		/* Initialize px_dbg for high pil printing */
263 		px_dbg_attach(dip, &px_p->px_dbg_hdl);
264 
265 		/*
266 		 * Get key properties of the pci bridge node and
267 		 * determine its type (psycho, schizo, etc ...).
268 		 */
269 		if (px_get_props(px_p, dip) == DDI_FAILURE)
270 			goto err_bad_px_prop;
271 
272 		if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS)
273 			goto err_bad_dev_init;
274 
275 		/* Initialize device handle */
276 		px_p->px_dev_hdl = dev_hdl;
277 
278 		/* Cache the BDF of the root port nexus */
279 		px_p->px_bdf = px_lib_get_bdf(px_p);
280 
281 		/*
282 		 * Initialize the interrupt block.  Note that this
283 		 * initializes error handling for the PEC as well.
284 		 */
285 		if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS)
286 			goto err_bad_ib;
287 
288 		if (px_cb_attach(px_p) != DDI_SUCCESS)
289 			goto err_bad_cb;
290 
291 		/*
292 		 * Start creating the modules.
293 		 * Note that attach() routines should
294 		 * register and enable their own interrupts.
295 		 */
296 
297 		if ((px_mmu_attach(px_p)) != DDI_SUCCESS)
298 			goto err_bad_mmu;
299 
300 		if ((px_msiq_attach(px_p)) != DDI_SUCCESS)
301 			goto err_bad_msiq;
302 
303 		if ((px_msi_attach(px_p)) != DDI_SUCCESS)
304 			goto err_bad_msi;
305 
306 		if ((px_pec_attach(px_p)) != DDI_SUCCESS)
307 			goto err_bad_pec;
308 
309 		if ((px_dma_attach(px_p)) != DDI_SUCCESS)
310 			goto err_bad_dma; /* nothing to uninitialize on DMA */
311 
312 		if ((px_fm_attach(px_p)) != DDI_SUCCESS)
313 			goto err_bad_dma;
314 
315 		/*
316 		 * All of the error handlers have been registered
317 		 * by now so it's time to activate the interrupt.
318 		 */
319 		if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS)
320 			goto err_bad_intr;
321 
322 		(void) px_init_hotplug(px_p);
323 
324 		(void) px_set_mps(px_p);
325 
326 		/*
327 		 * Create the "devctl" node for hotplug and pcitool support.
328 		 * For a non-hotplug bus, we still need ":devctl" to
329 		 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls.
330 		 */
331 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
332 		    PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR),
333 		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
334 			goto err_bad_devctl_node;
335 		}
336 
337 		if (pxtool_init(dip) != DDI_SUCCESS)
338 			goto err_bad_pcitool_node;
339 
340 		/*
341 		 * Power management setup. Even if it fails, attach will
342 		 * succeed as this is an optional feature. Since we are
343 		 * always at full power, this is not critical.
344 		 */
345 		if (pwr_common_setup(dip) != DDI_SUCCESS) {
346 			DBG(DBG_PWR, dip, "pwr_common_setup failed\n");
347 		} else if (px_pwr_setup(dip) != DDI_SUCCESS) {
348 			DBG(DBG_PWR, dip, "px_pwr_setup failed \n");
349 			pwr_common_teardown(dip);
350 		}
351 
352 		/*
353 		 * add cpr callback
354 		 */
355 		px_cpr_add_callb(px_p);
356 
357 		ddi_report_dev(dip);
358 
359 		px_p->px_state = PX_ATTACHED;
360 		DBG(DBG_ATTACH, dip, "attach success\n");
361 		break;
362 
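		/*
		 * Error handling: the labels below unwind, in reverse
		 * order, whatever was set up before the failing step.
		 */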
363 err_bad_pcitool_node:
364 		ddi_remove_minor_node(dip, "devctl");
365 err_bad_devctl_node:
366 		px_err_rem_intr(&px_p->px_fault);
367 err_bad_intr:
368 		px_fm_detach(px_p);
369 err_bad_dma:
370 		px_pec_detach(px_p);
371 err_bad_pec:
372 		px_msi_detach(px_p);
373 err_bad_msi:
374 		px_msiq_detach(px_p);
375 err_bad_msiq:
376 		px_mmu_detach(px_p);
377 err_bad_mmu:
378 		px_cb_detach(px_p);
379 err_bad_cb:
380 		px_ib_detach(px_p);
381 err_bad_ib:
382 		if (px_lib_dev_fini(dip) != DDI_SUCCESS) {
383 			DBG(DBG_ATTACH, dip, "px_lib_dev_fini failed\n");
384 		}
385 err_bad_dev_init:
386 		px_free_props(px_p);
387 err_bad_px_prop:
388 		px_dbg_detach(dip, &px_p->px_dbg_hdl);
389 		mutex_destroy(&px_p->px_mutex);
390 		ddi_soft_state_free(px_state_p, instance);
391 err_bad_px_softstate:
392 		ret = DDI_FAILURE;
393 		break;
394 
395 	case DDI_RESUME:
396 		DBG(DBG_ATTACH, dip, "DDI_RESUME\n");
397 
398 		px_p = INST_TO_STATE(instance);
399 
400 		mutex_enter(&px_p->px_mutex);
401 
402 		/* suspend might not have succeeded */
403 		if (px_p->px_state != PX_SUSPENDED) {
404 			DBG(DBG_ATTACH, px_p->px_dip,
405 			    "instance NOT suspended\n");
406 			ret = DDI_FAILURE;
407 			break;
408 		}
409 
410 		px_msiq_resume(px_p);
411 		px_lib_resume(dip);
412 		(void) pcie_pwr_resume(dip);
413 		px_p->px_state = PX_ATTACHED;
414 
415 		mutex_exit(&px_p->px_mutex);
416 
417 		break;
418 	default:
419 		DBG(DBG_ATTACH, dip, "unsupported attach op\n");
420 		ret = DDI_FAILURE;
421 		break;
422 	}
423 
424 	return (ret);
425 }
426 
427 /*
428  * detach entry point:
429  */
430 /*ARGSUSED*/
431 static int
432 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
433 {
434 	int instance = ddi_get_instance(dip);
435 	px_t *px_p = INST_TO_STATE(instance);
436 	int ret;
437 
438 	/*
439 	 * Make sure we are currently attached
440 	 */
441 	if (px_p->px_state != PX_ATTACHED) {
442 		DBG(DBG_DETACH, dip, "Instance not attached\n");
443 		return (DDI_FAILURE);
444 	}
445 
446 	mutex_enter(&px_p->px_mutex);
447 
448 	switch (cmd) {
449 	case DDI_DETACH:
450 		DBG(DBG_DETACH, dip, "DDI_DETACH\n");
451 
452 		/*
453 		 * remove cpr callback
454 		 */
455 		px_cpr_rem_callb(px_p);
456 
457 		if (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE)
458 			if (px_uninit_hotplug(dip) != DDI_SUCCESS) {
459 				mutex_exit(&px_p->px_mutex);
460 				return (DDI_FAILURE);
461 			}
462 
463 		/*
464 		 * things which used to be done in obj_destroy
465 		 * are now in-lined here.
466 		 */
467 
468 		px_p->px_state = PX_DETACHED;
469 
470 		pxtool_uninit(dip);
471 
472 		ddi_remove_minor_node(dip, "devctl");
473 		px_err_rem_intr(&px_p->px_fault);
474 		px_fm_detach(px_p);
475 		px_pec_detach(px_p);
476 		px_pwr_teardown(dip);
477 		pwr_common_teardown(dip);
478 		px_msi_detach(px_p);
479 		px_msiq_detach(px_p);
480 		px_mmu_detach(px_p);
481 		px_cb_detach(px_p);
482 		px_ib_detach(px_p);
483 		if (px_lib_dev_fini(dip) != DDI_SUCCESS) {
484 			DBG(DBG_DETACH, dip, "px_lib_dev_fini failed\n");
485 		}
486 
487 		/*
488 		 * Free the px soft state structure and the rest of the
489 		 * resources it's using.
490 		 */
491 		px_free_props(px_p);
492 		px_dbg_detach(dip, &px_p->px_dbg_hdl);
493 		mutex_exit(&px_p->px_mutex);
494 		mutex_destroy(&px_p->px_mutex);
495 
496 		/* Free the interrupt-priorities prop if we created it. */
497 		{
498 			int len;
499 
500 			if (ddi_getproplen(DDI_DEV_T_ANY, dip,
501 			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
502 			    "interrupt-priorities", &len) == DDI_PROP_SUCCESS)
503 				(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
504 				    "interrupt-priorities");
505 		}
506 
507 		px_p->px_dev_hdl = NULL;
508 		ddi_soft_state_free(px_state_p, instance);
509 
510 		return (DDI_SUCCESS);
511 
512 	case DDI_SUSPEND:
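		/*
		 * Power-manage the fabric first; mark the instance
		 * suspended only if the library suspend succeeds.
		 */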
513 		if (pcie_pwr_suspend(dip) != DDI_SUCCESS) {
514 			mutex_exit(&px_p->px_mutex);
515 			return (DDI_FAILURE);
516 		}
517 		if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS)
518 			px_p->px_state = PX_SUSPENDED;
519 		mutex_exit(&px_p->px_mutex);
520 
521 		return (ret);
522 
523 	default:
524 		DBG(DBG_DETACH, dip, "unsupported detach op\n");
525 		mutex_exit(&px_p->px_mutex);
526 		return (DDI_FAILURE);
527 	}
528 }
529 
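/*
 * Set up the common-block (XBC) fault handler: translate the XBC devino
 * to a system interrupt number and register px_err_cb_intr for it.
 */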
530 int
531 px_cb_attach(px_t *px_p)
532 {
533 	px_fault_t	*fault_p = &px_p->px_cb_fault;
534 	dev_info_t	*dip = px_p->px_dip;
535 	sysino_t	sysino;
536 
537 	if (px_lib_intr_devino_to_sysino(dip,
538 	    px_p->px_inos[PX_INTR_XBC], &sysino) != DDI_SUCCESS)
539 		return (DDI_FAILURE);
540 
541 	fault_p->px_fh_dip = dip;
542 	fault_p->px_fh_sysino = sysino;
543 	fault_p->px_err_func = px_err_cb_intr;
544 	fault_p->px_intr_ino = px_p->px_inos[PX_INTR_XBC];
545 
546 	return (px_cb_add_intr(fault_p));
547 }
548 
549 void
550 px_cb_detach(px_t *px_p)
551 {
552 	px_cb_rem_intr(&px_p->px_cb_fault);
553 }
554 
555 /*
556  * Power-management-related initialization specific to px;
557  * called by px_attach().
558  */
559 static int
560 px_pwr_setup(dev_info_t *dip)
561 {
562 	pcie_pwr_t *pwr_p;
563 	int instance = ddi_get_instance(dip);
564 	px_t *px_p = INST_TO_STATE(instance);
565 	ddi_intr_handle_impl_t hdl;
566 
567 	ASSERT(PCIE_PMINFO(dip));
568 	pwr_p = PCIE_NEXUS_PMINFO(dip);
569 	ASSERT(pwr_p);
570 
571 	/*
572 	 * Indicate support for LDI (Layered Driver Interface):
573 	 * create the property if it is not already there.
574 	 */
575 	if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
576 	    DDI_KERNEL_IOCTL)) {
577 		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
578 		    DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) {
579 			DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n");
580 			return (DDI_FAILURE);
581 		}
582 	}
583 	/* No support for device PM. We are always at full power */
584 	pwr_p->pwr_func_lvl = PM_LEVEL_D0;
585 
586 	mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER,
587 	    DDI_INTR_PRI(px_pwr_pil));
588 	cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL);
589 
590 	/* Initialize handle */
591 	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
592 	hdl.ih_cb_arg1 = px_p;
593 	hdl.ih_ver = DDI_INTR_VERSION;
594 	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
595 	hdl.ih_dip = dip;
596 	hdl.ih_pri = px_pwr_pil;
597 
598 	/* Add PME_TO_ACK message handler */
599 	hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr;
600 	if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC,
601 	    (msgcode_t)PCIE_PME_ACK_MSG, &px_p->px_pm_msiq_id) != DDI_SUCCESS) {
602 		DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add "
603 		    " PME_TO_ACK intr\n");
604 		goto pwr_setup_err1;
605 	}
606 	px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id);
607 	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID);
608 
609 	if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
610 	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
611 	    PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) {
612 		DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt"
613 		    " state failed\n");
614 		goto px_pwrsetup_err_state;
615 	}
616 
617 	return (DDI_SUCCESS);
618 
619 px_pwrsetup_err_state:
620 	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
621 	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
622 	    px_p->px_pm_msiq_id);
623 pwr_setup_err1:
624 	mutex_destroy(&px_p->px_l23ready_lock);
625 	cv_destroy(&px_p->px_l23ready_cv);
626 
627 	return (DDI_FAILURE);
628 }
629 
630 /*
631  * Undo whatever was done in px_pwr_setup; called by px_detach().
632  */
633 static void
634 px_pwr_teardown(dev_info_t *dip)
635 {
636 	int instance = ddi_get_instance(dip);
637 	px_t *px_p = INST_TO_STATE(instance);
638 	ddi_intr_handle_impl_t	hdl;
639 
640 	if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip))
641 		return;
642 
643 	/* Initialize handle */
644 	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
645 	hdl.ih_ver = DDI_INTR_VERSION;
646 	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
647 	hdl.ih_dip = dip;
648 	hdl.ih_pri = px_pwr_pil;
649 
650 	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
651 	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
652 	    px_p->px_pm_msiq_id);
653 
654 	(void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
655 	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
656 	    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG);
657 
658 	px_p->px_pm_msiq_id = (msiqid_t)-1;
659 
660 	cv_destroy(&px_p->px_l23ready_cv);
661 	mutex_destroy(&px_p->px_l23ready_lock);
662 }
663 
664 /* bus driver entry points */
665 
666 /*
667  * bus map entry point:
668  *
669  * 	if map request is for an rnumber
670  *		get the corresponding regspec from device node
671  * 	build a new regspec in our parent's format
672  *	build a new map_req with the new regspec
673  *	call up the tree to complete the mapping
674  */
675 int
676 px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
677 	off_t off, off_t len, caddr_t *addrp)
678 {
679 	px_t *px_p = DIP_TO_STATE(dip);
680 	struct regspec p_regspec;
681 	ddi_map_req_t p_mapreq;
682 	int reglen, rval, r_no;
683 	pci_regspec_t reloc_reg, *rp = &reloc_reg;
684 
685 	DBG(DBG_MAP, dip, "rdip=%s%d:",
686 	    ddi_driver_name(rdip), ddi_get_instance(rdip));
687 
688 	if (mp->map_flags & DDI_MF_USER_MAPPING)
689 		return (DDI_ME_UNIMPLEMENTED);
690 
691 	switch (mp->map_type) {
692 	case DDI_MT_REGSPEC:
693 		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
694 		break;
695 
696 	case DDI_MT_RNUMBER:
697 		r_no = mp->map_obj.rnumber;
698 		DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);
699 
700 		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
701 		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
702 			return (DDI_ME_RNUMBER_RANGE);
703 
704 		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
705 			kmem_free(rp, reglen);
706 			return (DDI_ME_RNUMBER_RANGE);
707 		}
708 		rp += r_no;
709 		break;
710 
711 	default:
712 		return (DDI_ME_INVAL);
713 	}
714 	DBG(DBG_MAP | DBG_CONT, dip, "\n");
715 
716 	if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) {
717 		/*
718 		 * There may be a need to differentiate between PCI
719 		 * and PCI-Express devices so that the following range
720 		 * check is done correctly, depending on the implementation
721 		 * of the px_pci bridge nexus driver.
722 		 */
723 		if ((off >= PCIE_CONF_HDR_SIZE) ||
724 		    (len > PCIE_CONF_HDR_SIZE) ||
725 		    (off + len > PCIE_CONF_HDR_SIZE))
726 			return (DDI_ME_INVAL);
727 		/*
728 		 * If the following call returns DDI_FAILURE, there are no
729 		 * virtual config space access services defined in this
730 		 * layer.  Otherwise the mapping is satisfied right here
731 		 * and we return.
732 		 */
733 		rval = px_lib_map_vconfig(dip, mp, off, rp, addrp);
734 		if (rval == DDI_SUCCESS)
735 			goto done;
736 	}
737 
738 	/*
739 	 * There are no virtual config space services, or we are mapping
740 	 * a region of memory-mapped config/IO/memory space, so proceed
741 	 * to the parent.
742 	 */
743 
744 	/* relocate within 64-bit pci space through "assigned-addresses" */
745 	if (rval = px_reloc_reg(dip, rdip, px_p, rp))
746 		goto done;
747 
748 	if (len)	/* adjust regspec according to mapping request */
749 		rp->pci_size_low = len;	/* MIN ? */
750 	rp->pci_phys_low += off;
751 
752 	/* translate relocated pci regspec into parent space through "ranges" */
753 	if (rval = px_xlate_reg(px_p, rp, &p_regspec))
754 		goto done;
755 
756 	p_mapreq = *mp;		/* dup the whole structure */
757 	p_mapreq.map_type = DDI_MT_REGSPEC;
758 	p_mapreq.map_obj.rp = &p_regspec;
759 	px_lib_map_attr_check(&p_mapreq);
760 	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);
761 
762 	if (rval == DDI_SUCCESS) {
763 		/*
764 		 * Set-up access functions for FM access error capable drivers.
765 		 */
766 		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)))
767 			px_fm_acc_setup(mp, rdip, rp);
768 	}
769 
770 done:
771 	if (mp->map_type == DDI_MT_RNUMBER)
772 		kmem_free(rp - r_no, reglen);
773 
774 	return (rval);
775 }
776 
777 /*
778  * bus dma map entry point
779  * return value:
780  *	DDI_DMA_PARTIAL_MAP	 1
781  *	DDI_DMA_MAPOK		 0
782  *	DDI_DMA_MAPPED		 0
783  *	DDI_DMA_NORESOURCES	-1
784  *	DDI_DMA_NOMAPPING	-2
785  *	DDI_DMA_TOOBIG		-3
786  */
787 int
788 px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
789 	ddi_dma_handle_t *handlep)
790 {
791 	px_t *px_p = DIP_TO_STATE(dip);
792 	px_mmu_t *mmu_p = px_p->px_mmu_p;
793 	ddi_dma_impl_t *mp;
794 	int ret;
795 
796 	DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
797 	    ddi_driver_name(rdip), ddi_get_instance(rdip),
798 	    handlep ? "alloc" : "advisory");
799 
800 	if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq)))
801 		return (DDI_DMA_NORESOURCES);
802 	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
803 		return (DDI_DMA_NOMAPPING);
804 	if (ret = px_dma_type(px_p, dmareq, mp))
805 		goto freehandle;
806 	if (ret = px_dma_pfn(px_p, dmareq, mp))
807 		goto freehandle;
808 
809 	switch (PX_DMA_TYPE(mp)) {
810 	case PX_DMAI_FLAGS_DVMA:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
811 		if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep)
812 			goto freehandle;
813 		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
814 			if (PX_DMA_CANFAST(mp)) {
815 				if (!px_dvma_map_fast(mmu_p, mp))
816 					break;
817 			/* LINTED E_NOP_ELSE_STMT */
818 			} else {
819 				PX_DVMA_FASTTRAK_PROF(mp);
820 			}
821 		}
822 		if (ret = px_dvma_map(mp, dmareq, mmu_p))
823 			goto freehandle;
824 		break;
825 	case PX_DMAI_FLAGS_PTP:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
826 		if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep)
827 			goto freehandle;
828 		break;
829 	case PX_DMAI_FLAGS_BYPASS:
830 	default:
831 		cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x",
832 		    ddi_driver_name(rdip), ddi_get_instance(rdip),
833 		    PX_DMA_TYPE(mp));
834 		/*NOTREACHED*/
835 	}
836 	*handlep = (ddi_dma_handle_t)mp;
837 	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;
838 	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);
839 
840 	return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
841 freehandle:
842 	if (ret == DDI_DMA_NORESOURCES)
843 		px_dma_freemp(mp); /* don't run_callback() */
844 	else
845 		(void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
846 	return (ret);
847 }
848 
849 
850 /*
851  * bus dma alloc handle entry point:
852  */
853 int
854 px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
855 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
856 {
857 	px_t *px_p = DIP_TO_STATE(dip);
858 	ddi_dma_impl_t *mp;
859 	int rval;
860 
861 	DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
862 	    ddi_driver_name(rdip), ddi_get_instance(rdip));
863 
864 	if (attrp->dma_attr_version != DMA_ATTR_V0)
865 		return (DDI_DMA_BADATTR);
866 
867 	if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg)))
868 		return (DDI_DMA_NORESOURCES);
869 
870 	/*
871 	 * Save requestor's information
872 	 */
873 	mp->dmai_attr	= *attrp; /* whole object - augmented later  */
874 	*PX_DEV_ATTR(mp)	= *attrp; /* whole object - device orig attr */
875 	DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);
876 
877 	/* check and convert dma attributes to handle parameters */
878 	if (rval = px_dma_attr2hdl(px_p, mp)) {
879 		px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
880 		*handlep = NULL;
881 		return (rval);
882 	}
883 	*handlep = (ddi_dma_handle_t)mp;
884 	return (DDI_SUCCESS);
885 }
886 
887 
888 /*
889  * bus dma free handle entry point:
890  */
891 /*ARGSUSED*/
892 int
893 px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
894 {
895 	DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
896 	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
897 	px_dma_freemp((ddi_dma_impl_t *)handle);
898 
899 	if (px_kmem_clid) {
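	/*
	 * A DMA handle was just freed.  If earlier handle allocations
	 * failed and queued callbacks (px_kmem_clid), give them a chance
	 * to retry now that memory has been released.
	 */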
900 		DBG(DBG_DMA_FREEH, dip, "run handle callback\n");
901 		ddi_run_callback(&px_kmem_clid);
902 	}
903 	return (DDI_SUCCESS);
904 }
905 
906 
907 /*
908  * bus dma bind handle entry point:
909  */
910 int
911 px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
912 	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
913 	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
914 {
915 	px_t *px_p = DIP_TO_STATE(dip);
916 	px_mmu_t *mmu_p = px_p->px_mmu_p;
917 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
918 	int ret;
919 
920 	DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
921 	    ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);
922 
923 	if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE)
924 		return (DDI_DMA_INUSE);
925 
926 	ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0);
927 	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;
928 
929 	if (ret = px_dma_type(px_p, dmareq, mp))
930 		goto err;
931 	if (ret = px_dma_pfn(px_p, dmareq, mp))
932 		goto err;
933 
934 	switch (PX_DMA_TYPE(mp)) {
935 	case PX_DMAI_FLAGS_DVMA:
936 		if (ret = px_dvma_win(px_p, dmareq, mp))
937 			goto map_err;
938 		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
939 			if (PX_DMA_CANFAST(mp)) {
940 				if (!px_dvma_map_fast(mmu_p, mp))
941 					goto mapped; /*LINTED E_NOP_ELSE_STMT*/
942 			} else {
943 				PX_DVMA_FASTTRAK_PROF(mp);
944 			}
945 		}
946 		if (ret = px_dvma_map(mp, dmareq, mmu_p))
947 			goto map_err;
948 mapped:
949 		*ccountp = 1;
950 		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
951 		break;
952 	case PX_DMAI_FLAGS_BYPASS:
953 	case PX_DMAI_FLAGS_PTP:
954 		if (ret = px_dma_physwin(px_p, dmareq, mp))
955 			goto map_err;
956 		*ccountp = PX_WINLST(mp)->win_ncookies;
957 		*cookiep =
958 		    *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */
959 		break;
960 	default:
961 		cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type",
962 		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
963 		/*NOTREACHED*/
964 	}
965 	DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n",
966 	    cookiep->dmac_address, cookiep->dmac_size);
967 	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);
968 
969 	/* insert dma handle into FMA cache */
970 	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
971 		(void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL);
972 		mp->dmai_error.err_cf = px_err_dma_hdl_check;
973 	}
974 
975 	return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
976 map_err:
977 	px_dma_freepfn(mp);
978 err:
979 	mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE;
980 	return (ret);
981 }
982 
983 
984 /*
985  * bus dma unbind handle entry point:
986  */
987 /*ARGSUSED*/
988 int
989 px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
990 {
991 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
992 	px_t *px_p = DIP_TO_STATE(dip);
993 	px_mmu_t *mmu_p = px_p->px_mmu_p;
994 
995 	DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
996 	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
997 	if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) {
998 		DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n");
999 		return (DDI_FAILURE);
1000 	}
1001 
1002 	/* remove dma handle from FMA cache */
1003 	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
1004 		if (DEVI(rdip)->devi_fmhdl != NULL &&
1005 		    DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) {
1006 			(void) ndi_fmc_remove(rdip, DMA_HANDLE, mp);
1007 		}
1008 	}
1009 
1010 	/*
1011 	 * If the handle is using the iommu, unload all of the iommu
1012 	 * translations.
1013 	 */
1014 	switch (PX_DMA_TYPE(mp)) {
1015 	case PX_DMAI_FLAGS_DVMA:
1016 		px_mmu_unmap_window(mmu_p, mp);
1017 		px_dvma_unmap(mmu_p, mp);
1018 		px_dma_freepfn(mp);
1019 		break;
1020 	case PX_DMAI_FLAGS_BYPASS:
1021 	case PX_DMAI_FLAGS_PTP:
1022 		px_dma_freewin(mp);
1023 		break;
1024 	default:
1025 		cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p",
1026 		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
1027 		/*NOTREACHED*/
1028 	}
1029 	if (mmu_p->mmu_dvma_clid != 0) {
1030 		DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n");
1031 		ddi_run_callback(&mmu_p->mmu_dvma_clid);
1032 	}
1033 	if (px_kmem_clid) {
1034 		DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n");
1035 		ddi_run_callback(&px_kmem_clid);
1036 	}
1037 	mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE;
1038 
1039 	return (DDI_SUCCESS);
1040 }
1041 
1042 /*
1043  * bus dma win entry point:
1044  */
1045 int
1046 px_dma_win(dev_info_t *dip, dev_info_t *rdip,
1047 	ddi_dma_handle_t handle, uint_t win, off_t *offp,
1048 	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1049 {
1050 	ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)handle;
1051 	int		ret;
1052 
1053 	DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n",
1054 	    ddi_driver_name(rdip), ddi_get_instance(rdip));
1055 
1056 	px_dump_dma_handle(DBG_DMA_WIN, dip, mp);
1057 	if (win >= mp->dmai_nwin) {
1058 		DBG(DBG_DMA_WIN, dip, "%x out of range\n", win);
1059 		return (DDI_FAILURE);
1060 	}
1061 
1062 	switch (PX_DMA_TYPE(mp)) {
1063 	case PX_DMAI_FLAGS_DVMA:
1064 		if (win != PX_DMA_CURWIN(mp)) {
1065 			px_t *px_p = DIP_TO_STATE(dip);
1066 			px_mmu_t *mmu_p = px_p->px_mmu_p;
1067 			px_mmu_unmap_window(mmu_p, mp);
1068 
1069 			/* map_window sets dmai_mapping/size/offset */
1070 			if ((ret = px_mmu_map_window(mmu_p,
1071 			    mp, win)) != DDI_SUCCESS)
1072 				return (ret);
1074 		}
1075 		if (cookiep)
1076 			MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping,
1077 			    mp->dmai_size);
1078 		if (ccountp)
1079 			*ccountp = 1;
1080 		break;
1081 	case PX_DMAI_FLAGS_PTP:
1082 	case PX_DMAI_FLAGS_BYPASS: {
1083 		int i;
1084 		ddi_dma_cookie_t *ck_p;
1085 		px_dma_win_t *win_p = mp->dmai_winlst;
1086 
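		/* walk the window list to the requested window */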
1087 		for (i = 0; i < win; win_p = win_p->win_next, i++) {};
1088 		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
1089 		*cookiep = *ck_p;
1090 		mp->dmai_offset = win_p->win_offset;
1091 		mp->dmai_size   = win_p->win_size;
1092 		mp->dmai_mapping = ck_p->dmac_laddress;
1093 		mp->dmai_cookie = ck_p + 1;
1094 		win_p->win_curseg = 0;
1095 		if (ccountp)
1096 			*ccountp = win_p->win_ncookies;
1097 		}
1098 		break;
1099 	default:
1100 		cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x",
1101 		    ddi_driver_name(rdip), ddi_get_instance(rdip),
1102 		    PX_DMA_TYPE(mp));
1103 		return (DDI_FAILURE);
1104 	}
1105 	if (cookiep)
1106 		DBG(DBG_DMA_WIN, dip,
1107 		    "cookie - dmac_address=%x dmac_size=%x\n",
1108 		    cookiep->dmac_address, cookiep->dmac_size);
1109 	if (offp)
1110 		*offp = (off_t)mp->dmai_offset;
1111 	if (lenp)
1112 		*lenp = mp->dmai_size;
1113 	return (DDI_SUCCESS);
1114 }
1115 
1116 #ifdef	DEBUG
1117 static char *px_dmactl_str[] = {
1118 	"DDI_DMA_FREE",
1119 	"DDI_DMA_SYNC",
1120 	"DDI_DMA_HTOC",
1121 	"DDI_DMA_KVADDR",
1122 	"DDI_DMA_MOVWIN",
1123 	"DDI_DMA_REPWIN",
1124 	"DDI_DMA_GETERR",
1125 	"DDI_DMA_COFF",
1126 	"DDI_DMA_NEXTWIN",
1127 	"DDI_DMA_NEXTSEG",
1128 	"DDI_DMA_SEGTOC",
1129 	"DDI_DMA_RESERVE",
1130 	"DDI_DMA_RELEASE",
1131 	"DDI_DMA_RESETH",
1132 	"DDI_DMA_CKSYNC",
1133 	"DDI_DMA_IOPB_ALLOC",
1134 	"DDI_DMA_IOPB_FREE",
1135 	"DDI_DMA_SMEM_ALLOC",
1136 	"DDI_DMA_SMEM_FREE",
1137 	"DDI_DMA_SET_SBUS64"
1138 };
1139 #endif	/* DEBUG */
1140 
1141 /*
1142  * bus dma control entry point:
1143  */
1144 /*ARGSUSED*/
1145 int
1146 px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
1147 	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
1148 	uint_t cache_flags)
1149 {
1150 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1151 
1152 #ifdef	DEBUG
1153 	DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd],
1154 	    ddi_driver_name(rdip), ddi_get_instance(rdip));
1155 #endif	/* DEBUG */
1156 
1157 	switch (cmd) {
1158 	case DDI_DMA_FREE:
1159 		(void) px_dma_unbindhdl(dip, rdip, handle);
1160 		(void) px_dma_freehdl(dip, rdip, handle);
1161 		return (DDI_SUCCESS);
1162 	case DDI_DMA_RESERVE: {
1163 		px_t *px_p = DIP_TO_STATE(dip);
1164 		return (px_fdvma_reserve(dip, rdip, px_p,
1165 		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
1166 		}
1167 	case DDI_DMA_RELEASE: {
1168 		px_t *px_p = DIP_TO_STATE(dip);
1169 		return (px_fdvma_release(dip, px_p, mp));
1170 		}
1171 	default:
1172 		break;
1173 	}
1174 
1175 	switch (PX_DMA_TYPE(mp)) {
1176 	case PX_DMAI_FLAGS_DVMA:
1177 		return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
1178 		    cache_flags));
1179 	case PX_DMAI_FLAGS_PTP:
1180 	case PX_DMAI_FLAGS_BYPASS:
1181 		return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
1182 		    cache_flags));
1183 	default:
1184 		cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x",
1185 		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
1186 		    mp->dmai_flags);
1187 		/*NOTREACHED*/
1188 	}
1189 	return (0);
1190 }
1191 
1192 /*
1193  * control ops entry point:
1194  *
1195  * Requests handled completely:
1196  *	DDI_CTLOPS_INITCHILD	see init_child() for details
1197  *	DDI_CTLOPS_UNINITCHILD
1198  *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
1199  *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
1200  *	DDI_CTLOPS_REGSIZE
1201  *	DDI_CTLOPS_NREGS
1202  *	DDI_CTLOPS_DVMAPAGESIZE
1203  *	DDI_CTLOPS_POKE
1204  *	DDI_CTLOPS_PEEK
1205  *
1206  * All others passed to parent.
1207  */
1208 int
1209 px_ctlops(dev_info_t *dip, dev_info_t *rdip,
1210 	ddi_ctl_enum_t op, void *arg, void *result)
1211 {
1212 	px_t *px_p = DIP_TO_STATE(dip);
1213 	struct detachspec *ds;
1214 	struct attachspec *as;
1215 
1216 	switch (op) {
1217 	case DDI_CTLOPS_INITCHILD:
1218 		return (px_init_child(px_p, (dev_info_t *)arg));
1219 
1220 	case DDI_CTLOPS_UNINITCHILD:
1221 		return (px_uninit_child(px_p, (dev_info_t *)arg));
1222 
1223 	case DDI_CTLOPS_ATTACH:
1224 		if (!pcie_is_child(dip, rdip))
1225 			return (DDI_SUCCESS);
1226 
1227 		as = (struct attachspec *)arg;
1228 		switch (as->when) {
1229 		case DDI_PRE:
1230 			if (as->cmd == DDI_ATTACH) {
1231 				DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n",
1232 				    ddi_driver_name(rdip),
1233 				    ddi_get_instance(rdip));
1234 				return (pcie_pm_hold(dip));
1235 			}
1236 			if (as->cmd == DDI_RESUME) {
1237 				DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n",
1238 				    ddi_driver_name(rdip),
1239 				    ddi_get_instance(rdip));
1240 
1241 				pcie_clear_errors(rdip);
1242 			}
1243 			return (DDI_SUCCESS);
1244 
1245 		case DDI_POST:
1246 			DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n",
1247 			    ddi_driver_name(rdip), ddi_get_instance(rdip));
1248 			if (as->cmd == DDI_ATTACH && as->result != DDI_SUCCESS)
1249 				pcie_pm_release(dip);
1250 
1251 			if (as->result == DDI_SUCCESS)
1252 				pf_init(rdip, (void *)px_p->px_fm_ibc, as->cmd);
1253 
1254 			(void) pcie_postattach_child(rdip);
1255 
1256 			return (DDI_SUCCESS);
1257 		default:
1258 			break;
1259 		}
1260 		break;
1261 
1262 	case DDI_CTLOPS_DETACH:
1263 		if (!pcie_is_child(dip, rdip))
1264 			return (DDI_SUCCESS);
1265 
1266 		ds = (struct detachspec *)arg;
1267 		switch (ds->when) {
1268 		case DDI_POST:
1269 			if (ds->cmd == DDI_DETACH &&
1270 			    ds->result == DDI_SUCCESS) {
1271 				DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n",
1272 				    ddi_driver_name(rdip),
1273 				    ddi_get_instance(rdip));
1274 				return (pcie_pm_remove_child(dip, rdip));
1275 			}
1276 			return (DDI_SUCCESS);
1277 		case DDI_PRE:
1278 			pf_fini(rdip, ds->cmd);
1279 			return (DDI_SUCCESS);
1280 		default:
1281 			break;
1282 		}
1283 		break;
1284 
1285 	case DDI_CTLOPS_REPORTDEV:
1286 		return (px_report_dev(rdip));
1287 
1288 	case DDI_CTLOPS_IOMIN:
1289 		return (DDI_SUCCESS);
1290 
1291 	case DDI_CTLOPS_REGSIZE:
1292 		*((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg));
1293 		return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS);
1294 
1295 	case DDI_CTLOPS_NREGS:
1296 		*((uint_t *)result) = px_get_nreg_set(rdip);
1297 		return (DDI_SUCCESS);
1298 
1299 	case DDI_CTLOPS_DVMAPAGESIZE:
1300 		*((ulong_t *)result) = MMU_PAGE_SIZE;
1301 		return (DDI_SUCCESS);
1302 
1303 	case DDI_CTLOPS_POKE:	/* platform dependent implementation. */
1304 		return (px_lib_ctlops_poke(dip, rdip,
1305 		    (peekpoke_ctlops_t *)arg));
1306 
1307 	case DDI_CTLOPS_PEEK:	/* platform dependent implementation. */
1308 		return (px_lib_ctlops_peek(dip, rdip,
1309 		    (peekpoke_ctlops_t *)arg, result));
1310 
1311 	case DDI_CTLOPS_POWER:
1312 	default:
1313 		break;
1314 	}
1315 
1316 	/*
1317 	 * Now pass the request up to our parent.
1318 	 */
1319 	DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n",
1320 	    ddi_driver_name(rdip), ddi_get_instance(rdip));
1321 	return (ddi_ctlops(dip, rdip, op, arg, result));
1322 }
1323 
1324 /* ARGSUSED */
1325 int
1326 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
1327     ddi_intr_handle_impl_t *hdlp, void *result)
1328 {
1329 	int	intr_types, ret = DDI_SUCCESS;
1330 
1331 	DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n",
1332 	    ddi_driver_name(rdip), ddi_get_instance(rdip));
1333 
1334 	/* Process DDI_INTROP_SUPPORTED_TYPES request here */
1335 	if (intr_op == DDI_INTROP_SUPPORTED_TYPES) {
1336 		*(int *)result = i_ddi_get_intx_nintrs(rdip) ?
1337 		    DDI_INTR_TYPE_FIXED : 0;
1338 
1339 		if ((pci_msi_get_supported_type(rdip,
1340 		    &intr_types)) == DDI_SUCCESS) {
1341 			/*
1342 			 * Double check supported interrupt types vs.
1343 			 * what the host bridge supports.
1344 			 */
1345 			*(int *)result |= intr_types;
1346 		}
1347 
1348 		return (ret);
1349 	}
1350 
1351 	/*
1352 	 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts.
1353 	 * Return failure if interrupt type is not supported.
1354 	 */
1355 	switch (hdlp->ih_type) {
1356 	case DDI_INTR_TYPE_FIXED:
1357 		ret = px_intx_ops(dip, rdip, intr_op, hdlp, result);
1358 		break;
1359 	case DDI_INTR_TYPE_MSI:
1360 	case DDI_INTR_TYPE_MSIX:
1361 		ret = px_msix_ops(dip, rdip, intr_op, hdlp, result);
1362 		break;
1363 	default:
1364 		ret = DDI_ENOTSUP;
1365 		break;
1366 	}
1367 
1368 	return (ret);
1369 }
1370 
1371 static int
1372 px_init_hotplug(px_t *px_p)
1373 {
1374 	px_bus_range_t bus_range;
1375 	dev_info_t *dip;
1376 	pciehpc_regops_t regops;
1377 
1378 	dip = px_p->px_dip;
1379 
1380 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1381 	    "hotplug-capable") == 0)
1382 		return (DDI_FAILURE);
1383 
1384 	/*
1385 	 * Before initializing hotplug, open up the bus range.  The busra
1386 	 * module will initialize its pool of bus numbers from this.
1387 	 * "busra" will be the agent that keeps track of them during
1388 	 * hotplug.  Also note that busra will remove any bus numbers
1389 	 * already in use from boot time.
1390 	 */
1391 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1392 	    "bus-range") == 0) {
1393 		cmn_err(CE_WARN, "%s%d: bus-range not found\n",
1394 		    ddi_driver_name(dip), ddi_get_instance(dip));
1395 #ifdef	DEBUG
1396 		bus_range.lo = 0x0;
1397 		bus_range.hi = 0xff;
1398 
1399 		if (ndi_prop_update_int_array(DDI_DEV_T_NONE,
1400 		    dip, "bus-range", (int *)&bus_range, 2)
1401 		    != DDI_PROP_SUCCESS) {
1402 			return (DDI_FAILURE);
1403 		}
1404 #else
1405 		return (DDI_FAILURE);
1406 #endif
1407 	}
1408 
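	/*
	 * Initialize hotplug bottom-up: platform-specific support first,
	 * then the PCIe hotplug controller, then the pcihp framework;
	 * earlier steps are undone if a later one fails.
	 */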
1409 	if (px_lib_hotplug_init(dip, (void *)&regops) != DDI_SUCCESS)
1410 		return (DDI_FAILURE);
1411 
1412 	if (pciehpc_init(dip, &regops) != DDI_SUCCESS) {
1413 		px_lib_hotplug_uninit(dip);
1414 		return (DDI_FAILURE);
1415 	}
1416 
1417 	if (pcihp_init(dip) != DDI_SUCCESS) {
1418 		(void) pciehpc_uninit(dip);
1419 		px_lib_hotplug_uninit(dip);
1420 		return (DDI_FAILURE);
1421 	}
1422 
1423 	if (pcihp_get_cb_ops() != NULL) {
1424 		DBG(DBG_ATTACH, dip, "%s%d hotplug enabled",
1425 		    ddi_driver_name(dip), ddi_get_instance(dip));
1426 		px_p->px_dev_caps |= PX_HOTPLUG_CAPABLE;
1427 	}
1428 
1429 	return (DDI_SUCCESS);
1430 }
1431 
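/*
 * Tear down hotplug support in the reverse order of px_init_hotplug().
 */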
1432 static int
1433 px_uninit_hotplug(dev_info_t *dip)
1434 {
1435 	if (pcihp_uninit(dip) != DDI_SUCCESS)
1436 		return (DDI_FAILURE);
1437 
1438 	if (pciehpc_uninit(dip) != DDI_SUCCESS)
1439 		return (DDI_FAILURE);
1440 
1441 	px_lib_hotplug_uninit(dip);
1442 
1443 	return (DDI_SUCCESS);
1444 }
1445 
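/*
 * Determine the Max Payload Size for the fabric below this root complex:
 * start from the root complex MPS capability, clamp it to pcie_max_mps,
 * let pcie_get_fabric_mps() reduce it to the lowest value supported by
 * the fabric, then program the root complex with the result.
 */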
1446 static void
1447 px_set_mps(px_t *px_p)
1448 {
1449 	dev_info_t	*dip;
1450 	pcie_bus_t	*bus_p;
1451 	int		max_supported;
1452 
1453 	dip = px_p->px_dip;
1454 	bus_p = PCIE_DIP2BUS(dip);
1455 
1456 	bus_p->bus_mps = -1;
1457 
1458 	if (pcie_root_port(dip) == DDI_FAILURE) {
1459 		if (px_lib_get_root_complex_mps(px_p, dip,
1460 		    &max_supported) < 0) {
1461 
1462 			DBG(DBG_MPS, dip, "MPS:  Can not get RC MPS\n");
1463 			return;
1464 		}
1465 
1466 		DBG(DBG_MPS, dip, "MPS: Root Complex MPS Cap of = %x\n",
1467 		    max_supported);
1468 
1469 		if (pcie_max_mps < max_supported)
1470 			max_supported = pcie_max_mps;
1471 
1472 		(void) pcie_get_fabric_mps(dip, ddi_get_child(dip),
1473 		    &max_supported);
1474 
1475 		bus_p->bus_mps = max_supported;
1476 
1477 		(void) px_lib_set_root_complex_mps(px_p, dip, bus_p->bus_mps);
1478 
1479 		DBG(DBG_MPS, dip, "MPS: Root Complex MPS Set to = %x\n",
1480 		    bus_p->bus_mps);
1481 	}
1482 }
1483