xref: /illumos-gate/usr/src/uts/common/io/pciex/pcieb.c (revision d07db889a707792afa6ee57f6361c13f1f3f471f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /*
26  * Common x86 and SPARC PCI-E to PCI bus bridge nexus driver
27  */
28 
29 #include <sys/sysmacros.h>
30 #include <sys/conf.h>
31 #include <sys/kmem.h>
32 #include <sys/debug.h>
33 #include <sys/modctl.h>
34 #include <sys/autoconf.h>
35 #include <sys/ddi_impldefs.h>
36 #include <sys/pci.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/sunndi.h>
40 #include <sys/fm/util.h>
41 #include <sys/pci_cap.h>
42 #include <sys/pci_impl.h>
43 #include <sys/pcie_impl.h>
44 #include <sys/open.h>
45 #include <sys/stat.h>
46 #include <sys/file.h>
47 #include <sys/promif.h>		/* prom_printf */
48 #include <sys/disp.h>
49 #include <sys/pcie_pwr.h>
50 #include <sys/hotplug/pci/pcie_hp.h>
51 #include "pcieb.h"
52 #ifdef PX_PLX
53 #include <io/pciex/pcieb_plx.h>
54 #endif /* PX_PLX */
55 
56 /*LINTLIBRARY*/
57 
58 /* panic flag */
59 int pcieb_die = PF_ERR_FATAL_FLAGS;
60 
61 /* flag to turn on MSI support */
62 int pcieb_enable_msi = 1;
63 
64 #if defined(DEBUG)
65 uint_t pcieb_dbg_print = 0;
66 
67 static char *pcieb_debug_sym [] = {	/* same sequence as pcieb_debug_bit */
68 	/*  0 */ "attach",
69 	/*  1 */ "pwr",
70 	/*  2 */ "intr"
71 };
72 #endif /* DEBUG */
73 
74 static int pcieb_bus_map(dev_info_t *, dev_info_t *, ddi_map_req_t *, off_t,
75 	off_t, caddr_t *);
76 static int pcieb_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *,
77 	void *);
78 static int pcieb_fm_init(pcieb_devstate_t *pcieb_p);
79 static void pcieb_fm_fini(pcieb_devstate_t *pcieb_p);
80 static int pcieb_fm_init_child(dev_info_t *dip, dev_info_t *cdip, int cap,
81     ddi_iblock_cookie_t *ibc_p);
82 static int pcieb_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
83 	ddi_dma_attr_t *attr_p, int (*waitfp)(caddr_t), caddr_t arg,
84 	ddi_dma_handle_t *handlep);
85 static int pcieb_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
86 	ddi_dma_handle_t handle, enum ddi_dma_ctlops cmd, off_t *offp,
87 	size_t *lenp, caddr_t *objp, uint_t cache_flags);
88 static int pcieb_intr_ops(dev_info_t *dip, dev_info_t *rdip,
89 	ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
90 
/*
 * Bus nexus operations vector.  Bridge-specific entry points (bus_map,
 * DMA allochdl/mctl, ctlops, intr_op) are implemented locally in this
 * file; the remaining slots delegate to the generic DDI/NDI
 * implementations.
 */
static struct bus_ops pcieb_bus_ops = {
	BUSO_REV,
	pcieb_bus_map,			/* (*bus_map)();		*/
	0,				/* (*bus_get_intrspec)(); obsolete */
	0,				/* (*bus_add_intrspec)(); obsolete */
	0,				/* (*bus_remove_intrspec)(); obsolete */
	i_ddi_map_fault,		/* (*bus_map_fault)();		*/
	ddi_dma_map,			/* (*bus_dma_map)();		*/
	pcieb_dma_allochdl,		/* (*bus_dma_allochdl)();	*/
	ddi_dma_freehdl,		/* (*bus_dma_freehdl)();	*/
	ddi_dma_bindhdl,		/* (*bus_dma_bindhdl)();	*/
	ddi_dma_unbindhdl,		/* (*bus_dma_unbindhdl)();	*/
	ddi_dma_flush,			/* (*bus_dma_flush)();		*/
	ddi_dma_win,			/* (*bus_dma_win)();		*/
	pcieb_dma_mctl,			/* (*bus_dma_ctl)();		*/
	pcieb_ctlops,			/* (*bus_ctl)();		*/
	ddi_bus_prop_op,		/* (*bus_prop_op)();		*/
	ndi_busop_get_eventcookie,	/* (*bus_get_eventcookie)();	*/
	ndi_busop_add_eventcall,	/* (*bus_add_eventcall)();	*/
	ndi_busop_remove_eventcall,	/* (*bus_remove_eventcall)();	*/
	ndi_post_event,			/* (*bus_post_event)();		*/
	NULL,				/* (*bus_intr_ctl)();		*/
	NULL,				/* (*bus_config)(); 		*/
	NULL,				/* (*bus_unconfig)(); 		*/
	pcieb_fm_init_child,		/* (*bus_fm_init)(); 		*/
	NULL,				/* (*bus_fm_fini)(); 		*/
	i_ndi_busop_access_enter,	/* (*bus_fm_access_enter)(); 	*/
	i_ndi_busop_access_exit,	/* (*bus_fm_access_exit)(); 	*/
	pcie_bus_power,			/* (*bus_power)(); 	*/
	pcieb_intr_ops,			/* (*bus_intr_op)(); 		*/
	pcie_hp_common_ops		/* (*bus_hp_op)(); 		*/
};
123 
124 static int	pcieb_open(dev_t *, int, int, cred_t *);
125 static int	pcieb_close(dev_t, int, int, cred_t *);
126 static int	pcieb_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
127 static int	pcieb_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
128 static uint_t 	pcieb_intr_handler(caddr_t arg1, caddr_t arg2);
129 
130 /* PM related functions */
131 static int	pcieb_pwr_setup(dev_info_t *dip);
132 static int	pcieb_pwr_init_and_raise(dev_info_t *dip, pcie_pwr_t *pwr_p);
133 static void	pcieb_pwr_teardown(dev_info_t *dip);
134 static int	pcieb_pwr_disable(dev_info_t *dip);
135 
136 /* Hotplug related functions */
137 static void pcieb_id_props(pcieb_devstate_t *pcieb);
138 
139 /*
140  * soft state pointer
141  */
142 void *pcieb_state;
143 
/*
 * Character device entry points for the bridge's minor nodes.  Only
 * open, close and ioctl are implemented (see pcieb_open/pcieb_close/
 * pcieb_ioctl); all other entry points are stubbed with nodev.
 */
static struct cb_ops pcieb_cb_ops = {
	pcieb_open,			/* open */
	pcieb_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	pcieb_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	pcie_prop_op,			/* cb_prop_op */
	NULL,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* int (*cb_aread)() */
	nodev				/* int (*cb_awrite)() */
};
164 
165 static int	pcieb_probe(dev_info_t *);
166 static int	pcieb_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
167 static int	pcieb_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
168 
/*
 * Device operations vector wiring this driver's autoconfiguration and
 * power entry points into the DDI framework.
 */
static struct dev_ops pcieb_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt  */
	pcieb_info,		/* info */
	nulldev,		/* identify */
	pcieb_probe,		/* probe */
	pcieb_attach,		/* attach */
	pcieb_detach,		/* detach */
	nulldev,		/* reset */
	&pcieb_cb_ops,		/* driver operations */
	&pcieb_bus_ops,		/* bus operations */
	pcie_power,		/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};
183 
184 /*
185  * Module linkage information for the kernel.
186  */
187 
static struct modldrv modldrv = {
	&mod_driverops, /* Type of module */
	"PCIe bridge/switch driver",	/* description shown by modinfo(1M) */
	&pcieb_ops,	/* driver ops */
};

/* Single-driver linkage passed to mod_install()/mod_remove()/mod_info() */
static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};
199 
200 /*
201  * forward function declarations:
202  */
203 static void	pcieb_uninitchild(dev_info_t *);
204 static int 	pcieb_initchild(dev_info_t *child);
205 static void	pcieb_create_ranges_prop(dev_info_t *, ddi_acc_handle_t);
206 static boolean_t pcieb_is_pcie_device_type(dev_info_t *dip);
207 
208 /* interrupt related declarations */
209 static int	pcieb_msi_supported(dev_info_t *);
210 static int	pcieb_intr_attach(pcieb_devstate_t *pcieb);
211 static int	pcieb_intr_init(pcieb_devstate_t *pcieb_p, int intr_type);
212 static void	pcieb_intr_fini(pcieb_devstate_t *pcieb_p);
213 
214 int
215 _init(void)
216 {
217 	int e;
218 
219 	if ((e = ddi_soft_state_init(&pcieb_state, sizeof (pcieb_devstate_t),
220 	    1)) == 0 && (e = mod_install(&modlinkage)) != 0)
221 		ddi_soft_state_fini(&pcieb_state);
222 	return (e);
223 }
224 
225 int
226 _fini(void)
227 {
228 	int e;
229 
230 	if ((e = mod_remove(&modlinkage)) == 0) {
231 		ddi_soft_state_fini(&pcieb_state);
232 	}
233 	return (e);
234 }
235 
/*
 * _info(9E): return module information from the linkage structure.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
241 
242 /* ARGSUSED */
243 static int
244 pcieb_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
245 {
246 	minor_t		minor = getminor((dev_t)arg);
247 	int		instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
248 	pcieb_devstate_t *pcieb = ddi_get_soft_state(pcieb_state, instance);
249 	int		ret = DDI_SUCCESS;
250 
251 	switch (infocmd) {
252 	case DDI_INFO_DEVT2INSTANCE:
253 		*result = (void *)(intptr_t)instance;
254 		break;
255 	case DDI_INFO_DEVT2DEVINFO:
256 		if (pcieb == NULL) {
257 			ret = DDI_FAILURE;
258 			break;
259 		}
260 
261 		*result = (void *)pcieb->pcieb_dip;
262 		break;
263 	default:
264 		ret = DDI_FAILURE;
265 		break;
266 	}
267 
268 	return (ret);
269 }
270 
271 
272 /*ARGSUSED*/
273 static int
274 pcieb_probe(dev_info_t *devi)
275 {
276 	return (DDI_PROBE_SUCCESS);
277 }
278 
/*
 * attach(9E): handles DDI_ATTACH and DDI_RESUME.
 *
 * Attach path: verify the node is a PCIe/PCI bridge with an enabled
 * link, allocate per-instance soft state, initialize FM and mutexes,
 * set up power management, create the identification, "device_type"
 * and (if missing) "ranges" properties, initialize the common PCIe
 * code and interrupts, then apply platform/PLX workarounds.  Any
 * failure unwinds through pcieb_detach(DDI_DETACH).
 */
static int
pcieb_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int			instance;
	char			device_type[8];
	pcieb_devstate_t	*pcieb;
	pcie_bus_t		*bus_p = PCIE_DIP2UPBUS(devi);
	ddi_acc_handle_t	config_handle = bus_p->bus_cfg_hdl;

	switch (cmd) {
	case DDI_RESUME:
		(void) pcie_pwr_resume(devi);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);

	case DDI_ATTACH:
		break;
	}

	/* Refuse to attach to anything that is not a switch or bridge. */
	if (!(PCIE_IS_BDG(bus_p))) {
		PCIEB_DEBUG(DBG_ATTACH, devi, "This is not a switch or"
		" bridge\n");
		return (DDI_FAILURE);
	}

	/*
	 * If PCIE_LINKCTL_LINK_DISABLE bit in the PCIe Config
	 * Space (PCIe Capability Link Control Register) is set,
	 * then do not bind the driver.
	 */
	if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) & PCIE_LINKCTL_LINK_DISABLE)
		return (DDI_FAILURE);

	/*
	 * Allocate and get soft state structure.
	 */
	instance = ddi_get_instance(devi);
	if (ddi_soft_state_zalloc(pcieb_state, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	pcieb = ddi_get_soft_state(pcieb_state, instance);
	pcieb->pcieb_dip = devi;

	if ((pcieb_fm_init(pcieb)) != DDI_SUCCESS) {
		PCIEB_DEBUG(DBG_ATTACH, devi, "Failed in pcieb_fm_init\n");
		goto fail;
	}
	pcieb->pcieb_init_flags |= PCIEB_INIT_FM;

	/*
	 * NOTE(review): a failure before this point jumps to
	 * pcieb_detach(), which calls mutex_destroy() on the mutexes below
	 * even though they have not been initialized on that path —
	 * confirm this is benign.
	 */
	mutex_init(&pcieb->pcieb_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pcieb->pcieb_err_mutex, NULL, MUTEX_DRIVER,
	    (void *)pcieb->pcieb_fm_ibc);
	mutex_init(&pcieb->pcieb_peek_poke_mutex, NULL, MUTEX_DRIVER,
	    (void *)pcieb->pcieb_fm_ibc);

	/* create special properties for device identification */
	pcieb_id_props(pcieb);

	/*
	 * Power management setup. This also makes sure that switch/bridge
	 * is at D0 during attach.
	 */
	if (pwr_common_setup(devi) != DDI_SUCCESS) {
		PCIEB_DEBUG(DBG_PWR, devi, "pwr_common_setup failed\n");
		goto fail;
	}

	if (pcieb_pwr_setup(devi) != DDI_SUCCESS) {
		PCIEB_DEBUG(DBG_PWR, devi, "pxb_pwr_setup failed \n");
		goto fail;
	}

	/*
	 * Make sure the "device_type" property exists.
	 */
	if (pcieb_is_pcie_device_type(devi))
		(void) strcpy(device_type, "pciex");
	else
		(void) strcpy(device_type, "pci");

	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "device_type", device_type);

	/*
	 * Check whether the "ranges" property is present.
	 * Otherwise create the ranges property by reading
	 * the configuration registers
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "ranges") == 0) {
		pcieb_create_ranges_prop(devi, config_handle);
	}

	/* PCI-to-PCI side of the bridge gets latency/perf tuning. */
	if (PCIE_IS_PCI_BDG(bus_p))
		pcieb_set_pci_perf_parameters(devi, config_handle);

#ifdef PX_PLX
	pcieb_attach_plx_workarounds(pcieb);
#endif /* PX_PLX */

	if (pcie_init(devi, NULL) != DDI_SUCCESS)
		goto fail;

	/*
	 * Initialize interrupt handlers. Ignore return value.
	 */
	(void) pcieb_intr_attach(pcieb);

	(void) pcie_hpintr_enable(devi);

	/* Do any platform specific workarounds needed at this time */
	pcieb_plat_attach_workaround(devi);

	/*
	 * If this is a root port, determine and set the max payload size.
	 * Since this will involve scanning the fabric, all error enabling
	 * and sw workarounds should be in place before doing this.
	 */
	if (PCIE_IS_RP(bus_p))
		pcie_init_root_port_mps(devi);

	ddi_report_dev(devi);
	return (DDI_SUCCESS);

fail:
	/* Unwind whatever was set up above; detach tolerates partial init. */
	(void) pcieb_detach(devi, DDI_DETACH);
	return (DDI_FAILURE);
}
408 
/*
 * detach(9E): handles DDI_DETACH and DDI_SUSPEND.
 *
 * Teardown mirrors attach in reverse: quiesce hotplug interrupts,
 * remove interrupt handlers, uninitialize the common PCIe state,
 * remove attach-time properties, tear down FM and PM, destroy the
 * mutexes and free the soft state.  Also used by pcieb_attach() to
 * unwind a partial attach.
 */
static int
pcieb_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	pcieb_devstate_t *pcieb;
	int error = DDI_SUCCESS;

	switch (cmd) {
	case DDI_SUSPEND:
		error = pcie_pwr_suspend(devi);
		return (error);

	case DDI_DETACH:
		break;

	default:
		return (DDI_FAILURE);
	}

	pcieb = ddi_get_soft_state(pcieb_state, ddi_get_instance(devi));

	/* disable hotplug interrupt */
	(void) pcie_hpintr_disable(devi);

	/* remove interrupt handlers */
	pcieb_intr_fini(pcieb);

	/* uninitialize inband PCI-E HPC if present */
	(void) pcie_uninit(devi);

	(void) ddi_prop_remove(DDI_DEV_T_NONE, devi, "device_type");

	(void) ndi_prop_remove(DDI_DEV_T_NONE, pcieb->pcieb_dip,
	    "pcie_ce_mask");

	if (pcieb->pcieb_init_flags & PCIEB_INIT_FM)
		pcieb_fm_fini(pcieb);

	pcieb_pwr_teardown(devi);
	pwr_common_teardown(devi);

	mutex_destroy(&pcieb->pcieb_peek_poke_mutex);
	mutex_destroy(&pcieb->pcieb_err_mutex);
	mutex_destroy(&pcieb->pcieb_mutex);

	/*
	 * And finally free the per-pci soft state.
	 */
	ddi_soft_state_free(pcieb_state, ddi_get_instance(devi));

	return (DDI_SUCCESS);
}
460 
461 static int
462 pcieb_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
463     off_t offset, off_t len, caddr_t *vaddrp)
464 {
465 	dev_info_t *pdip;
466 
467 	if (PCIE_IS_RP(PCIE_DIP2BUS(dip)) && mp->map_handlep != NULL) {
468 		ddi_acc_impl_t *hdlp =
469 		    (ddi_acc_impl_t *)(mp->map_handlep)->ah_platform_private;
470 
471 		pcieb_set_prot_scan(dip, hdlp);
472 	}
473 	pdip = (dev_info_t *)DEVI(dip)->devi_parent;
474 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip, rdip, mp,
475 	    offset, len, vaddrp));
476 }
477 
/*
 * bus_ctl entry point.  Handles child naming (INITCHILD/UNINITCHILD),
 * REPORTDEV, peek/poke, and ATTACH/DETACH notifications, which drive
 * power-management holds and fabric error (pf_init/pf_fini) setup for
 * children.  REGSIZE/NREGS fall through to the "reg" property walk at
 * the bottom; everything else is passed up with ddi_ctlops().
 */
static int
pcieb_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	pci_regspec_t *drv_regp;
	int	reglen;
	int	rn;
	int	totreg;
	pcieb_devstate_t *pcieb = ddi_get_soft_state(pcieb_state,
	    ddi_get_instance(dip));
	struct detachspec *ds;
	struct attachspec *as;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);

		if (ddi_get_parent(rdip) == dip) {
			cmn_err(CE_CONT, "?PCIE-device: %s@%s, %s%d\n",
			    ddi_node_name(rdip), ddi_get_name_addr(rdip),
			    ddi_driver_name(rdip), ddi_get_instance(rdip));
		}

		/* Pass it up for fabric sync */
		(void) ddi_ctlops(dip, rdip, ctlop, arg, result);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (pcieb_initchild((dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		pcieb_uninitchild((dev_info_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_SIDDEV:
		/* Children are self-identifying. */
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);
		/* handled via the "reg" property walk below */
		break;

	case DDI_CTLOPS_PEEK:
	case DDI_CTLOPS_POKE:
		return (pcieb_plat_peekpoke(dip, rdip, ctlop, arg, result));
	case DDI_CTLOPS_ATTACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		as = (struct attachspec *)arg;
		switch (as->when) {
		case DDI_PRE:
			if (as->cmd == DDI_RESUME) {
				pcie_clear_errors(rdip);
				if (pcieb_plat_ctlops(rdip, ctlop, arg) !=
				    DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (as->cmd == DDI_ATTACH)
				return (pcie_pm_hold(dip));

			return (DDI_SUCCESS);

		case DDI_POST:
			if (as->cmd == DDI_ATTACH &&
			    as->result != DDI_SUCCESS) {
				/*
				 * Attach failed for the child device. The child
				 * driver may have made PM calls before the
				 * attach failed. pcie_pm_remove_child() should
				 * cleanup PM state and holds (if any)
				 * associated with the child device.
				 */
				return (pcie_pm_remove_child(dip, rdip));
			}

			if (as->result == DDI_SUCCESS) {
				pf_init(rdip, (void *)pcieb->pcieb_fm_ibc,
				    as->cmd);

				(void) pcieb_plat_ctlops(rdip, ctlop, arg);
			}

			/*
			 * For empty hotplug-capable slots, we should explicitly
			 * disable the errors, so that we won't panic upon
			 * unsupported hotplug messages.
			 */
			if ((!ddi_prop_exists(DDI_DEV_T_ANY, rdip,
			    DDI_PROP_DONTPASS, "hotplug-capable")) ||
			    ddi_get_child(rdip)) {
				(void) pcie_postattach_child(rdip);
				return (DDI_SUCCESS);
			}

			pcie_disable_errors(rdip);

			return (DDI_SUCCESS);
		default:
			break;
		}
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DETACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		ds = (struct detachspec *)arg;
		switch (ds->when) {
		case DDI_PRE:
			pf_fini(rdip, ds->cmd);
			return (DDI_SUCCESS);

		case DDI_POST:
			if (pcieb_plat_ctlops(rdip, ctlop, arg) != DDI_SUCCESS)
				return (DDI_FAILURE);
			if (ds->cmd == DDI_DETACH &&
			    ds->result == DDI_SUCCESS) {
				return (pcie_pm_remove_child(dip, rdip));
			}
			return (DDI_SUCCESS);
		default:
			break;
		}
		return (DDI_SUCCESS);
	default:
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}

	/*
	 * REGSIZE/NREGS: answer from the child's "reg" property.
	 */
	*(int *)result = 0;
	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "reg", (caddr_t)&drv_regp,
	    &reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	totreg = reglen / sizeof (pci_regspec_t);
	if (ctlop == DDI_CTLOPS_NREGS)
		*(int *)result = totreg;
	else if (ctlop == DDI_CTLOPS_REGSIZE) {
		rn = *(int *)arg;
		if (rn >= totreg) {
			kmem_free(drv_regp, reglen);
			return (DDI_FAILURE);
		}

		/* 64-bit size assembled from the hi/low size cells */
		*(off_t *)result = drv_regp[rn].pci_size_low |
		    ((uint64_t)drv_regp[rn].pci_size_hi << 32);
	}

	kmem_free(drv_regp, reglen);
	return (DDI_SUCCESS);
}
633 
634 /*
635  * name_child
636  *
637  * This function is called from init_child to name a node. It is
638  * also passed as a callback for node merging functions.
639  *
640  * return value: DDI_SUCCESS, DDI_FAILURE
641  */
static int
pcieb_name_child(dev_info_t *child, char *name, int namelen)
{
	pci_regspec_t *pci_rp;
	uint_t device, func;
	char **unit_addr;
	uint_t n;

	/*
	 * For .conf nodes, use unit-address property as name
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "unit-address", &unit_addr, &n) !=
		    DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN,
			    "cannot find unit-address in %s.conf",
			    ddi_driver_name(child));
			return (DDI_FAILURE);
		}
		/* Require exactly one non-empty unit-address string. */
		if (n != 1 || *unit_addr == NULL || **unit_addr == 0) {
			cmn_err(CE_WARN, "unit-address property in %s.conf"
			    " not well-formed", ddi_driver_name(child));
			ddi_prop_free(unit_addr);
			return (DDI_FAILURE);
		}
		(void) snprintf(name, namelen, "%s", *unit_addr);
		ddi_prop_free(unit_addr);
		return (DDI_SUCCESS);
	}

	/*
	 * Get the address portion of the node name based on
	 * the function and device number.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "reg", (int **)&pci_rp, &n) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* copy the device identifications */
	device = PCI_REG_DEV_G(pci_rp[0].pci_phys_hi);
	func = PCI_REG_FUNC_G(pci_rp[0].pci_phys_hi);

	/*
	 * With ARI forwarding enabled on the parent, device and function
	 * numbers collapse into a single 8-bit function number with the
	 * device number fixed at 0.
	 */
	if (pcie_ari_is_enabled(ddi_get_parent(child))
	    == PCIE_ARI_FORW_ENABLED) {
		func = (device << 3) | func;
		device = 0;
	}

	/* Function 0 is named "D"; other functions are named "D,F". */
	if (func != 0)
		(void) snprintf(name, namelen, "%x,%x", device, func);
	else
		(void) snprintf(name, namelen, "%x", device);

	ddi_prop_free(pci_rp);
	return (DDI_SUCCESS);
}
700 
/*
 * INITCHILD handler: name the child node, merge .conf prototype nodes
 * into their hardware counterparts, then (for real hardware nodes)
 * take a PM hold and perform common PCIe child initialization.  The
 * PM hold taken here is released on every exit path past it via the
 * "cleanup" label.
 */
static int
pcieb_initchild(dev_info_t *child)
{
	char name[MAXNAMELEN];
	int result = DDI_FAILURE;
	pcieb_devstate_t *pcieb =
	    (pcieb_devstate_t *)ddi_get_soft_state(pcieb_state,
	    ddi_get_instance(ddi_get_parent(child)));

	/*
	 * Name the child
	 */
	if (pcieb_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS) {
		result = DDI_FAILURE;
		goto done;
	}
	ddi_set_name_addr(child, name);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, pcieb_name_child) == DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			result = DDI_FAILURE;
			goto done;
		}

		/* workaround for ddivs to run under PCI-E */
		if (pci_allow_pseudo_children) {
			result = DDI_SUCCESS;
			goto done;
		}

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ddi_set_name_addr(child, NULL);
		result = DDI_NOT_WELL_FORMED;
		goto done;
	}

	/* platform specific initchild */
	pcieb_plat_initchild(child);

	if (pcie_pm_hold(pcieb->pcieb_dip) != DDI_SUCCESS) {
		PCIEB_DEBUG(DBG_PWR, pcieb->pcieb_dip,
		    "INITCHILD: px_pm_hold failed\n");
		result = DDI_FAILURE;
		goto done;
	}
	/* Any return from here must call pcie_pm_release */

	/*
	 * If configuration registers were previously saved by
	 * child (before it entered D3), then let the child do the
	 * restore to set up the config regs as it'll first need to
	 * power the device out of D3.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "config-regs-saved-by-child") == 1) {
		PCIEB_DEBUG(DBG_PWR, ddi_get_parent(child),
		    "INITCHILD: config regs to be restored by child"
		    " for %s@%s\n", ddi_node_name(child),
		    ddi_get_name_addr(child));

		result = DDI_SUCCESS;
		goto cleanup;
	}

	PCIEB_DEBUG(DBG_PWR, ddi_get_parent(child),
	    "INITCHILD: config regs setup for %s@%s\n",
	    ddi_node_name(child), ddi_get_name_addr(child));

	pcie_init_dom(child);

	if (pcie_initchild(child) != DDI_SUCCESS) {
		result = DDI_FAILURE;
		pcie_fini_dom(child);
		goto cleanup;
	}

#ifdef PX_PLX
	if (pcieb_init_plx_workarounds(pcieb, child) == DDI_FAILURE) {
		result = DDI_FAILURE;
		pcie_fini_dom(child);
		goto cleanup;
	}
#endif /* PX_PLX */

	result = DDI_SUCCESS;
cleanup:
	pcie_pm_release(pcieb->pcieb_dip);
done:
	return (result);
}
814 
/*
 * UNINITCHILD handler: undo pcieb_initchild() — common PCIe and
 * platform teardown first, then strip the node's name, minor nodes and
 * properties so it reverts to prototype form.
 */
static void
pcieb_uninitchild(dev_info_t *dip)
{

	pcie_uninitchild(dip);

	pcieb_plat_uninitchild(dip);

	ddi_set_name_addr(dip, NULL);

	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	ddi_remove_minor_node(dip, NULL);

	ddi_prop_remove_all(dip);
}
832 
833 static boolean_t
834 pcieb_is_pcie_device_type(dev_info_t *dip)
835 {
836 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
837 
838 	if (PCIE_IS_SW(bus_p) || PCIE_IS_RP(bus_p) || PCIE_IS_PCI2PCIE(bus_p))
839 		return (B_TRUE);
840 
841 	return (B_FALSE);
842 }
843 
/*
 * Attach this bridge's own interrupt handling.  Prefers MSI when both
 * MSI and FIXED are supported, falling back to FIXED if MSI setup
 * fails; at most one interrupt type is attached.  For root ports that
 * end up without MSI, pcieb_no_aer_msi is set so error handling can
 * use an alternate reporting mechanism.
 */
static int
pcieb_intr_attach(pcieb_devstate_t *pcieb)
{
	int			intr_types;
	dev_info_t		*dip = pcieb->pcieb_dip;

	/* Allow platform specific code to do any initialization first */
	pcieb_plat_intr_attach(pcieb);

	/*
	 * Initialize interrupt handlers.
	 * If both MSI and FIXED are supported, try to attach MSI first.
	 * If MSI fails for any reason, then try FIXED, but only allow one
	 * type to be attached.
	 */
	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
		PCIEB_DEBUG(DBG_ATTACH, dip, "ddi_intr_get_supported_types"
		    " failed\n");
		goto FAIL;
	}

	if ((intr_types & DDI_INTR_TYPE_MSI) &&
	    (pcieb_msi_supported(dip) == DDI_SUCCESS)) {
		/* On success, narrow intr_types so FIXED is not also tried. */
		if (pcieb_intr_init(pcieb, DDI_INTR_TYPE_MSI) == DDI_SUCCESS)
			intr_types = DDI_INTR_TYPE_MSI;
		else {
			PCIEB_DEBUG(DBG_ATTACH, dip, "Unable to attach MSI"
			    " handler\n");
		}
	}

	if (intr_types != DDI_INTR_TYPE_MSI) {
		/*
		 * MSIs are not supported or MSI initialization failed. For Root
		 * Ports mark this so error handling might try to fallback to
		 * some other mechanism if available (machinecheck etc.).
		 */
		if (PCIE_IS_RP(PCIE_DIP2UPBUS(dip)))
			pcieb->pcieb_no_aer_msi = B_TRUE;
	}

	if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (pcieb_intr_init(pcieb, DDI_INTR_TYPE_FIXED) !=
		    DDI_SUCCESS) {
			PCIEB_DEBUG(DBG_ATTACH, dip,
			    "Unable to attach INTx handler\n");
			goto FAIL;
		}
	}
	return (DDI_SUCCESS);

FAIL:
	return (DDI_FAILURE);
}
898 
899 /*
900  * This function initializes internally generated interrupts only.
901  * It does not affect any interrupts generated by downstream devices
902  * or the forwarding of them.
903  *
904  * Enable Device Specific Interrupts or Hotplug features here.
905  * Enabling features may change how many interrupts are requested
906  * by the device.  If features are not enabled first, the
907  * device might not ask for any interrupts.
908  */
909 
910 static int
911 pcieb_intr_init(pcieb_devstate_t *pcieb, int intr_type)
912 {
913 	dev_info_t	*dip = pcieb->pcieb_dip;
914 	int		nintrs, request, count, x;
915 	int		intr_cap = 0;
916 	int		inum = 0;
917 	int		ret, hp_msi_off;
918 	pcie_bus_t	*bus_p = PCIE_DIP2UPBUS(dip);
919 	uint16_t	vendorid = bus_p->bus_dev_ven_id & 0xFFFF;
920 	boolean_t	is_hp = B_FALSE;
921 	boolean_t	is_pme = B_FALSE;
922 
923 	PCIEB_DEBUG(DBG_ATTACH, dip, "pcieb_intr_init: Attaching %s handler\n",
924 	    (intr_type == DDI_INTR_TYPE_MSI) ? "MSI" : "INTx");
925 
926 	request = 0;
927 	if (PCIE_IS_HOTPLUG_ENABLED(dip)) {
928 		request++;
929 		is_hp = B_TRUE;
930 	}
931 
932 	/*
933 	 * Hotplug and PME share the same MSI vector. If hotplug is not
934 	 * supported check if MSI is needed for PME.
935 	 */
936 	if ((intr_type == DDI_INTR_TYPE_MSI) && PCIE_IS_RP(bus_p) &&
937 	    (vendorid == NVIDIA_VENDOR_ID)) {
938 		is_pme = B_TRUE;
939 		if (!is_hp)
940 			request++;
941 	}
942 
943 	/*
944 	 * Setup MSI if this device is a Rootport and has AER. Currently no
945 	 * SPARC Root Port supports fabric errors being reported through it.
946 	 */
947 	if (intr_type == DDI_INTR_TYPE_MSI) {
948 		if (PCIE_IS_RP(bus_p) && PCIE_HAS_AER(bus_p))
949 			request++;
950 	}
951 
952 	if (request == 0)
953 		return (DDI_SUCCESS);
954 
955 	/*
956 	 * Get number of supported interrupts.
957 	 *
958 	 * Several Bridges/Switches will not have this property set, resulting
959 	 * in a FAILURE, if the device is not configured in a way that
960 	 * interrupts are needed. (eg. hotplugging)
961 	 */
962 	ret = ddi_intr_get_nintrs(dip, intr_type, &nintrs);
963 	if ((ret != DDI_SUCCESS) || (nintrs == 0)) {
964 		PCIEB_DEBUG(DBG_ATTACH, dip, "ddi_intr_get_nintrs ret:%d"
965 		    " req:%d\n", ret, nintrs);
966 		return (DDI_FAILURE);
967 	}
968 
969 	PCIEB_DEBUG(DBG_ATTACH, dip, "bdf 0x%x: ddi_intr_get_nintrs: nintrs %d",
970 	    " request %d\n", bus_p->bus_bdf, nintrs, request);
971 
972 	if (request > nintrs)
973 		request = nintrs;
974 
975 	/* Allocate an array of interrupt handlers */
976 	pcieb->pcieb_htable_size = sizeof (ddi_intr_handle_t) * request;
977 	pcieb->pcieb_htable = kmem_zalloc(pcieb->pcieb_htable_size,
978 	    KM_SLEEP);
979 	pcieb->pcieb_init_flags |= PCIEB_INIT_HTABLE;
980 
981 	ret = ddi_intr_alloc(dip, pcieb->pcieb_htable, intr_type, inum,
982 	    request, &count, DDI_INTR_ALLOC_NORMAL);
983 	if ((ret != DDI_SUCCESS) || (count == 0)) {
984 		PCIEB_DEBUG(DBG_ATTACH, dip, "ddi_intr_alloc() ret: %d ask: %d"
985 		    " actual: %d\n", ret, request, count);
986 		goto FAIL;
987 	}
988 	pcieb->pcieb_init_flags |= PCIEB_INIT_ALLOC;
989 
990 	/* Save the actual number of interrupts allocated */
991 	pcieb->pcieb_intr_count = count;
992 	if (count < request) {
993 		PCIEB_DEBUG(DBG_ATTACH, dip, "bdf 0%x: Requested Intr: %d"
994 		    " Received: %d\n", bus_p->bus_bdf, request, count);
995 	}
996 
997 	/*
998 	 * NVidia (MCP55 and other) chipsets have a errata that if the number
999 	 * of requested MSI intrs is not allocated we have to fall back to INTx.
1000 	 */
1001 	if (intr_type == DDI_INTR_TYPE_MSI) {
1002 		if (PCIE_IS_RP(bus_p) && (vendorid == NVIDIA_VENDOR_ID)) {
1003 			if (request != count)
1004 				goto FAIL;
1005 		}
1006 	}
1007 
1008 	/* Get interrupt priority */
1009 	ret = ddi_intr_get_pri(pcieb->pcieb_htable[0],
1010 	    &pcieb->pcieb_intr_priority);
1011 	if (ret != DDI_SUCCESS) {
1012 		PCIEB_DEBUG(DBG_ATTACH, dip, "ddi_intr_get_pri() ret: %d\n",
1013 		    ret);
1014 		goto FAIL;
1015 	}
1016 
1017 	if (pcieb->pcieb_intr_priority >= LOCK_LEVEL) {
1018 		pcieb->pcieb_intr_priority = LOCK_LEVEL - 1;
1019 		ret = ddi_intr_set_pri(pcieb->pcieb_htable[0],
1020 		    pcieb->pcieb_intr_priority);
1021 		if (ret != DDI_SUCCESS) {
1022 			PCIEB_DEBUG(DBG_ATTACH, dip, "ddi_intr_set_pri() ret:"
1023 			" %d\n", ret);
1024 
1025 			goto FAIL;
1026 		}
1027 	}
1028 
1029 	mutex_init(&pcieb->pcieb_intr_mutex, NULL, MUTEX_DRIVER, NULL);
1030 
1031 	pcieb->pcieb_init_flags |= PCIEB_INIT_MUTEX;
1032 
1033 	for (count = 0; count < pcieb->pcieb_intr_count; count++) {
1034 		ret = ddi_intr_add_handler(pcieb->pcieb_htable[count],
1035 		    pcieb_intr_handler, (caddr_t)pcieb,
1036 		    (caddr_t)(uintptr_t)(inum + count));
1037 
1038 		if (ret != DDI_SUCCESS) {
1039 			PCIEB_DEBUG(DBG_ATTACH, dip, "Cannot add "
1040 			    "interrupt(%d)\n", ret);
1041 			break;
1042 		}
1043 	}
1044 
1045 	/* If unsucessful, remove the added handlers */
1046 	if (ret != DDI_SUCCESS) {
1047 		for (x = 0; x < count; x++) {
1048 			(void) ddi_intr_remove_handler(pcieb->pcieb_htable[x]);
1049 		}
1050 		goto FAIL;
1051 	}
1052 
1053 	pcieb->pcieb_init_flags |= PCIEB_INIT_HANDLER;
1054 
1055 	(void) ddi_intr_get_cap(pcieb->pcieb_htable[0], &intr_cap);
1056 
1057 	/*
1058 	 * Get this intr lock because we are not quite ready to handle
1059 	 * interrupts immediately after enabling it. The MSI multi register
1060 	 * gets programmed in ddi_intr_enable after which we need to get the
1061 	 * MSI offsets for Hotplug/AER.
1062 	 */
1063 	mutex_enter(&pcieb->pcieb_intr_mutex);
1064 
1065 	if (intr_cap & DDI_INTR_FLAG_BLOCK) {
1066 		(void) ddi_intr_block_enable(pcieb->pcieb_htable,
1067 		    pcieb->pcieb_intr_count);
1068 		pcieb->pcieb_init_flags |= PCIEB_INIT_BLOCK;
1069 	} else {
1070 		for (count = 0; count < pcieb->pcieb_intr_count; count++) {
1071 			(void) ddi_intr_enable(pcieb->pcieb_htable[count]);
1072 		}
1073 	}
1074 	pcieb->pcieb_init_flags |= PCIEB_INIT_ENABLE;
1075 
1076 	/* Save the interrupt type */
1077 	pcieb->pcieb_intr_type = intr_type;
1078 
1079 	/* Get the MSI offset for hotplug/PME from the PCIe cap reg */
1080 	if (intr_type == DDI_INTR_TYPE_MSI) {
1081 		hp_msi_off = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL,
1082 		    bus_p->bus_pcie_off, PCIE_PCIECAP) &
1083 		    PCIE_PCIECAP_INT_MSG_NUM;
1084 
1085 		if (hp_msi_off >= count) {
1086 			PCIEB_DEBUG(DBG_ATTACH, dip, "MSI number %d in PCIe "
1087 			    "cap > max allocated %d\n", hp_msi_off, count);
1088 			mutex_exit(&pcieb->pcieb_intr_mutex);
1089 			goto FAIL;
1090 		}
1091 
1092 		if (is_hp)
1093 			pcieb->pcieb_isr_tab[hp_msi_off] |= PCIEB_INTR_SRC_HP;
1094 
1095 		if (is_pme)
1096 			pcieb->pcieb_isr_tab[hp_msi_off] |= PCIEB_INTR_SRC_PME;
1097 	} else {
1098 		/* INTx handles only Hotplug interrupts */
1099 		if (is_hp)
1100 			pcieb->pcieb_isr_tab[0] |= PCIEB_INTR_SRC_HP;
1101 	}
1102 
1103 
1104 	/*
1105 	 * Get the MSI offset for errors from the AER Root Error status
1106 	 * register.
1107 	 */
1108 	if ((intr_type == DDI_INTR_TYPE_MSI) && PCIE_IS_RP(bus_p)) {
1109 		if (PCIE_HAS_AER(bus_p)) {
1110 			int aer_msi_off;
1111 			aer_msi_off = (PCI_XCAP_GET32(bus_p->bus_cfg_hdl, NULL,
1112 			    bus_p->bus_aer_off, PCIE_AER_RE_STS) >>
1113 			    PCIE_AER_RE_STS_MSG_NUM_SHIFT) &
1114 			    PCIE_AER_RE_STS_MSG_NUM_MASK;
1115 
1116 			if (aer_msi_off >= count) {
1117 				PCIEB_DEBUG(DBG_ATTACH, dip, "MSI number %d in"
1118 				    " AER cap > max allocated %d\n",
1119 				    aer_msi_off, count);
1120 				mutex_exit(&pcieb->pcieb_intr_mutex);
1121 				goto FAIL;
1122 			}
1123 			pcieb->pcieb_isr_tab[aer_msi_off] |= PCIEB_INTR_SRC_AER;
1124 		} else {
1125 			/*
1126 			 * This RP does not have AER. Fallback to the
1127 			 * SERR+Machinecheck approach if available.
1128 			 */
1129 			pcieb->pcieb_no_aer_msi = B_TRUE;
1130 		}
1131 	}
1132 
1133 	mutex_exit(&pcieb->pcieb_intr_mutex);
1134 	return (DDI_SUCCESS);
1135 
1136 FAIL:
1137 	pcieb_intr_fini(pcieb);
1138 	return (DDI_FAILURE);
1139 }
1140 
/*
 * Tear down the interrupt resources set up by the interrupt attach path
 * above.  The PCIEB_INIT_* bits in pcieb_init_flags record which setup
 * stages completed, so this is safe to call from any partial-failure
 * path as well as from detach.
 */
static void
pcieb_intr_fini(pcieb_devstate_t *pcieb)
{
	int x;
	int count = pcieb->pcieb_intr_count;
	int flags = pcieb->pcieb_init_flags;

	/* If interrupts were block-enabled, block-disable them first. */
	if ((flags & PCIEB_INIT_ENABLE) &&
	    (flags & PCIEB_INIT_BLOCK)) {
		(void) ddi_intr_block_disable(pcieb->pcieb_htable, count);
		flags &= ~(PCIEB_INIT_ENABLE |
		    PCIEB_INIT_BLOCK);
	}

	/*
	 * NOTE(review): the mutex is destroyed before the per-vector
	 * disable/remove below; this assumes no handler can be running
	 * at this point -- confirm against pcieb_intr_handler's locking.
	 */
	if (flags & PCIEB_INIT_MUTEX)
		mutex_destroy(&pcieb->pcieb_intr_mutex);

	/* Per-vector teardown: disable, remove the handler, free it. */
	for (x = 0; x < count; x++) {
		if (flags & PCIEB_INIT_ENABLE)
			(void) ddi_intr_disable(pcieb->pcieb_htable[x]);

		if (flags & PCIEB_INIT_HANDLER)
			(void) ddi_intr_remove_handler(pcieb->pcieb_htable[x]);

		if (flags & PCIEB_INIT_ALLOC)
			(void) ddi_intr_free(pcieb->pcieb_htable[x]);
	}

	flags &= ~(PCIEB_INIT_ENABLE | PCIEB_INIT_HANDLER | PCIEB_INIT_ALLOC |
	    PCIEB_INIT_MUTEX);

	if (flags & PCIEB_INIT_HTABLE)
		kmem_free(pcieb->pcieb_htable, pcieb->pcieb_htable_size);

	flags &= ~PCIEB_INIT_HTABLE;

	/* Clear only the stages that were actually torn down above. */
	pcieb->pcieb_init_flags &= flags;
}
1179 
1180 /*
1181  * Checks if this device needs MSIs enabled or not.
1182  */
1183 /*ARGSUSED*/
1184 static int
1185 pcieb_msi_supported(dev_info_t *dip)
1186 {
1187 	return ((pcieb_enable_msi && pcieb_plat_msi_supported(dip)) ?
1188 	    DDI_SUCCESS: DDI_FAILURE);
1189 }
1190 
1191 /*ARGSUSED*/
1192 static int
1193 pcieb_fm_init_child(dev_info_t *dip, dev_info_t *tdip, int cap,
1194     ddi_iblock_cookie_t *ibc)
1195 {
1196 	pcieb_devstate_t  *pcieb = ddi_get_soft_state(pcieb_state,
1197 	    ddi_get_instance(dip));
1198 
1199 	ASSERT(ibc != NULL);
1200 	*ibc = pcieb->pcieb_fm_ibc;
1201 
1202 	return (DEVI(dip)->devi_fmhdl->fh_cap | DDI_FM_ACCCHK_CAPABLE |
1203 	    DDI_FM_DMACHK_CAPABLE);
1204 }
1205 
1206 static int
1207 pcieb_fm_init(pcieb_devstate_t *pcieb_p)
1208 {
1209 	dev_info_t	*dip = pcieb_p->pcieb_dip;
1210 	int		fm_cap = DDI_FM_EREPORT_CAPABLE;
1211 
1212 	/*
1213 	 * Request our capability level and get our parents capability
1214 	 * and ibc.
1215 	 */
1216 	ddi_fm_init(dip, &fm_cap, &pcieb_p->pcieb_fm_ibc);
1217 
1218 	return (DDI_SUCCESS);
1219 }
1220 
1221 /*
1222  * Breakdown our FMA resources
1223  */
1224 static void
1225 pcieb_fm_fini(pcieb_devstate_t *pcieb_p)
1226 {
1227 	/*
1228 	 * Clean up allocated fm structures
1229 	 */
1230 	ddi_fm_fini(pcieb_p->pcieb_dip);
1231 }
1232 
1233 static int
1234 pcieb_open(dev_t *devp, int flags, int otyp, cred_t *credp)
1235 {
1236 	int		inst = PCI_MINOR_NUM_TO_INSTANCE(getminor(*devp));
1237 	pcieb_devstate_t	*pcieb = ddi_get_soft_state(pcieb_state, inst);
1238 	int	rv;
1239 
1240 	if (pcieb == NULL)
1241 		return (ENXIO);
1242 
1243 	mutex_enter(&pcieb->pcieb_mutex);
1244 	rv = pcie_open(pcieb->pcieb_dip, devp, flags, otyp, credp);
1245 	mutex_exit(&pcieb->pcieb_mutex);
1246 
1247 	return (rv);
1248 }
1249 
1250 static int
1251 pcieb_close(dev_t dev, int flags, int otyp, cred_t *credp)
1252 {
1253 	int		inst = PCI_MINOR_NUM_TO_INSTANCE(getminor(dev));
1254 	pcieb_devstate_t	*pcieb = ddi_get_soft_state(pcieb_state, inst);
1255 	int	rv;
1256 
1257 	if (pcieb == NULL)
1258 		return (ENXIO);
1259 
1260 	mutex_enter(&pcieb->pcieb_mutex);
1261 	rv = pcie_close(pcieb->pcieb_dip, dev, flags, otyp, credp);
1262 	mutex_exit(&pcieb->pcieb_mutex);
1263 
1264 	return (rv);
1265 }
1266 
1267 static int
1268 pcieb_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1269 	int *rvalp)
1270 {
1271 	int		inst = PCI_MINOR_NUM_TO_INSTANCE(getminor(dev));
1272 	pcieb_devstate_t	*pcieb = ddi_get_soft_state(pcieb_state, inst);
1273 	int		rv;
1274 
1275 	if (pcieb == NULL)
1276 		return (ENXIO);
1277 
1278 	/* To handle devctl and hotplug related ioctls */
1279 	rv = pcie_ioctl(pcieb->pcieb_dip, dev, cmd, arg, mode, credp, rvalp);
1280 
1281 	return (rv);
1282 }
1283 
1284 /*
1285  * Common interrupt handler for hotplug, PME and errors.
1286  */
1287 static uint_t
1288 pcieb_intr_handler(caddr_t arg1, caddr_t arg2)
1289 {
1290 	pcieb_devstate_t *pcieb_p = (pcieb_devstate_t *)arg1;
1291 	dev_info_t	*dip = pcieb_p->pcieb_dip;
1292 	ddi_fm_error_t	derr;
1293 	int		sts = 0;
1294 	int		ret = DDI_INTR_UNCLAIMED;
1295 	int		isrc;
1296 
1297 	if (!(pcieb_p->pcieb_init_flags & PCIEB_INIT_ENABLE))
1298 		goto FAIL;
1299 
1300 	mutex_enter(&pcieb_p->pcieb_intr_mutex);
1301 	isrc = pcieb_p->pcieb_isr_tab[(int)(uintptr_t)arg2];
1302 	mutex_exit(&pcieb_p->pcieb_intr_mutex);
1303 
1304 	PCIEB_DEBUG(DBG_INTR, dip, "Received intr number %d\n",
1305 	    (int)(uintptr_t)arg2);
1306 
1307 	if (isrc == PCIEB_INTR_SRC_UNKNOWN)
1308 		goto FAIL;
1309 
1310 	if (isrc & PCIEB_INTR_SRC_HP)
1311 		ret = pcie_intr(dip);
1312 
1313 	if (isrc & PCIEB_INTR_SRC_PME)
1314 		ret = DDI_INTR_CLAIMED;
1315 
1316 	/* AER Error */
1317 	if (isrc & PCIEB_INTR_SRC_AER) {
1318 		/*
1319 		 *  If MSI is shared with PME/hotplug then check Root Error
1320 		 *  Status Reg before claiming it. For now it's ok since
1321 		 *  we know we get 2 MSIs.
1322 		 */
1323 		ret = DDI_INTR_CLAIMED;
1324 		bzero(&derr, sizeof (ddi_fm_error_t));
1325 		derr.fme_version = DDI_FME_VERSION;
1326 		mutex_enter(&pcieb_p->pcieb_peek_poke_mutex);
1327 		mutex_enter(&pcieb_p->pcieb_err_mutex);
1328 
1329 		pf_eh_enter(PCIE_DIP2BUS(dip));
1330 		PCIE_ROOT_EH_SRC(PCIE_DIP2PFD(dip))->intr_type =
1331 		    PF_INTR_TYPE_AER;
1332 
1333 		if ((DEVI(dip)->devi_fmhdl->fh_cap) & DDI_FM_EREPORT_CAPABLE)
1334 			sts = pf_scan_fabric(dip, &derr, NULL);
1335 		pf_eh_exit(PCIE_DIP2BUS(dip));
1336 
1337 		mutex_exit(&pcieb_p->pcieb_err_mutex);
1338 		mutex_exit(&pcieb_p->pcieb_peek_poke_mutex);
1339 		if (pcieb_die & sts)
1340 			fm_panic("%s-%d: PCI(-X) Express Fatal Error. (0x%x)",
1341 			    ddi_driver_name(dip), ddi_get_instance(dip), sts);
1342 	}
1343 FAIL:
1344 	return (ret);
1345 }
1346 
1347 /*
1348  * Some PCI-X to PCI-E bridges do not support full 64-bit addressing on the
1349  * PCI-X side of the bridge.  We build a special version of this driver for
1350  * those bridges, which uses PCIEB_ADDR_LIMIT_LO and/or PCIEB_ADDR_LIMIT_HI
1351  * to define the range of values which the chip can handle.  The code below
1352  * then clamps the DMA address range supplied by the driver, preventing the
1353  * PCI-E nexus driver from allocating any memory the bridge can't deal
1354  * with.
1355  */
1356 static int
1357 pcieb_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
1358 	ddi_dma_attr_t *attr_p, int (*waitfp)(caddr_t), caddr_t arg,
1359 	ddi_dma_handle_t *handlep)
1360 {
1361 	int		ret;
1362 #ifdef	PCIEB_BCM
1363 	uint64_t	lim;
1364 
1365 	/*
1366 	 * If the leaf device's limits are outside than what the Broadcom
1367 	 * bridge can handle, we need to clip the values passed up the chain.
1368 	 */
1369 	lim = attr_p->dma_attr_addr_lo;
1370 	attr_p->dma_attr_addr_lo = MAX(lim, PCIEB_ADDR_LIMIT_LO);
1371 
1372 	lim = attr_p->dma_attr_addr_hi;
1373 	attr_p->dma_attr_addr_hi = MIN(lim, PCIEB_ADDR_LIMIT_HI);
1374 
1375 #endif	/* PCIEB_BCM */
1376 
1377 	/*
1378 	 * This is a software workaround to fix the Broadcom 5714/5715 PCIe-PCI
1379 	 * bridge prefetch bug. Intercept the DMA alloc handle request and set
1380 	 * PX_DMAI_FLAGS_MAP_BUFZONE flag in the handle. If this flag is set,
1381 	 * the px nexus driver will allocate an extra page & make it valid one,
1382 	 * for any DVMA request that comes from any of the Broadcom bridge child
1383 	 * devices.
1384 	 */
1385 	if ((ret = ddi_dma_allochdl(dip, rdip, attr_p, waitfp, arg,
1386 	    handlep)) == DDI_SUCCESS) {
1387 		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)*handlep;
1388 #ifdef	PCIEB_BCM
1389 		mp->dmai_inuse |= PX_DMAI_FLAGS_MAP_BUFZONE;
1390 #endif	/* PCIEB_BCM */
1391 		/*
1392 		 * For a given rdip, update mp->dmai_bdf with the bdf value
1393 		 * of pcieb's immediate child or secondary bus-id of the
1394 		 * PCIe2PCI bridge.
1395 		 */
1396 		mp->dmai_minxfer = pcie_get_bdf_for_dma_xfer(dip, rdip);
1397 	}
1398 
1399 	return (ret);
1400 }
1401 
1402 /*
1403  * FDVMA feature is not supported for any child device of Broadcom 5714/5715
1404  * PCIe-PCI bridge due to prefetch bug. Return failure immediately, so that
1405  * these drivers will switch to regular DVMA path.
1406  */
1407 /*ARGSUSED*/
1408 static int
1409 pcieb_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
1410 	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
1411 	uint_t cache_flags)
1412 {
1413 	int	ret;
1414 
1415 #ifdef	PCIEB_BCM
1416 	if (cmd == DDI_DMA_RESERVE)
1417 		return (DDI_FAILURE);
1418 #endif	/* PCIEB_BCM */
1419 
1420 	if (((ret = ddi_dma_mctl(dip, rdip, handle, cmd, offp, lenp, objp,
1421 	    cache_flags)) == DDI_SUCCESS) && (cmd == DDI_DMA_RESERVE)) {
1422 		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)*objp;
1423 
1424 		/*
1425 		 * For a given rdip, update mp->dmai_bdf with the bdf value
1426 		 * of pcieb's immediate child or secondary bus-id of the
1427 		 * PCIe2PCI bridge.
1428 		 */
1429 		mp->dmai_minxfer = pcie_get_bdf_for_dma_xfer(dip, rdip);
1430 	}
1431 
1432 	return (ret);
1433 }
1434 
1435 static int
1436 pcieb_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
1437     ddi_intr_handle_impl_t *hdlp, void *result)
1438 {
1439 	return (pcieb_plat_intr_ops(dip, rdip, intr_op, hdlp, result));
1440 
1441 }
1442 
1443 /*
1444  * Power management related initialization specific to pcieb.
1445  * Called by pcieb_attach()
1446  */
1447 static int
1448 pcieb_pwr_setup(dev_info_t *dip)
1449 {
1450 	char *comp_array[5];
1451 	int i;
1452 	ddi_acc_handle_t conf_hdl;
1453 	uint16_t pmcap, cap_ptr;
1454 	pcie_pwr_t *pwr_p;
1455 
1456 	/* Some platforms/devices may choose to disable PM */
1457 	if (pcieb_plat_pwr_disable(dip)) {
1458 		(void) pcieb_pwr_disable(dip);
1459 		return (DDI_SUCCESS);
1460 	}
1461 
1462 	ASSERT(PCIE_PMINFO(dip));
1463 	pwr_p = PCIE_NEXUS_PMINFO(dip);
1464 	ASSERT(pwr_p);
1465 
1466 	/* Code taken from pci_pci driver */
1467 	if (pci_config_setup(dip, &pwr_p->pwr_conf_hdl) != DDI_SUCCESS) {
1468 		PCIEB_DEBUG(DBG_PWR, dip, "pcieb_pwr_setup: pci_config_setup "
1469 		    "failed\n");
1470 		return (DDI_FAILURE);
1471 	}
1472 	conf_hdl = pwr_p->pwr_conf_hdl;
1473 
1474 	/*
1475 	 * Walk the capabilities searching for a PM entry.
1476 	 */
1477 	if ((PCI_CAP_LOCATE(conf_hdl, PCI_CAP_ID_PM, &cap_ptr)) ==
1478 	    DDI_FAILURE) {
1479 		PCIEB_DEBUG(DBG_PWR, dip, "switch/bridge does not support PM. "
1480 		    " PCI PM data structure not found in config header\n");
1481 		pci_config_teardown(&conf_hdl);
1482 		return (DDI_SUCCESS);
1483 	}
1484 	/*
1485 	 * Save offset to pmcsr for future references.
1486 	 */
1487 	pwr_p->pwr_pmcsr_offset = cap_ptr + PCI_PMCSR;
1488 	pmcap = PCI_CAP_GET16(conf_hdl, NULL, cap_ptr, PCI_PMCAP);
1489 	if (pmcap & PCI_PMCAP_D1) {
1490 		PCIEB_DEBUG(DBG_PWR, dip, "D1 state supported\n");
1491 		pwr_p->pwr_pmcaps |= PCIE_SUPPORTS_D1;
1492 	}
1493 	if (pmcap & PCI_PMCAP_D2) {
1494 		PCIEB_DEBUG(DBG_PWR, dip, "D2 state supported\n");
1495 		pwr_p->pwr_pmcaps |= PCIE_SUPPORTS_D2;
1496 	}
1497 
1498 	i = 0;
1499 	comp_array[i++] = "NAME=PCIe switch/bridge PM";
1500 	comp_array[i++] = "0=Power Off (D3)";
1501 	if (pwr_p->pwr_pmcaps & PCIE_SUPPORTS_D2)
1502 		comp_array[i++] = "1=D2";
1503 	if (pwr_p->pwr_pmcaps & PCIE_SUPPORTS_D1)
1504 		comp_array[i++] = "2=D1";
1505 	comp_array[i++] = "3=Full Power D0";
1506 
1507 	/*
1508 	 * Create pm-components property, if it does not exist already.
1509 	 */
1510 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
1511 	    "pm-components", comp_array, i) != DDI_PROP_SUCCESS) {
1512 		PCIEB_DEBUG(DBG_PWR, dip, "could not create pm-components "
1513 		    " prop\n");
1514 		pci_config_teardown(&conf_hdl);
1515 		return (DDI_FAILURE);
1516 	}
1517 	return (pcieb_pwr_init_and_raise(dip, pwr_p));
1518 }
1519 
1520 /*
1521  * undo whatever is done in pcieb_pwr_setup. called by pcieb_detach()
1522  */
1523 static void
1524 pcieb_pwr_teardown(dev_info_t *dip)
1525 {
1526 	pcie_pwr_t	*pwr_p;
1527 
1528 	if (!PCIE_PMINFO(dip) || !(pwr_p = PCIE_NEXUS_PMINFO(dip)))
1529 		return;
1530 
1531 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "pm-components");
1532 	if (pwr_p->pwr_conf_hdl)
1533 		pci_config_teardown(&pwr_p->pwr_conf_hdl);
1534 }
1535 
1536 /*
1537  * Initializes the power level and raise the power to D0, if it is
1538  * not at D0.
1539  */
1540 static int
1541 pcieb_pwr_init_and_raise(dev_info_t *dip, pcie_pwr_t *pwr_p)
1542 {
1543 	uint16_t pmcsr;
1544 	int ret = DDI_SUCCESS;
1545 
1546 	/*
1547 	 * Intialize our power level from PMCSR. The common code initializes
1548 	 * this to UNKNOWN. There is no guarantee that we will be at full
1549 	 * power at attach. If we are not at D0, raise the power.
1550 	 */
1551 	pmcsr = pci_config_get16(pwr_p->pwr_conf_hdl, pwr_p->pwr_pmcsr_offset);
1552 	pmcsr &= PCI_PMCSR_STATE_MASK;
1553 	switch (pmcsr) {
1554 	case PCI_PMCSR_D0:
1555 		pwr_p->pwr_func_lvl = PM_LEVEL_D0;
1556 		break;
1557 
1558 	case PCI_PMCSR_D1:
1559 		pwr_p->pwr_func_lvl = PM_LEVEL_D1;
1560 		break;
1561 
1562 	case PCI_PMCSR_D2:
1563 		pwr_p->pwr_func_lvl = PM_LEVEL_D2;
1564 		break;
1565 
1566 	case PCI_PMCSR_D3HOT:
1567 		pwr_p->pwr_func_lvl = PM_LEVEL_D3;
1568 		break;
1569 
1570 	default:
1571 		break;
1572 	}
1573 
1574 	/* Raise the power to D0. */
1575 	if (pwr_p->pwr_func_lvl != PM_LEVEL_D0 &&
1576 	    ((ret = pm_raise_power(dip, 0, PM_LEVEL_D0)) != DDI_SUCCESS)) {
1577 		/*
1578 		 * Read PMCSR again. If it is at D0, ignore the return
1579 		 * value from pm_raise_power.
1580 		 */
1581 		pmcsr = pci_config_get16(pwr_p->pwr_conf_hdl,
1582 		    pwr_p->pwr_pmcsr_offset);
1583 		if ((pmcsr & PCI_PMCSR_STATE_MASK) == PCI_PMCSR_D0)
1584 			ret = DDI_SUCCESS;
1585 		else {
1586 			PCIEB_DEBUG(DBG_PWR, dip, "pcieb_pwr_setup: could not "
1587 			    "raise power to D0 \n");
1588 		}
1589 	}
1590 	if (ret == DDI_SUCCESS)
1591 		pwr_p->pwr_func_lvl = PM_LEVEL_D0;
1592 	return (ret);
1593 }
1594 
1595 /*
1596  * Disable PM for x86 and PLX 8532 switch.
1597  * For PLX Transitioning one port on this switch to low power causes links
1598  * on other ports on the same station to die. Due to PLX erratum #34, we
1599  * can't allow the downstream device go to non-D0 state.
1600  */
1601 static int
1602 pcieb_pwr_disable(dev_info_t *dip)
1603 {
1604 	pcie_pwr_t *pwr_p;
1605 
1606 	ASSERT(PCIE_PMINFO(dip));
1607 	pwr_p = PCIE_NEXUS_PMINFO(dip);
1608 	ASSERT(pwr_p);
1609 	PCIEB_DEBUG(DBG_PWR, dip, "pcieb_pwr_disable: disabling PM\n");
1610 	pwr_p->pwr_func_lvl = PM_LEVEL_D0;
1611 	pwr_p->pwr_flags = PCIE_NO_CHILD_PM;
1612 	return (DDI_SUCCESS);
1613 }
1614 
#ifdef DEBUG
/* When nonzero, debug messages are also printed from interrupt context. */
int pcieb_dbg_intr_print = 0;

/*
 * Debug printing routine behind the PCIEB_DEBUG macro.  Output goes
 * through the PROM; the formatted message is suppressed in interrupt
 * context unless pcieb_dbg_intr_print is set.
 */
void
pcieb_dbg(uint_t bit, dev_info_t *dip, char *fmt, ...)
{
	va_list ap;
	int in_intr;

	if (!pcieb_dbg_print)
		return;

	if (dip != NULL) {
		prom_printf("%s(%d): %s", ddi_driver_name(dip),
		    ddi_get_instance(dip), pcieb_debug_sym[bit]);
	}

	in_intr = servicing_interrupt();

	va_start(ap, fmt);
	if (!in_intr || pcieb_dbg_intr_print)
		prom_vprintf(fmt, ap);
	va_end(ap);
}
#endif
1640 
/*
 * Create identifying properties for this bridge: "first-in-chassis"
 * (from the Expansion Slot Register or, for Sun-branded PLX parts, by
 * fiat) and "serialid#" (from the PCIe serial number extended
 * capability, when present).
 */
static void
pcieb_id_props(pcieb_devstate_t *pcieb)
{
	uint64_t serialid = 0;	/* 40b field of EUI-64 serial no. register */
	uint16_t cap_ptr;
	uint8_t fic = 0;	/* 1 = first in chassis device */
	pcie_bus_t *bus_p = PCIE_DIP2BUS(pcieb->pcieb_dip);
	ddi_acc_handle_t config_handle = bus_p->bus_cfg_hdl;

	/*
	 * Identify first in chassis.  In the special case of a Sun branded
	 * PLX device, it obviously is first in chassis.  Otherwise, in the
	 * general case, look for an Expansion Slot Register and check its
	 * first-in-chassis bit.
	 */
#ifdef	PX_PLX
	uint16_t vendor_id = bus_p->bus_dev_ven_id & 0xFFFF;
	uint16_t device_id = bus_p->bus_dev_ven_id >> 16;
	if ((vendor_id == PXB_VENDOR_SUN) &&
	    ((device_id == PXB_DEVICE_PLX_PCIX) ||
	    (device_id == PXB_DEVICE_PLX_PCIE))) {
		fic = 1;
	}
#endif	/* PX_PLX */
	if ((fic == 0) && ((PCI_CAP_LOCATE(config_handle,
	    PCI_CAP_ID_SLOT_ID, &cap_ptr)) != DDI_FAILURE)) {
		/* Read the Expansion Slot Register from the slot-ID cap. */
		uint8_t esr = PCI_CAP_GET8(config_handle, NULL,
		    cap_ptr, PCI_CAP_ID_REGS_OFF);
		if (PCI_CAPSLOT_FIC(esr))
			fic = 1;
	}

	if ((PCI_CAP_LOCATE(config_handle,
	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_SER), &cap_ptr)) != DDI_FAILURE) {
		/* Serialid can be 0 thru a full 40b number */
		serialid = PCI_XCAP_GET32(config_handle, NULL,
		    cap_ptr, PCIE_SER_SID_UPPER_DW);
		serialid <<= 32;
		serialid |= PCI_XCAP_GET32(config_handle, NULL,
		    cap_ptr, PCIE_SER_SID_LOWER_DW);
	}

	if (fic)
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, pcieb->pcieb_dip,
		    "first-in-chassis");
	if (serialid)
		(void) ddi_prop_update_int64(DDI_DEV_T_NONE, pcieb->pcieb_dip,
		    "serialid#", serialid);
}
1690 
1691 static void
1692 pcieb_create_ranges_prop(dev_info_t *dip,
1693 	ddi_acc_handle_t config_handle)
1694 {
1695 	uint32_t base, limit;
1696 	ppb_ranges_t	ranges[PCIEB_RANGE_LEN];
1697 	uint8_t io_base_lo, io_limit_lo;
1698 	uint16_t io_base_hi, io_limit_hi, mem_base, mem_limit;
1699 	int i = 0, rangelen = sizeof (ppb_ranges_t)/sizeof (int);
1700 
1701 	io_base_lo = pci_config_get8(config_handle, PCI_BCNF_IO_BASE_LOW);
1702 	io_limit_lo = pci_config_get8(config_handle, PCI_BCNF_IO_LIMIT_LOW);
1703 	io_base_hi = pci_config_get16(config_handle, PCI_BCNF_IO_BASE_HI);
1704 	io_limit_hi = pci_config_get16(config_handle, PCI_BCNF_IO_LIMIT_HI);
1705 	mem_base = pci_config_get16(config_handle, PCI_BCNF_MEM_BASE);
1706 	mem_limit = pci_config_get16(config_handle, PCI_BCNF_MEM_LIMIT);
1707 
1708 	/*
1709 	 * Create ranges for IO space
1710 	 */
1711 	ranges[i].size_low = ranges[i].size_high = 0;
1712 	ranges[i].parent_mid = ranges[i].child_mid = ranges[i].parent_high = 0;
1713 	ranges[i].child_high = ranges[i].parent_high |=
1714 	    (PCI_REG_REL_M | PCI_ADDR_IO);
1715 	base = PCIEB_16bit_IOADDR(io_base_lo);
1716 	limit = PCIEB_16bit_IOADDR(io_limit_lo);
1717 
1718 	if ((io_base_lo & 0xf) == PCIEB_32BIT_IO) {
1719 		base = PCIEB_LADDR(base, io_base_hi);
1720 	}
1721 	if ((io_limit_lo & 0xf) == PCIEB_32BIT_IO) {
1722 		limit = PCIEB_LADDR(limit, io_limit_hi);
1723 	}
1724 
1725 	if ((io_base_lo & PCIEB_32BIT_IO) && (io_limit_hi > 0)) {
1726 		base = PCIEB_LADDR(base, io_base_hi);
1727 		limit = PCIEB_LADDR(limit, io_limit_hi);
1728 	}
1729 
1730 	/*
1731 	 * Create ranges for 32bit memory space
1732 	 */
1733 	base = PCIEB_32bit_MEMADDR(mem_base);
1734 	limit = PCIEB_32bit_MEMADDR(mem_limit);
1735 	ranges[i].size_low = ranges[i].size_high = 0;
1736 	ranges[i].parent_mid = ranges[i].child_mid = ranges[i].parent_high = 0;
1737 	ranges[i].child_high = ranges[i].parent_high |=
1738 	    (PCI_REG_REL_M | PCI_ADDR_MEM32);
1739 	ranges[i].child_low = ranges[i].parent_low = base;
1740 	if (limit >= base) {
1741 		ranges[i].size_low = limit - base + PCIEB_MEMGRAIN;
1742 		i++;
1743 	}
1744 
1745 	if (i) {
1746 		(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "ranges",
1747 		    (int *)ranges, i * rangelen);
1748 	}
1749 }
1750 
1751 /*
1752  * For PCI and PCI-X devices including PCIe2PCI bridge, initialize
1753  * cache-line-size and latency timer configuration registers.
1754  */
1755 void
1756 pcieb_set_pci_perf_parameters(dev_info_t *dip, ddi_acc_handle_t cfg_hdl)
1757 {
1758 	uint_t	n;
1759 
1760 	/* Initialize cache-line-size configuration register if needed */
1761 	if (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1762 	    "cache-line-size", 0) == 0) {
1763 		pci_config_put8(cfg_hdl, PCI_CONF_CACHE_LINESZ,
1764 		    PCIEB_CACHE_LINE_SIZE);
1765 		n = pci_config_get8(cfg_hdl, PCI_CONF_CACHE_LINESZ);
1766 		if (n != 0) {
1767 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
1768 			    "cache-line-size", n);
1769 		}
1770 	}
1771 
1772 	/* Initialize latency timer configuration registers if needed */
1773 	if (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1774 	    "latency-timer", 0) == 0) {
1775 		uchar_t	min_gnt, latency_timer;
1776 		uchar_t header_type;
1777 
1778 		/* Determine the configuration header type */
1779 		header_type = pci_config_get8(cfg_hdl, PCI_CONF_HEADER);
1780 
1781 		if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
1782 			latency_timer = PCIEB_LATENCY_TIMER;
1783 			pci_config_put8(cfg_hdl, PCI_BCNF_LATENCY_TIMER,
1784 			    latency_timer);
1785 		} else {
1786 			min_gnt = pci_config_get8(cfg_hdl, PCI_CONF_MIN_G);
1787 			latency_timer = min_gnt * 8;
1788 		}
1789 
1790 		pci_config_put8(cfg_hdl, PCI_CONF_LATENCY_TIMER,
1791 		    latency_timer);
1792 		n = pci_config_get8(cfg_hdl, PCI_CONF_LATENCY_TIMER);
1793 		if (n != 0) {
1794 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
1795 			    "latency-timer", n);
1796 		}
1797 	}
1798 }
1799