xref: /titanic_52/usr/src/uts/common/io/pciex/pcieb.c (revision db9ce1c953f094d292df951aada6b8a85e1ff103)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Common x86 and SPARC PCI-E to PCI bus bridge nexus driver
28  */
29 
30 #include <sys/sysmacros.h>
31 #include <sys/conf.h>
32 #include <sys/kmem.h>
33 #include <sys/debug.h>
34 #include <sys/modctl.h>
35 #include <sys/autoconf.h>
36 #include <sys/ddi_impldefs.h>
37 #include <sys/pci.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/sunndi.h>
41 #include <sys/fm/util.h>
42 #include <sys/pci_cap.h>
43 #include <sys/pci_impl.h>
44 #include <sys/pcie_impl.h>
45 #include <sys/open.h>
46 #include <sys/stat.h>
47 #include <sys/file.h>
48 #include <sys/promif.h>		/* prom_printf */
49 #include <sys/disp.h>
50 #include <sys/pcie_pwr.h>
51 #include <sys/hotplug/pci/pcie_hp.h>
52 #include "pcieb.h"
53 #ifdef PX_PLX
54 #include <io/pciex/pcieb_plx.h>
55 #endif /* PX_PLX */
56 
57 /*LINTLIBRARY*/
58 
/*
 * panic flag: mask of fatal error flags; tunable (e.g. via /etc/system)
 * to control which fabric errors are treated as panic-worthy.
 */
int pcieb_die = PF_ERR_FATAL_FLAGS;

/* flag to turn on MSI support; set to 0 to force INTx */
int pcieb_enable_msi = 1;

#if defined(DEBUG)
/* bitmask selecting which PCIEB_DEBUG categories are printed */
uint_t pcieb_dbg_print = 0;

static char *pcieb_debug_sym [] = {	/* same sequence as pcieb_debug_bit */
	/*  0 */ "attach",
	/*  1 */ "pwr",
	/*  2 */ "intr"
};
#endif /* DEBUG */
74 
75 static int pcieb_bus_map(dev_info_t *, dev_info_t *, ddi_map_req_t *, off_t,
76 	off_t, caddr_t *);
77 static int pcieb_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *,
78 	void *);
79 static int pcieb_fm_init(pcieb_devstate_t *pcieb_p);
80 static void pcieb_fm_fini(pcieb_devstate_t *pcieb_p);
81 static int pcieb_fm_init_child(dev_info_t *dip, dev_info_t *cdip, int cap,
82     ddi_iblock_cookie_t *ibc_p);
83 static int pcieb_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
84 	ddi_dma_attr_t *attr_p, int (*waitfp)(caddr_t), caddr_t arg,
85 	ddi_dma_handle_t *handlep);
86 static int pcieb_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
87 	ddi_dma_handle_t handle, enum ddi_dma_ctlops cmd, off_t *offp,
88 	size_t *lenp, caddr_t *objp, uint_t cache_flags);
89 static int pcieb_intr_ops(dev_info_t *dip, dev_info_t *rdip,
90 	ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
91 
/*
 * Nexus bus_ops vector: DMA and event ops are mostly passed through to
 * the generic ddi_/ndi_ implementations; map, ctlops, DMA alloc/mctl and
 * interrupt ops are intercepted by this driver.
 */
static struct bus_ops pcieb_bus_ops = {
	BUSO_REV,
	pcieb_bus_map,
	0,
	0,
	0,
	i_ddi_map_fault,
	ddi_dma_map,
	pcieb_dma_allochdl,
	ddi_dma_freehdl,
	ddi_dma_bindhdl,
	ddi_dma_unbindhdl,
	ddi_dma_flush,
	ddi_dma_win,
	pcieb_dma_mctl,
	pcieb_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,	/* (*bus_get_eventcookie)();	*/
	ndi_busop_add_eventcall,	/* (*bus_add_eventcall)();	*/
	ndi_busop_remove_eventcall,	/* (*bus_remove_eventcall)();	*/
	ndi_post_event,			/* (*bus_post_event)();		*/
	NULL,				/* (*bus_intr_ctl)();		*/
	NULL,				/* (*bus_config)(); 		*/
	NULL,				/* (*bus_unconfig)(); 		*/
	pcieb_fm_init_child,		/* (*bus_fm_init)(); 		*/
	NULL,				/* (*bus_fm_fini)(); 		*/
	i_ndi_busop_access_enter,	/* (*bus_fm_access_enter)(); 	*/
	i_ndi_busop_access_exit,	/* (*bus_fm_access_exit)(); 	*/
	pcie_bus_power,			/* (*bus_power)(); 	*/
	pcieb_intr_ops,			/* (*bus_intr_op)(); 		*/
	pcie_hp_common_ops		/* (*bus_hp_op)(); 		*/
};
124 
125 static int	pcieb_open(dev_t *, int, int, cred_t *);
126 static int	pcieb_close(dev_t, int, int, cred_t *);
127 static int	pcieb_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
128 static int	pcieb_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
129 static uint_t 	pcieb_intr_handler(caddr_t arg1, caddr_t arg2);
130 
131 /* PM related functions */
132 static int	pcieb_pwr_setup(dev_info_t *dip);
133 static int	pcieb_pwr_init_and_raise(dev_info_t *dip, pcie_pwr_t *pwr_p);
134 static void	pcieb_pwr_teardown(dev_info_t *dip);
135 static int	pcieb_pwr_disable(dev_info_t *dip);
136 
137 /* Hotplug related functions */
138 static void pcieb_id_props(pcieb_devstate_t *pcieb);
139 
/*
 * soft state pointer: anchor for the per-instance pcieb_devstate_t list,
 * initialized in _init() and torn down in _fini().
 */
void *pcieb_state;
144 
/*
 * Character device ops: only open/close/ioctl (hotplug control path) and
 * prop_op are implemented; all data-transfer entry points are nodev.
 */
static struct cb_ops pcieb_cb_ops = {
	pcieb_open,			/* open */
	pcieb_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	pcieb_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	pcie_prop_op,			/* cb_prop_op */
	NULL,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* int (*cb_aread)() */
	nodev				/* int (*cb_awrite)() */
};
165 
166 static int	pcieb_probe(dev_info_t *);
167 static int	pcieb_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
168 static int	pcieb_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
169 
/*
 * Device operations vector tying together the cb_ops, bus_ops and the
 * attach/detach/info entry points of this nexus driver.
 */
static struct dev_ops pcieb_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt  */
	pcieb_info,		/* info */
	nulldev,		/* identify */
	pcieb_probe,		/* probe */
	pcieb_attach,		/* attach */
	pcieb_detach,		/* detach */
	nulldev,		/* reset */
	&pcieb_cb_ops,		/* driver operations */
	&pcieb_bus_ops,		/* bus operations */
	pcie_power,		/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};
184 
185 /*
186  * Module linkage information for the kernel.
187  */
188 
/* Loadable-module descriptor: identifies this as a device driver. */
static struct modldrv modldrv = {
	&mod_driverops, /* Type of module */
	"PCIe bridge/switch driver",
	&pcieb_ops,	/* driver ops */
};

/* Module linkage consumed by mod_install()/mod_remove()/mod_info(). */
static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};
200 
201 /*
202  * forward function declarations:
203  */
204 static void	pcieb_uninitchild(dev_info_t *);
205 static int 	pcieb_initchild(dev_info_t *child);
206 static void	pcieb_create_ranges_prop(dev_info_t *, ddi_acc_handle_t);
207 static boolean_t pcieb_is_pcie_device_type(dev_info_t *dip);
208 
209 /* interrupt related declarations */
210 static int	pcieb_msi_supported(dev_info_t *);
211 static int	pcieb_intr_attach(pcieb_devstate_t *pcieb);
212 static int	pcieb_intr_init(pcieb_devstate_t *pcieb_p, int intr_type);
213 static void	pcieb_intr_fini(pcieb_devstate_t *pcieb_p);
214 
215 int
216 _init(void)
217 {
218 	int e;
219 
220 	if ((e = ddi_soft_state_init(&pcieb_state, sizeof (pcieb_devstate_t),
221 	    1)) == 0 && (e = mod_install(&modlinkage)) != 0)
222 		ddi_soft_state_fini(&pcieb_state);
223 	return (e);
224 }
225 
226 int
227 _fini(void)
228 {
229 	int e;
230 
231 	if ((e = mod_remove(&modlinkage)) == 0) {
232 		ddi_soft_state_fini(&pcieb_state);
233 	}
234 	return (e);
235 }
236 
/* _info(9E): report module information via the standard mod_info(). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
242 
243 /* ARGSUSED */
244 static int
245 pcieb_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
246 {
247 	minor_t		minor = getminor((dev_t)arg);
248 	int		instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
249 	pcieb_devstate_t *pcieb = ddi_get_soft_state(pcieb_state, instance);
250 	int		ret = DDI_SUCCESS;
251 
252 	switch (infocmd) {
253 	case DDI_INFO_DEVT2INSTANCE:
254 		*result = (void *)(intptr_t)instance;
255 		break;
256 	case DDI_INFO_DEVT2DEVINFO:
257 		if (pcieb == NULL) {
258 			ret = DDI_FAILURE;
259 			break;
260 		}
261 
262 		*result = (void *)pcieb->pcieb_dip;
263 		break;
264 	default:
265 		ret = DDI_FAILURE;
266 		break;
267 	}
268 
269 	return (ret);
270 }
271 
272 
/* probe(9E): self-identifying device, so probing always succeeds. */
/*ARGSUSED*/
static int
pcieb_probe(dev_info_t *devi)
{
	return (DDI_PROBE_SUCCESS);
}
279 
/*
 * attach(9E): bring a bridge/switch instance online.
 *
 * DDI_ATTACH sequencing matters here: soft state and FM first (the FM
 * iblock cookie is needed for the error mutexes), then PM setup, then
 * properties/ranges, then pcie_init() and interrupts, and finally the
 * root-port MPS scan which requires everything above in place.  On any
 * failure we fall through to pcieb_detach() which tears down whatever
 * the pcieb_init_flags bits say was set up.
 */
static int
pcieb_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int			instance;
	char			device_type[8];
	pcieb_devstate_t	*pcieb;
	pcie_bus_t		*bus_p = PCIE_DIP2UPBUS(devi);
	ddi_acc_handle_t	config_handle = bus_p->bus_cfg_hdl;

	switch (cmd) {
	case DDI_RESUME:
		(void) pcie_pwr_resume(devi);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);

	case DDI_ATTACH:
		break;
	}

	/* Only bind to devices the PCIe framework marked as bridges. */
	if (!(PCIE_IS_BDG(bus_p))) {
		PCIEB_DEBUG(DBG_ATTACH, devi, "This is not a switch or"
		" bridge\n");
		return (DDI_FAILURE);
	}

	/*
	 * If PCIE_LINKCTL_LINK_DISABLE bit in the PCIe Config
	 * Space (PCIe Capability Link Control Register) is set,
	 * then do not bind the driver.
	 */
	if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) & PCIE_LINKCTL_LINK_DISABLE)
		return (DDI_FAILURE);

	/*
	 * Allocate and get soft state structure.
	 */
	instance = ddi_get_instance(devi);
	if (ddi_soft_state_zalloc(pcieb_state, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	pcieb = ddi_get_soft_state(pcieb_state, instance);
	pcieb->pcieb_dip = devi;

	/* FM must come first: its iblock cookie seeds the mutexes below. */
	if ((pcieb_fm_init(pcieb)) != DDI_SUCCESS) {
		PCIEB_DEBUG(DBG_ATTACH, devi, "Failed in pcieb_fm_init\n");
		goto fail;
	}
	pcieb->pcieb_init_flags |= PCIEB_INIT_FM;

	mutex_init(&pcieb->pcieb_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pcieb->pcieb_err_mutex, NULL, MUTEX_DRIVER,
	    (void *)pcieb->pcieb_fm_ibc);
	mutex_init(&pcieb->pcieb_peek_poke_mutex, NULL, MUTEX_DRIVER,
	    (void *)pcieb->pcieb_fm_ibc);

	/* create special properties for device identification */
	pcieb_id_props(pcieb);

	/*
	 * Power management setup. This also makes sure that switch/bridge
	 * is at D0 during attach.
	 */
	if (pwr_common_setup(devi) != DDI_SUCCESS) {
		PCIEB_DEBUG(DBG_PWR, devi, "pwr_common_setup failed\n");
		goto fail;
	}

	if (pcieb_pwr_setup(devi) != DDI_SUCCESS) {
		PCIEB_DEBUG(DBG_PWR, devi, "pxb_pwr_setup failed \n");
		goto fail;
	}

	/*
	 * Make sure the "device_type" property exists.
	 */
	if (pcieb_is_pcie_device_type(devi))
		(void) strcpy(device_type, "pciex");
	else
		(void) strcpy(device_type, "pci");

	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "device_type", device_type);

	/*
	 * Check whether the "ranges" property is present.
	 * Otherwise create the ranges property by reading
	 * the configuration registers
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "ranges") == 0) {
		pcieb_create_ranges_prop(devi, config_handle);
	}

	if (PCIE_IS_PCI_BDG(bus_p))
		pcieb_set_pci_perf_parameters(devi, config_handle);

#ifdef PX_PLX
	pcieb_attach_plx_workarounds(pcieb);
#endif /* PX_PLX */

	if (pcie_init(devi, NULL) != DDI_SUCCESS)
		goto fail;

	/*
	 * Initialize interrupt handlers. Ignore return value.
	 */
	(void) pcieb_intr_attach(pcieb);

	/* Do any platform specific workarounds needed at this time */
	pcieb_plat_attach_workaround(devi);

	/*
	 * If this is a root port, determine and set the max payload size.
	 * Since this will involve scanning the fabric, all error enabling
	 * and sw workarounds should be in place before doing this.
	 */
	if (PCIE_IS_RP(bus_p))
		pcie_init_root_port_mps(devi);

	ddi_report_dev(devi);
	return (DDI_SUCCESS);

fail:
	/* pcieb_detach consults pcieb_init_flags for partial teardown. */
	(void) pcieb_detach(devi, DDI_DETACH);
	return (DDI_FAILURE);
}
407 
/*
 * detach(9E): tear down an instance in the reverse order of attach —
 * interrupts, PCIe framework state, properties, FM, PM, mutexes, and
 * finally the soft state.  Also used by pcieb_attach() for cleanup of a
 * partially attached instance (guarded by pcieb_init_flags bits).
 */
static int
pcieb_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	pcieb_devstate_t *pcieb;
	int error = DDI_SUCCESS;

	switch (cmd) {
	case DDI_SUSPEND:
		error = pcie_pwr_suspend(devi);
		return (error);

	case DDI_DETACH:
		break;

	default:
		return (DDI_FAILURE);
	}

	pcieb = ddi_get_soft_state(pcieb_state, ddi_get_instance(devi));

	/* remove interrupt handlers */
	pcieb_intr_fini(pcieb);

	/* uninitialize inband PCI-E HPC if present */
	(void) pcie_uninit(devi);

	(void) ddi_prop_remove(DDI_DEV_T_NONE, devi, "device_type");

	(void) ndi_prop_remove(DDI_DEV_T_NONE, pcieb->pcieb_dip,
	    "pcie_ce_mask");

	if (pcieb->pcieb_init_flags & PCIEB_INIT_FM)
		pcieb_fm_fini(pcieb);

	pcieb_pwr_teardown(devi);
	pwr_common_teardown(devi);

	mutex_destroy(&pcieb->pcieb_peek_poke_mutex);
	mutex_destroy(&pcieb->pcieb_err_mutex);
	mutex_destroy(&pcieb->pcieb_mutex);

	/*
	 * And finally free the per-pci soft state.
	 */
	ddi_soft_state_free(pcieb_state, ddi_get_instance(devi));

	return (DDI_SUCCESS);
}
456 
457 static int
458 pcieb_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
459     off_t offset, off_t len, caddr_t *vaddrp)
460 {
461 	dev_info_t *pdip;
462 
463 	pdip = (dev_info_t *)DEVI(dip)->devi_parent;
464 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip, rdip, mp,
465 	    offset, len, vaddrp));
466 }
467 
/*
 * bus_ctl entry point: handles child init/uninit naming, attach/detach
 * bookkeeping (PM holds, fault-management pf_init/pf_fini), peek/poke,
 * and the NREGS/REGSIZE queries; everything else is passed to
 * ddi_ctlops().  Only NREGS/REGSIZE fall through the switch to the
 * "reg" property parsing code at the bottom.
 */
static int
pcieb_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	pci_regspec_t *drv_regp;
	int	reglen;
	int	rn;
	int	totreg;
	pcieb_devstate_t *pcieb = ddi_get_soft_state(pcieb_state,
	    ddi_get_instance(dip));
	struct detachspec *ds;
	struct attachspec *as;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?PCIE-device: %s@%s, %s%d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    ddi_driver_name(rdip),
		    ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (pcieb_initchild((dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		pcieb_uninitchild((dev_info_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_SIDDEV:
		/* children are self-identifying */
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);
		/* fall through to the "reg" property handling below */
		break;

	case DDI_CTLOPS_PEEK:
	case DDI_CTLOPS_POKE:
		return (pcieb_plat_peekpoke(dip, rdip, ctlop, arg, result));
	case DDI_CTLOPS_ATTACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		as = (struct attachspec *)arg;
		switch (as->when) {
		case DDI_PRE:
			if (as->cmd == DDI_RESUME) {
				pcie_clear_errors(rdip);
				if (pcieb_plat_ctlops(rdip, ctlop, arg) !=
				    DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			/* hold PM across the child's attach */
			if (as->cmd == DDI_ATTACH)
				return (pcie_pm_hold(dip));

			return (DDI_SUCCESS);

		case DDI_POST:
			if (as->cmd == DDI_ATTACH &&
			    as->result != DDI_SUCCESS) {
				/*
				 * Attach failed for the child device. The child
				 * driver may have made PM calls before the
				 * attach failed. pcie_pm_remove_child() should
				 * cleanup PM state and holds (if any)
				 * associated with the child device.
				 */
				return (pcie_pm_remove_child(dip, rdip));
			}

			if (as->result == DDI_SUCCESS) {
				pf_init(rdip, (void *)pcieb->pcieb_fm_ibc,
				    as->cmd);

				(void) pcieb_plat_ctlops(rdip, ctlop, arg);
			}

			/*
			 * For empty hotplug-capable slots, we should explicitly
			 * disable the errors, so that we won't panic upon
			 * unsupported hotplug messages.
			 */
			if ((!ddi_prop_exists(DDI_DEV_T_ANY, rdip,
			    DDI_PROP_DONTPASS, "hotplug-capable")) ||
			    ddi_get_child(rdip)) {
				(void) pcie_postattach_child(rdip);
				return (DDI_SUCCESS);
			}

			pcie_disable_errors(rdip);

			return (DDI_SUCCESS);
		default:
			break;
		}
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DETACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		ds = (struct detachspec *)arg;
		switch (ds->when) {
		case DDI_PRE:
			pf_fini(rdip, ds->cmd);
			return (DDI_SUCCESS);

		case DDI_POST:
			if (pcieb_plat_ctlops(rdip, ctlop, arg) != DDI_SUCCESS)
				return (DDI_FAILURE);
			if (ds->cmd == DDI_DETACH &&
			    ds->result == DDI_SUCCESS) {
				return (pcie_pm_remove_child(dip, rdip));
			}
			return (DDI_SUCCESS);
		default:
			break;
		}
		return (DDI_SUCCESS);
	default:
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}

	/* Reached only for DDI_CTLOPS_REGSIZE / DDI_CTLOPS_NREGS. */
	*(int *)result = 0;
	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "reg", (caddr_t)&drv_regp,
	    &reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	totreg = reglen / sizeof (pci_regspec_t);
	if (ctlop == DDI_CTLOPS_NREGS)
		*(int *)result = totreg;
	else if (ctlop == DDI_CTLOPS_REGSIZE) {
		rn = *(int *)arg;
		if (rn >= totreg) {
			kmem_free(drv_regp, reglen);
			return (DDI_FAILURE);
		}

		/* combine the 32-bit size halves into a 64-bit size */
		*(off_t *)result = drv_regp[rn].pci_size_low |
		    ((uint64_t)drv_regp[rn].pci_size_hi << 32);
	}

	kmem_free(drv_regp, reglen);
	return (DDI_SUCCESS);
}
618 
619 /*
620  * name_child
621  *
622  * This function is called from init_child to name a node. It is
623  * also passed as a callback for node merging functions.
624  *
625  * return value: DDI_SUCCESS, DDI_FAILURE
626  */
/*
 * name_child
 *
 * This function is called from init_child to name a node. It is
 * also passed as a callback for node merging functions.
 *
 * Hardware nodes are named "D" or "D,F" from the "reg" property
 * (device/function); .conf prototype nodes are named from their
 * "unit-address" property instead.
 *
 * return value: DDI_SUCCESS, DDI_FAILURE
 */
static int
pcieb_name_child(dev_info_t *child, char *name, int namelen)
{
	pci_regspec_t *pci_rp;
	uint_t device, func;
	char **unit_addr;
	uint_t n;

	/*
	 * For .conf nodes, use unit-address property as name
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "unit-address", &unit_addr, &n) !=
		    DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN,
			    "cannot find unit-address in %s.conf",
			    ddi_driver_name(child));
			return (DDI_FAILURE);
		}
		/* exactly one non-empty string is required */
		if (n != 1 || *unit_addr == NULL || **unit_addr == 0) {
			cmn_err(CE_WARN, "unit-address property in %s.conf"
			    " not well-formed", ddi_driver_name(child));
			ddi_prop_free(unit_addr);
			return (DDI_FAILURE);
		}
		(void) snprintf(name, namelen, "%s", *unit_addr);
		ddi_prop_free(unit_addr);
		return (DDI_SUCCESS);
	}

	/*
	 * Get the address portion of the node name based on
	 * the function and device number.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "reg", (int **)&pci_rp, &n) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* copy the device identifications */
	device = PCI_REG_DEV_G(pci_rp[0].pci_phys_hi);
	func = PCI_REG_FUNC_G(pci_rp[0].pci_phys_hi);

	/*
	 * With ARI forwarding the traditional 5-bit device / 3-bit
	 * function split collapses into an 8-bit function number.
	 */
	if (pcie_ari_is_enabled(ddi_get_parent(child))
	    == PCIE_ARI_FORW_ENABLED) {
		func = (device << 3) | func;
		device = 0;
	}

	if (func != 0)
		(void) snprintf(name, namelen, "%x,%x", device, func);
	else
		(void) snprintf(name, namelen, "%x", device);

	ddi_prop_free(pci_rp);
	return (DDI_SUCCESS);
}
685 
/*
 * DDI_CTLOPS_INITCHILD handler: name the child node, merge .conf
 * prototype nodes into their hardware counterparts, and initialize the
 * PCIe framework state for real hardware children.  Once pcie_pm_hold()
 * succeeds, every exit path must go through "cleanup" so the matching
 * pcie_pm_release() is issued.
 */
static int
pcieb_initchild(dev_info_t *child)
{
	char name[MAXNAMELEN];
	int result = DDI_FAILURE;
	pcieb_devstate_t *pcieb =
	    (pcieb_devstate_t *)ddi_get_soft_state(pcieb_state,
	    ddi_get_instance(ddi_get_parent(child)));

	/*
	 * Name the child
	 */
	if (pcieb_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS) {
		result = DDI_FAILURE;
		goto done;
	}
	ddi_set_name_addr(child, name);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, pcieb_name_child) != DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			result = DDI_FAILURE;
			goto done;
		}

		/* workaround for ddivs to run under PCI-E */
		if (pci_allow_pseudo_children) {
			result = DDI_SUCCESS;
			goto done;
		}

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ddi_set_name_addr(child, NULL);
		result = DDI_NOT_WELL_FORMED;
		goto done;
	}

	/* platform specific initchild */
	pcieb_plat_initchild(child);

	if (pcie_pm_hold(pcieb->pcieb_dip) != DDI_SUCCESS) {
		PCIEB_DEBUG(DBG_PWR, pcieb->pcieb_dip,
		    "INITCHILD: px_pm_hold failed\n");
		result = DDI_FAILURE;
		goto done;
	}
	/* Any return from here must call pcie_pm_release */

	/*
	 * If configuration registers were previously saved by
	 * child (before it entered D3), then let the child do the
	 * restore to set up the config regs as it'll first need to
	 * power the device out of D3.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "config-regs-saved-by-child") == 1) {
		PCIEB_DEBUG(DBG_PWR, ddi_get_parent(child),
		    "INITCHILD: config regs to be restored by child"
		    " for %s@%s\n", ddi_node_name(child),
		    ddi_get_name_addr(child));

		result = DDI_SUCCESS;
		goto cleanup;
	}

	PCIEB_DEBUG(DBG_PWR, ddi_get_parent(child),
	    "INITCHILD: config regs setup for %s@%s\n",
	    ddi_node_name(child), ddi_get_name_addr(child));

	if (!pcie_init_bus(child) || pcie_initchild(child) != DDI_SUCCESS) {
		result = DDI_FAILURE;
		goto cleanup;
	}

#ifdef PX_PLX
	if (pcieb_init_plx_workarounds(pcieb, child) == DDI_FAILURE) {
		result = DDI_FAILURE;
		goto cleanup;
	}
#endif /* PX_PLX */

	result = DDI_SUCCESS;
cleanup:
	pcie_pm_release(pcieb->pcieb_dip);
done:
	return (result);
}
795 
/*
 * DDI_CTLOPS_UNINITCHILD handler: undo pcieb_initchild() — tear down
 * PCIe framework and platform state, then strip the node's name, minor
 * nodes and properties so it reverts to prototype form.
 */
static void
pcieb_uninitchild(dev_info_t *dip)
{

	pcie_uninitchild(dip);

	pcieb_plat_uninitchild(dip);

	ddi_set_name_addr(dip, NULL);

	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	ddi_remove_minor_node(dip, NULL);

	ddi_prop_remove_all(dip);
}
813 
814 static boolean_t
815 pcieb_is_pcie_device_type(dev_info_t *dip)
816 {
817 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
818 
819 	if (PCIE_IS_SW(bus_p) || PCIE_IS_RP(bus_p) || PCIE_IS_PCI2PCIE(bus_p))
820 		return (B_TRUE);
821 
822 	return (B_FALSE);
823 }
824 
/*
 * Attach interrupt handlers for internally generated interrupts
 * (hotplug, PME, AER).  MSI is preferred when both MSI and FIXED are
 * supported; FIXED is only attempted if MSI was not attached.
 */
static int
pcieb_intr_attach(pcieb_devstate_t *pcieb)
{
	int			intr_types;
	dev_info_t		*dip = pcieb->pcieb_dip;

	/* Allow platform specific code to do any initialization first */
	pcieb_plat_intr_attach(pcieb);

	/*
	 * Initialize interrupt handlers.
	 * If both MSI and FIXED are supported, try to attach MSI first.
	 * If MSI fails for any reason, then try FIXED, but only allow one
	 * type to be attached.
	 */
	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
		PCIEB_DEBUG(DBG_ATTACH, dip, "ddi_intr_get_supported_types"
		    " failed\n");
		goto FAIL;
	}

	if ((intr_types & DDI_INTR_TYPE_MSI) &&
	    (pcieb_msi_supported(dip) == DDI_SUCCESS)) {
		/* on success, narrow intr_types so FIXED is skipped below */
		if (pcieb_intr_init(pcieb, DDI_INTR_TYPE_MSI) == DDI_SUCCESS)
			intr_types = DDI_INTR_TYPE_MSI;
		else {
			PCIEB_DEBUG(DBG_ATTACH, dip, "Unable to attach MSI"
			    " handler\n");
		}
	}

	if (intr_types != DDI_INTR_TYPE_MSI) {
		/*
		 * MSIs are not supported or MSI initialization failed. For Root
		 * Ports mark this so error handling might try to fallback to
		 * some other mechanism if available (machinecheck etc.).
		 */
		if (PCIE_IS_RP(PCIE_DIP2UPBUS(dip)))
			pcieb->pcieb_no_aer_msi = B_TRUE;
	}

	if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (pcieb_intr_init(pcieb, DDI_INTR_TYPE_FIXED) !=
		    DDI_SUCCESS) {
			PCIEB_DEBUG(DBG_ATTACH, dip,
			    "Unable to attach INTx handler\n");
			goto FAIL;
		}
	}
	return (DDI_SUCCESS);

FAIL:
	return (DDI_FAILURE);
}
879 
880 /*
881  * This function initializes internally generated interrupts only.
882  * It does not affect any interrupts generated by downstream devices
883  * or the forwarding of them.
884  *
885  * Enable Device Specific Interrupts or Hotplug features here.
886  * Enabling features may change how many interrupts are requested
887  * by the device.  If features are not enabled first, the
888  * device might not ask for any interrupts.
889  */
890 
891 static int
892 pcieb_intr_init(pcieb_devstate_t *pcieb, int intr_type)
893 {
894 	dev_info_t	*dip = pcieb->pcieb_dip;
895 	int		nintrs, request, count, x;
896 	int		intr_cap = 0;
897 	int		inum = 0;
898 	int		ret, hp_msi_off;
899 	pcie_bus_t	*bus_p = PCIE_DIP2UPBUS(dip);
900 	uint16_t	vendorid = bus_p->bus_dev_ven_id & 0xFFFF;
901 	boolean_t	is_hp = B_FALSE;
902 	boolean_t	is_pme = B_FALSE;
903 
904 	PCIEB_DEBUG(DBG_ATTACH, dip, "pcieb_intr_init: Attaching %s handler\n",
905 	    (intr_type == DDI_INTR_TYPE_MSI) ? "MSI" : "INTx");
906 
907 	request = 0;
908 	if (PCIE_IS_HOTPLUG_ENABLED(dip)) {
909 		request++;
910 		is_hp = B_TRUE;
911 	}
912 
913 	/*
914 	 * Hotplug and PME share the same MSI vector. If hotplug is not
915 	 * supported check if MSI is needed for PME.
916 	 */
917 	if ((intr_type == DDI_INTR_TYPE_MSI) && PCIE_IS_RP(bus_p) &&
918 	    (vendorid == NVIDIA_VENDOR_ID)) {
919 		is_pme = B_TRUE;
920 		if (!is_hp)
921 			request++;
922 	}
923 
924 	/*
925 	 * Setup MSI if this device is a Rootport and has AER. Currently no
926 	 * SPARC Root Port supports fabric errors being reported through it.
927 	 */
928 	if (intr_type == DDI_INTR_TYPE_MSI) {
929 		if (PCIE_IS_RP(bus_p) && PCIE_HAS_AER(bus_p))
930 			request++;
931 	}
932 
933 	if (request == 0)
934 		return (DDI_SUCCESS);
935 
936 	/*
937 	 * Get number of supported interrupts.
938 	 *
939 	 * Several Bridges/Switches will not have this property set, resulting
940 	 * in a FAILURE, if the device is not configured in a way that
941 	 * interrupts are needed. (eg. hotplugging)
942 	 */
943 	ret = ddi_intr_get_nintrs(dip, intr_type, &nintrs);
944 	if ((ret != DDI_SUCCESS) || (nintrs == 0)) {
945 		PCIEB_DEBUG(DBG_ATTACH, dip, "ddi_intr_get_nintrs ret:%d"
946 		    " req:%d\n", ret, nintrs);
947 		return (DDI_FAILURE);
948 	}
949 
950 	PCIEB_DEBUG(DBG_ATTACH, dip, "bdf 0x%x: ddi_intr_get_nintrs: nintrs %d",
951 	    " request %d\n", bus_p->bus_bdf, nintrs, request);
952 
953 	if (request > nintrs)
954 		request = nintrs;
955 
956 	/* Allocate an array of interrupt handlers */
957 	pcieb->pcieb_htable_size = sizeof (ddi_intr_handle_t) * request;
958 	pcieb->pcieb_htable = kmem_zalloc(pcieb->pcieb_htable_size,
959 	    KM_SLEEP);
960 	pcieb->pcieb_init_flags |= PCIEB_INIT_HTABLE;
961 
962 	ret = ddi_intr_alloc(dip, pcieb->pcieb_htable, intr_type, inum,
963 	    request, &count, DDI_INTR_ALLOC_NORMAL);
964 	if ((ret != DDI_SUCCESS) || (count == 0)) {
965 		PCIEB_DEBUG(DBG_ATTACH, dip, "ddi_intr_alloc() ret: %d ask: %d"
966 		    " actual: %d\n", ret, request, count);
967 		goto FAIL;
968 	}
969 	pcieb->pcieb_init_flags |= PCIEB_INIT_ALLOC;
970 
971 	/* Save the actual number of interrupts allocated */
972 	pcieb->pcieb_intr_count = count;
973 	if (count < request) {
974 		PCIEB_DEBUG(DBG_ATTACH, dip, "bdf 0%x: Requested Intr: %d"
975 		    " Received: %d\n", bus_p->bus_bdf, request, count);
976 	}
977 
978 	/*
979 	 * NVidia (MCP55 and other) chipsets have a errata that if the number
980 	 * of requested MSI intrs is not allocated we have to fall back to INTx.
981 	 */
982 	if (intr_type == DDI_INTR_TYPE_MSI) {
983 		if (PCIE_IS_RP(bus_p) && (vendorid == NVIDIA_VENDOR_ID)) {
984 			if (request != count)
985 				goto FAIL;
986 		}
987 	}
988 
989 	/* Get interrupt priority */
990 	ret = ddi_intr_get_pri(pcieb->pcieb_htable[0],
991 	    &pcieb->pcieb_intr_priority);
992 	if (ret != DDI_SUCCESS) {
993 		PCIEB_DEBUG(DBG_ATTACH, dip, "ddi_intr_get_pri() ret: %d\n",
994 		    ret);
995 		goto FAIL;
996 	}
997 
998 	if (pcieb->pcieb_intr_priority >= LOCK_LEVEL) {
999 		pcieb->pcieb_intr_priority = LOCK_LEVEL - 1;
1000 		ret = ddi_intr_set_pri(pcieb->pcieb_htable[0],
1001 		    pcieb->pcieb_intr_priority);
1002 		if (ret != DDI_SUCCESS) {
1003 			PCIEB_DEBUG(DBG_ATTACH, dip, "ddi_intr_set_pri() ret:"
1004 			" %d\n", ret);
1005 
1006 			goto FAIL;
1007 		}
1008 	}
1009 
1010 	mutex_init(&pcieb->pcieb_intr_mutex, NULL, MUTEX_DRIVER, NULL);
1011 
1012 	pcieb->pcieb_init_flags |= PCIEB_INIT_MUTEX;
1013 
1014 	for (count = 0; count < pcieb->pcieb_intr_count; count++) {
1015 		ret = ddi_intr_add_handler(pcieb->pcieb_htable[count],
1016 		    pcieb_intr_handler, (caddr_t)pcieb,
1017 		    (caddr_t)(uintptr_t)(inum + count));
1018 
1019 		if (ret != DDI_SUCCESS) {
1020 			PCIEB_DEBUG(DBG_ATTACH, dip, "Cannot add "
1021 			    "interrupt(%d)\n", ret);
1022 			break;
1023 		}
1024 	}
1025 
1026 	/* If unsucessful, remove the added handlers */
1027 	if (ret != DDI_SUCCESS) {
1028 		for (x = 0; x < count; x++) {
1029 			(void) ddi_intr_remove_handler(pcieb->pcieb_htable[x]);
1030 		}
1031 		goto FAIL;
1032 	}
1033 
1034 	pcieb->pcieb_init_flags |= PCIEB_INIT_HANDLER;
1035 
1036 	(void) ddi_intr_get_cap(pcieb->pcieb_htable[0], &intr_cap);
1037 
1038 	/*
1039 	 * Get this intr lock because we are not quite ready to handle
1040 	 * interrupts immediately after enabling it. The MSI multi register
1041 	 * gets programmed in ddi_intr_enable after which we need to get the
1042 	 * MSI offsets for Hotplug/AER.
1043 	 */
1044 	mutex_enter(&pcieb->pcieb_intr_mutex);
1045 
1046 	if (intr_cap & DDI_INTR_FLAG_BLOCK) {
1047 		(void) ddi_intr_block_enable(pcieb->pcieb_htable,
1048 		    pcieb->pcieb_intr_count);
1049 		pcieb->pcieb_init_flags |= PCIEB_INIT_BLOCK;
1050 	} else {
1051 		for (count = 0; count < pcieb->pcieb_intr_count; count++) {
1052 			(void) ddi_intr_enable(pcieb->pcieb_htable[count]);
1053 		}
1054 	}
1055 	pcieb->pcieb_init_flags |= PCIEB_INIT_ENABLE;
1056 
1057 	/* Save the interrupt type */
1058 	pcieb->pcieb_intr_type = intr_type;
1059 
1060 	/* Get the MSI offset for hotplug/PME from the PCIe cap reg */
1061 	if (intr_type == DDI_INTR_TYPE_MSI) {
1062 		hp_msi_off = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL,
1063 		    bus_p->bus_pcie_off, PCIE_PCIECAP) &
1064 		    PCIE_PCIECAP_INT_MSG_NUM;
1065 
1066 		if (hp_msi_off >= count) {
1067 			PCIEB_DEBUG(DBG_ATTACH, dip, "MSI number %d in PCIe "
1068 			    "cap > max allocated %d\n", hp_msi_off, count);
1069 			mutex_exit(&pcieb->pcieb_intr_mutex);
1070 			goto FAIL;
1071 		}
1072 
1073 		if (is_hp)
1074 			pcieb->pcieb_isr_tab[hp_msi_off] |= PCIEB_INTR_SRC_HP;
1075 
1076 		if (is_pme)
1077 			pcieb->pcieb_isr_tab[hp_msi_off] |= PCIEB_INTR_SRC_PME;
1078 	} else {
1079 		/* INTx handles only Hotplug interrupts */
1080 		if (is_hp)
1081 			pcieb->pcieb_isr_tab[0] |= PCIEB_INTR_SRC_HP;
1082 	}
1083 
1084 
1085 	/*
1086 	 * Get the MSI offset for errors from the AER Root Error status
1087 	 * register.
1088 	 */
1089 	if ((intr_type == DDI_INTR_TYPE_MSI) && PCIE_IS_RP(bus_p)) {
1090 		if (PCIE_HAS_AER(bus_p)) {
1091 			int aer_msi_off;
1092 			aer_msi_off = (PCI_XCAP_GET32(bus_p->bus_cfg_hdl, NULL,
1093 			    bus_p->bus_aer_off, PCIE_AER_RE_STS) >>
1094 			    PCIE_AER_RE_STS_MSG_NUM_SHIFT) &
1095 			    PCIE_AER_RE_STS_MSG_NUM_MASK;
1096 
1097 			if (aer_msi_off >= count) {
1098 				PCIEB_DEBUG(DBG_ATTACH, dip, "MSI number %d in"
1099 				    " AER cap > max allocated %d\n",
1100 				    aer_msi_off, count);
1101 				mutex_exit(&pcieb->pcieb_intr_mutex);
1102 				goto FAIL;
1103 			}
1104 			pcieb->pcieb_isr_tab[aer_msi_off] |= PCIEB_INTR_SRC_AER;
1105 		} else {
1106 			/*
1107 			 * This RP does not have AER. Fallback to the
1108 			 * SERR+Machinecheck approach if available.
1109 			 */
1110 			pcieb->pcieb_no_aer_msi = B_TRUE;
1111 		}
1112 	}
1113 
1114 	mutex_exit(&pcieb->pcieb_intr_mutex);
1115 	return (DDI_SUCCESS);
1116 
1117 FAIL:
1118 	pcieb_intr_fini(pcieb);
1119 	return (DDI_FAILURE);
1120 }
1121 
static void
pcieb_intr_fini(pcieb_devstate_t *pcieb)
{
	int x;
	int count = pcieb->pcieb_intr_count;
	int flags = pcieb->pcieb_init_flags;

	/*
	 * Tear down interrupt resources in the reverse order of setup,
	 * consulting pcieb_init_flags so that a partially completed
	 * attach can be unwound safely.
	 */

	/* If interrupts were enabled as a block, disable them as a block. */
	if ((flags & PCIEB_INIT_ENABLE) &&
	    (flags & PCIEB_INIT_BLOCK)) {
		(void) ddi_intr_block_disable(pcieb->pcieb_htable, count);
		/*
		 * Clear both bits in the local copy so the per-vector loop
		 * below does not disable the vectors a second time.
		 */
		flags &= ~(PCIEB_INIT_ENABLE |
		    PCIEB_INIT_BLOCK);
	}

	if (flags & PCIEB_INIT_MUTEX)
		mutex_destroy(&pcieb->pcieb_intr_mutex);

	/* Per vector: disable (non-block case), remove handler, then free. */
	for (x = 0; x < count; x++) {
		if (flags & PCIEB_INIT_ENABLE)
			(void) ddi_intr_disable(pcieb->pcieb_htable[x]);

		if (flags & PCIEB_INIT_HANDLER)
			(void) ddi_intr_remove_handler(pcieb->pcieb_htable[x]);

		if (flags & PCIEB_INIT_ALLOC)
			(void) ddi_intr_free(pcieb->pcieb_htable[x]);
	}

	flags &= ~(PCIEB_INIT_ENABLE | PCIEB_INIT_HANDLER | PCIEB_INIT_ALLOC |
	    PCIEB_INIT_MUTEX);

	if (flags & PCIEB_INIT_HTABLE)
		kmem_free(pcieb->pcieb_htable, pcieb->pcieb_htable_size);

	flags &= ~PCIEB_INIT_HTABLE;

	/* Retain only the bits that were NOT torn down above. */
	pcieb->pcieb_init_flags &= flags;
}
1160 
1161 /*
1162  * Checks if this device needs MSIs enabled or not.
1163  */
1164 /*ARGSUSED*/
1165 static int
1166 pcieb_msi_supported(dev_info_t *dip)
1167 {
1168 	return ((pcieb_enable_msi && pcieb_plat_msi_supported(dip)) ?
1169 	    DDI_SUCCESS: DDI_FAILURE);
1170 }
1171 
1172 /*ARGSUSED*/
1173 static int
1174 pcieb_fm_init_child(dev_info_t *dip, dev_info_t *tdip, int cap,
1175     ddi_iblock_cookie_t *ibc)
1176 {
1177 	pcieb_devstate_t  *pcieb = ddi_get_soft_state(pcieb_state,
1178 	    ddi_get_instance(dip));
1179 
1180 	ASSERT(ibc != NULL);
1181 	*ibc = pcieb->pcieb_fm_ibc;
1182 
1183 	return (DEVI(dip)->devi_fmhdl->fh_cap | DDI_FM_ACCCHK_CAPABLE |
1184 	    DDI_FM_DMACHK_CAPABLE);
1185 }
1186 
1187 static int
1188 pcieb_fm_init(pcieb_devstate_t *pcieb_p)
1189 {
1190 	dev_info_t	*dip = pcieb_p->pcieb_dip;
1191 	int		fm_cap = DDI_FM_EREPORT_CAPABLE;
1192 
1193 	/*
1194 	 * Request our capability level and get our parents capability
1195 	 * and ibc.
1196 	 */
1197 	ddi_fm_init(dip, &fm_cap, &pcieb_p->pcieb_fm_ibc);
1198 
1199 	return (DDI_SUCCESS);
1200 }
1201 
1202 /*
1203  * Breakdown our FMA resources
1204  */
1205 static void
1206 pcieb_fm_fini(pcieb_devstate_t *pcieb_p)
1207 {
1208 	/*
1209 	 * Clean up allocated fm structures
1210 	 */
1211 	ddi_fm_fini(pcieb_p->pcieb_dip);
1212 }
1213 
1214 static int
1215 pcieb_open(dev_t *devp, int flags, int otyp, cred_t *credp)
1216 {
1217 	int		inst = PCI_MINOR_NUM_TO_INSTANCE(getminor(*devp));
1218 	pcieb_devstate_t	*pcieb = ddi_get_soft_state(pcieb_state, inst);
1219 	int	rv;
1220 
1221 	if (pcieb == NULL)
1222 		return (ENXIO);
1223 
1224 	mutex_enter(&pcieb->pcieb_mutex);
1225 	rv = pcie_open(pcieb->pcieb_dip, devp, flags, otyp, credp);
1226 	mutex_exit(&pcieb->pcieb_mutex);
1227 
1228 	return (rv);
1229 }
1230 
1231 static int
1232 pcieb_close(dev_t dev, int flags, int otyp, cred_t *credp)
1233 {
1234 	int		inst = PCI_MINOR_NUM_TO_INSTANCE(getminor(dev));
1235 	pcieb_devstate_t	*pcieb = ddi_get_soft_state(pcieb_state, inst);
1236 	int	rv;
1237 
1238 	if (pcieb == NULL)
1239 		return (ENXIO);
1240 
1241 	mutex_enter(&pcieb->pcieb_mutex);
1242 	rv = pcie_close(pcieb->pcieb_dip, dev, flags, otyp, credp);
1243 	mutex_exit(&pcieb->pcieb_mutex);
1244 
1245 	return (rv);
1246 }
1247 
1248 static int
1249 pcieb_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1250 	int *rvalp)
1251 {
1252 	int		inst = PCI_MINOR_NUM_TO_INSTANCE(getminor(dev));
1253 	pcieb_devstate_t	*pcieb = ddi_get_soft_state(pcieb_state, inst);
1254 	int		rv;
1255 
1256 	if (pcieb == NULL)
1257 		return (ENXIO);
1258 
1259 	/* To handle devctl and hotplug related ioctls */
1260 	rv = pcie_ioctl(pcieb->pcieb_dip, dev, cmd, arg, mode, credp, rvalp);
1261 
1262 	return (rv);
1263 }
1264 
1265 /*
1266  * Common interrupt handler for hotplug, PME and errors.
1267  */
1268 static uint_t
1269 pcieb_intr_handler(caddr_t arg1, caddr_t arg2)
1270 {
1271 	pcieb_devstate_t *pcieb_p = (pcieb_devstate_t *)arg1;
1272 	dev_info_t	*dip = pcieb_p->pcieb_dip;
1273 	ddi_fm_error_t	derr;
1274 	int		sts = 0;
1275 	int		ret = DDI_INTR_UNCLAIMED;
1276 	int		isrc;
1277 
1278 	if (!(pcieb_p->pcieb_init_flags & PCIEB_INIT_ENABLE))
1279 		goto FAIL;
1280 
1281 	mutex_enter(&pcieb_p->pcieb_intr_mutex);
1282 	isrc = pcieb_p->pcieb_isr_tab[(int)(uintptr_t)arg2];
1283 	mutex_exit(&pcieb_p->pcieb_intr_mutex);
1284 
1285 	PCIEB_DEBUG(DBG_INTR, dip, "Received intr number %d\n",
1286 	    (int)(uintptr_t)arg2);
1287 
1288 	if (isrc == PCIEB_INTR_SRC_UNKNOWN)
1289 		goto FAIL;
1290 
1291 	if (isrc & PCIEB_INTR_SRC_HP)
1292 		ret = pcie_intr(dip);
1293 
1294 	if (isrc & PCIEB_INTR_SRC_PME)
1295 		ret = DDI_INTR_CLAIMED;
1296 
1297 	/* AER Error */
1298 	if (isrc & PCIEB_INTR_SRC_AER) {
1299 		/*
1300 		 *  If MSI is shared with PME/hotplug then check Root Error
1301 		 *  Status Reg before claiming it. For now it's ok since
1302 		 *  we know we get 2 MSIs.
1303 		 */
1304 		ret = DDI_INTR_CLAIMED;
1305 		bzero(&derr, sizeof (ddi_fm_error_t));
1306 		derr.fme_version = DDI_FME_VERSION;
1307 		mutex_enter(&pcieb_p->pcieb_peek_poke_mutex);
1308 		mutex_enter(&pcieb_p->pcieb_err_mutex);
1309 
1310 		if ((DEVI(dip)->devi_fmhdl->fh_cap) & DDI_FM_EREPORT_CAPABLE)
1311 			sts = pf_scan_fabric(dip, &derr, NULL);
1312 
1313 		mutex_exit(&pcieb_p->pcieb_err_mutex);
1314 		mutex_exit(&pcieb_p->pcieb_peek_poke_mutex);
1315 		if (pcieb_die & sts)
1316 			fm_panic("%s-%d: PCI(-X) Express Fatal Error. (0x%x)",
1317 			    ddi_driver_name(dip), ddi_get_instance(dip), sts);
1318 	}
1319 FAIL:
1320 	return (ret);
1321 }
1322 
1323 /*
1324  * Some PCI-X to PCI-E bridges do not support full 64-bit addressing on the
1325  * PCI-X side of the bridge.  We build a special version of this driver for
1326  * those bridges, which uses PCIEB_ADDR_LIMIT_LO and/or PCIEB_ADDR_LIMIT_HI
1327  * to define the range of values which the chip can handle.  The code below
1328  * then clamps the DMA address range supplied by the driver, preventing the
1329  * PCI-E nexus driver from allocating any memory the bridge can't deal
1330  * with.
1331  */
1332 static int
1333 pcieb_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
1334 	ddi_dma_attr_t *attr_p, int (*waitfp)(caddr_t), caddr_t arg,
1335 	ddi_dma_handle_t *handlep)
1336 {
1337 	int		ret;
1338 #ifdef	BCM_SW_WORKAROUNDS
1339 	uint64_t	lim;
1340 
1341 	/*
1342 	 * If the leaf device's limits are outside than what the Broadcom
1343 	 * bridge can handle, we need to clip the values passed up the chain.
1344 	 */
1345 	lim = attr_p->dma_attr_addr_lo;
1346 	attr_p->dma_attr_addr_lo = MAX(lim, PCIEB_ADDR_LIMIT_LO);
1347 
1348 	lim = attr_p->dma_attr_addr_hi;
1349 	attr_p->dma_attr_addr_hi = MIN(lim, PCIEB_ADDR_LIMIT_HI);
1350 
1351 #endif	/* BCM_SW_WORKAROUNDS */
1352 
1353 	/*
1354 	 * This is a software workaround to fix the Broadcom 5714/5715 PCIe-PCI
1355 	 * bridge prefetch bug. Intercept the DMA alloc handle request and set
1356 	 * PX_DMAI_FLAGS_MAP_BUFZONE flag in the handle. If this flag is set,
1357 	 * the px nexus driver will allocate an extra page & make it valid one,
1358 	 * for any DVMA request that comes from any of the Broadcom bridge child
1359 	 * devices.
1360 	 */
1361 	if ((ret = ddi_dma_allochdl(dip, rdip, attr_p, waitfp, arg,
1362 	    handlep)) == DDI_SUCCESS) {
1363 		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)*handlep;
1364 #ifdef	BCM_SW_WORKAROUNDS
1365 		mp->dmai_inuse |= PX_DMAI_FLAGS_MAP_BUFZONE;
1366 #endif	/* BCM_SW_WORKAROUNDS */
1367 		/*
1368 		 * For a given rdip, update mp->dmai_bdf with the bdf value
1369 		 * of pcieb's immediate child or secondary bus-id of the
1370 		 * PCIe2PCI bridge.
1371 		 */
1372 		mp->dmai_minxfer = pcie_get_bdf_for_dma_xfer(dip, rdip);
1373 	}
1374 
1375 	return (ret);
1376 }
1377 
1378 /*
1379  * FDVMA feature is not supported for any child device of Broadcom 5714/5715
1380  * PCIe-PCI bridge due to prefetch bug. Return failure immediately, so that
1381  * these drivers will switch to regular DVMA path.
1382  */
1383 /*ARGSUSED*/
1384 static int
1385 pcieb_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
1386 	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
1387 	uint_t cache_flags)
1388 {
1389 	int	ret;
1390 
1391 #ifdef	BCM_SW_WORKAROUNDS
1392 	if (cmd == DDI_DMA_RESERVE)
1393 		return (DDI_FAILURE);
1394 #endif	/* BCM_SW_WORKAROUNDS */
1395 
1396 	if (((ret = ddi_dma_mctl(dip, rdip, handle, cmd, offp, lenp, objp,
1397 	    cache_flags)) == DDI_SUCCESS) && (cmd == DDI_DMA_RESERVE)) {
1398 		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)*objp;
1399 
1400 		/*
1401 		 * For a given rdip, update mp->dmai_bdf with the bdf value
1402 		 * of pcieb's immediate child or secondary bus-id of the
1403 		 * PCIe2PCI bridge.
1404 		 */
1405 		mp->dmai_minxfer = pcie_get_bdf_for_dma_xfer(dip, rdip);
1406 	}
1407 
1408 	return (ret);
1409 }
1410 
1411 static int
1412 pcieb_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
1413     ddi_intr_handle_impl_t *hdlp, void *result)
1414 {
1415 	return (pcieb_plat_intr_ops(dip, rdip, intr_op, hdlp, result));
1416 
1417 }
1418 
1419 /*
1420  * Power management related initialization specific to pcieb.
1421  * Called by pcieb_attach()
1422  */
1423 static int
1424 pcieb_pwr_setup(dev_info_t *dip)
1425 {
1426 	char *comp_array[5];
1427 	int i;
1428 	ddi_acc_handle_t conf_hdl;
1429 	uint16_t pmcap, cap_ptr;
1430 	pcie_pwr_t *pwr_p;
1431 
1432 	/* Some platforms/devices may choose to disable PM */
1433 	if (pcieb_plat_pwr_disable(dip)) {
1434 		(void) pcieb_pwr_disable(dip);
1435 		return (DDI_SUCCESS);
1436 	}
1437 
1438 	ASSERT(PCIE_PMINFO(dip));
1439 	pwr_p = PCIE_NEXUS_PMINFO(dip);
1440 	ASSERT(pwr_p);
1441 
1442 	/* Code taken from pci_pci driver */
1443 	if (pci_config_setup(dip, &pwr_p->pwr_conf_hdl) != DDI_SUCCESS) {
1444 		PCIEB_DEBUG(DBG_PWR, dip, "pcieb_pwr_setup: pci_config_setup "
1445 		    "failed\n");
1446 		return (DDI_FAILURE);
1447 	}
1448 	conf_hdl = pwr_p->pwr_conf_hdl;
1449 
1450 	/*
1451 	 * Walk the capabilities searching for a PM entry.
1452 	 */
1453 	if ((PCI_CAP_LOCATE(conf_hdl, PCI_CAP_ID_PM, &cap_ptr)) ==
1454 	    DDI_FAILURE) {
1455 		PCIEB_DEBUG(DBG_PWR, dip, "switch/bridge does not support PM. "
1456 		    " PCI PM data structure not found in config header\n");
1457 		pci_config_teardown(&conf_hdl);
1458 		return (DDI_SUCCESS);
1459 	}
1460 	/*
1461 	 * Save offset to pmcsr for future references.
1462 	 */
1463 	pwr_p->pwr_pmcsr_offset = cap_ptr + PCI_PMCSR;
1464 	pmcap = PCI_CAP_GET16(conf_hdl, NULL, cap_ptr, PCI_PMCAP);
1465 	if (pmcap & PCI_PMCAP_D1) {
1466 		PCIEB_DEBUG(DBG_PWR, dip, "D1 state supported\n");
1467 		pwr_p->pwr_pmcaps |= PCIE_SUPPORTS_D1;
1468 	}
1469 	if (pmcap & PCI_PMCAP_D2) {
1470 		PCIEB_DEBUG(DBG_PWR, dip, "D2 state supported\n");
1471 		pwr_p->pwr_pmcaps |= PCIE_SUPPORTS_D2;
1472 	}
1473 
1474 	i = 0;
1475 	comp_array[i++] = "NAME=PCIe switch/bridge PM";
1476 	comp_array[i++] = "0=Power Off (D3)";
1477 	if (pwr_p->pwr_pmcaps & PCIE_SUPPORTS_D2)
1478 		comp_array[i++] = "1=D2";
1479 	if (pwr_p->pwr_pmcaps & PCIE_SUPPORTS_D1)
1480 		comp_array[i++] = "2=D1";
1481 	comp_array[i++] = "3=Full Power D0";
1482 
1483 	/*
1484 	 * Create pm-components property, if it does not exist already.
1485 	 */
1486 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
1487 	    "pm-components", comp_array, i) != DDI_PROP_SUCCESS) {
1488 		PCIEB_DEBUG(DBG_PWR, dip, "could not create pm-components "
1489 		    " prop\n");
1490 		pci_config_teardown(&conf_hdl);
1491 		return (DDI_FAILURE);
1492 	}
1493 	return (pcieb_pwr_init_and_raise(dip, pwr_p));
1494 }
1495 
1496 /*
1497  * undo whatever is done in pcieb_pwr_setup. called by pcieb_detach()
1498  */
1499 static void
1500 pcieb_pwr_teardown(dev_info_t *dip)
1501 {
1502 	pcie_pwr_t	*pwr_p;
1503 
1504 	if (!PCIE_PMINFO(dip) || !(pwr_p = PCIE_NEXUS_PMINFO(dip)))
1505 		return;
1506 
1507 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "pm-components");
1508 	if (pwr_p->pwr_conf_hdl)
1509 		pci_config_teardown(&pwr_p->pwr_conf_hdl);
1510 }
1511 
1512 /*
1513  * Initializes the power level and raise the power to D0, if it is
1514  * not at D0.
1515  */
1516 static int
1517 pcieb_pwr_init_and_raise(dev_info_t *dip, pcie_pwr_t *pwr_p)
1518 {
1519 	uint16_t pmcsr;
1520 	int ret = DDI_SUCCESS;
1521 
1522 	/*
1523 	 * Intialize our power level from PMCSR. The common code initializes
1524 	 * this to UNKNOWN. There is no guarantee that we will be at full
1525 	 * power at attach. If we are not at D0, raise the power.
1526 	 */
1527 	pmcsr = pci_config_get16(pwr_p->pwr_conf_hdl, pwr_p->pwr_pmcsr_offset);
1528 	pmcsr &= PCI_PMCSR_STATE_MASK;
1529 	switch (pmcsr) {
1530 	case PCI_PMCSR_D0:
1531 		pwr_p->pwr_func_lvl = PM_LEVEL_D0;
1532 		break;
1533 
1534 	case PCI_PMCSR_D1:
1535 		pwr_p->pwr_func_lvl = PM_LEVEL_D1;
1536 		break;
1537 
1538 	case PCI_PMCSR_D2:
1539 		pwr_p->pwr_func_lvl = PM_LEVEL_D2;
1540 		break;
1541 
1542 	case PCI_PMCSR_D3HOT:
1543 		pwr_p->pwr_func_lvl = PM_LEVEL_D3;
1544 		break;
1545 
1546 	default:
1547 		break;
1548 	}
1549 
1550 	/* Raise the power to D0. */
1551 	if (pwr_p->pwr_func_lvl != PM_LEVEL_D0 &&
1552 	    ((ret = pm_raise_power(dip, 0, PM_LEVEL_D0)) != DDI_SUCCESS)) {
1553 		/*
1554 		 * Read PMCSR again. If it is at D0, ignore the return
1555 		 * value from pm_raise_power.
1556 		 */
1557 		pmcsr = pci_config_get16(pwr_p->pwr_conf_hdl,
1558 		    pwr_p->pwr_pmcsr_offset);
1559 		if ((pmcsr & PCI_PMCSR_STATE_MASK) == PCI_PMCSR_D0)
1560 			ret = DDI_SUCCESS;
1561 		else {
1562 			PCIEB_DEBUG(DBG_PWR, dip, "pcieb_pwr_setup: could not "
1563 			    "raise power to D0 \n");
1564 		}
1565 	}
1566 	if (ret == DDI_SUCCESS)
1567 		pwr_p->pwr_func_lvl = PM_LEVEL_D0;
1568 	return (ret);
1569 }
1570 
1571 /*
1572  * Disable PM for x86 and PLX 8532 switch.
1573  * For PLX Transitioning one port on this switch to low power causes links
1574  * on other ports on the same station to die. Due to PLX erratum #34, we
1575  * can't allow the downstream device go to non-D0 state.
1576  */
1577 static int
1578 pcieb_pwr_disable(dev_info_t *dip)
1579 {
1580 	pcie_pwr_t *pwr_p;
1581 
1582 	ASSERT(PCIE_PMINFO(dip));
1583 	pwr_p = PCIE_NEXUS_PMINFO(dip);
1584 	ASSERT(pwr_p);
1585 	PCIEB_DEBUG(DBG_PWR, dip, "pcieb_pwr_disable: disabling PM\n");
1586 	pwr_p->pwr_func_lvl = PM_LEVEL_D0;
1587 	pwr_p->pwr_flags = PCIE_NO_CHILD_PM;
1588 	return (DDI_SUCCESS);
1589 }
1590 
#ifdef DEBUG
/* Non-zero: also emit debug messages while servicing an interrupt. */
int pcieb_dbg_intr_print = 0;
/*
 * Driver debug printf routine, gated by the file-level pcieb_dbg_print
 * switch.  Prefixes each message with driver name, instance and the
 * symbolic name of the debug category ('bit' indexes pcieb_debug_sym).
 * Messages from interrupt context are suppressed unless
 * pcieb_dbg_intr_print is set.
 */
void
pcieb_dbg(uint_t bit, dev_info_t *dip, char *fmt, ...)
{
	va_list ap;

	if (!pcieb_dbg_print)
		return;

	if (dip)
		prom_printf("%s(%d): %s", ddi_driver_name(dip),
		    ddi_get_instance(dip), pcieb_debug_sym[bit]);

	va_start(ap, fmt);
	if (servicing_interrupt()) {
		if (pcieb_dbg_intr_print)
			prom_vprintf(fmt, ap);
	} else {
		prom_vprintf(fmt, ap);
	}

	va_end(ap);
}
#endif
1616 
static void
pcieb_id_props(pcieb_devstate_t *pcieb)
{
	uint64_t serialid = 0;	/* 40b field of EUI-64 serial no. register */
	uint16_t cap_ptr;
	uint8_t fic = 0;	/* 1 = first in chassis device */
	pcie_bus_t *bus_p = PCIE_DIP2BUS(pcieb->pcieb_dip);
	ddi_acc_handle_t config_handle = bus_p->bus_cfg_hdl;

	/*
	 * Export identity properties for this bridge: the boolean
	 * "first-in-chassis" and the 64-bit "serialid#", derived from the
	 * Slot Identification capability and the PCIe Device Serial Number
	 * extended capability respectively.
	 */

	/*
	 * Identify first in chassis.  In the special case of a Sun branded
	 * PLX device, it obviously is first in chassis.  Otherwise, in the
	 * general case, look for an Expansion Slot Register and check its
	 * first-in-chassis bit.
	 */
#ifdef	PX_PLX
	uint16_t vendor_id = bus_p->bus_dev_ven_id & 0xFFFF;
	uint16_t device_id = bus_p->bus_dev_ven_id >> 16;
	if ((vendor_id == PXB_VENDOR_SUN) &&
	    ((device_id == PXB_DEVICE_PLX_PCIX) ||
	    (device_id == PXB_DEVICE_PLX_PCIE))) {
		fic = 1;
	}
#endif	/* PX_PLX */
	if ((fic == 0) && ((PCI_CAP_LOCATE(config_handle,
	    PCI_CAP_ID_SLOT_ID, &cap_ptr)) != DDI_FAILURE)) {
		uint8_t esr = PCI_CAP_GET8(config_handle, NULL,
		    cap_ptr, PCI_CAP_ID_REGS_OFF);
		if (PCI_CAPSLOT_FIC(esr))
			fic = 1;
	}

	/* Assemble the 64-bit serial number: upper dword, then lower. */
	if ((PCI_CAP_LOCATE(config_handle,
	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_SER), &cap_ptr)) != DDI_FAILURE) {
		/* Serialid can be 0 thru a full 40b number */
		serialid = PCI_XCAP_GET32(config_handle, NULL,
		    cap_ptr, PCIE_SER_SID_UPPER_DW);
		serialid <<= 32;
		serialid |= PCI_XCAP_GET32(config_handle, NULL,
		    cap_ptr, PCIE_SER_SID_LOWER_DW);
	}

	/* Only publish the properties when they carry information. */
	if (fic)
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, pcieb->pcieb_dip,
		    "first-in-chassis");
	if (serialid)
		(void) ddi_prop_update_int64(DDI_DEV_T_NONE, pcieb->pcieb_dip,
		    "serialid#", serialid);
}
1666 
1667 static void
1668 pcieb_create_ranges_prop(dev_info_t *dip,
1669 	ddi_acc_handle_t config_handle)
1670 {
1671 	uint32_t base, limit;
1672 	ppb_ranges_t	ranges[PCIEB_RANGE_LEN];
1673 	uint8_t io_base_lo, io_limit_lo;
1674 	uint16_t io_base_hi, io_limit_hi, mem_base, mem_limit;
1675 	int i = 0, rangelen = sizeof (ppb_ranges_t)/sizeof (int);
1676 
1677 	io_base_lo = pci_config_get8(config_handle, PCI_BCNF_IO_BASE_LOW);
1678 	io_limit_lo = pci_config_get8(config_handle, PCI_BCNF_IO_LIMIT_LOW);
1679 	io_base_hi = pci_config_get16(config_handle, PCI_BCNF_IO_BASE_HI);
1680 	io_limit_hi = pci_config_get16(config_handle, PCI_BCNF_IO_LIMIT_HI);
1681 	mem_base = pci_config_get16(config_handle, PCI_BCNF_MEM_BASE);
1682 	mem_limit = pci_config_get16(config_handle, PCI_BCNF_MEM_LIMIT);
1683 
1684 	/*
1685 	 * Create ranges for IO space
1686 	 */
1687 	ranges[i].size_low = ranges[i].size_high = 0;
1688 	ranges[i].parent_mid = ranges[i].child_mid = ranges[i].parent_high = 0;
1689 	ranges[i].child_high = ranges[i].parent_high |=
1690 	    (PCI_REG_REL_M | PCI_ADDR_IO);
1691 	base = PCIEB_16bit_IOADDR(io_base_lo);
1692 	limit = PCIEB_16bit_IOADDR(io_limit_lo);
1693 
1694 	if ((io_base_lo & 0xf) == PCIEB_32BIT_IO) {
1695 		base = PCIEB_LADDR(base, io_base_hi);
1696 	}
1697 	if ((io_limit_lo & 0xf) == PCIEB_32BIT_IO) {
1698 		limit = PCIEB_LADDR(limit, io_limit_hi);
1699 	}
1700 
1701 	if ((io_base_lo & PCIEB_32BIT_IO) && (io_limit_hi > 0)) {
1702 		base = PCIEB_LADDR(base, io_base_hi);
1703 		limit = PCIEB_LADDR(limit, io_limit_hi);
1704 	}
1705 
1706 	/*
1707 	 * Create ranges for 32bit memory space
1708 	 */
1709 	base = PCIEB_32bit_MEMADDR(mem_base);
1710 	limit = PCIEB_32bit_MEMADDR(mem_limit);
1711 	ranges[i].size_low = ranges[i].size_high = 0;
1712 	ranges[i].parent_mid = ranges[i].child_mid = ranges[i].parent_high = 0;
1713 	ranges[i].child_high = ranges[i].parent_high |=
1714 	    (PCI_REG_REL_M | PCI_ADDR_MEM32);
1715 	ranges[i].child_low = ranges[i].parent_low = base;
1716 	if (limit >= base) {
1717 		ranges[i].size_low = limit - base + PCIEB_MEMGRAIN;
1718 		i++;
1719 	}
1720 
1721 	if (i) {
1722 		(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "ranges",
1723 		    (int *)ranges, i * rangelen);
1724 	}
1725 }
1726 
1727 /*
1728  * For PCI and PCI-X devices including PCIe2PCI bridge, initialize
1729  * cache-line-size and latency timer configuration registers.
1730  */
1731 void
1732 pcieb_set_pci_perf_parameters(dev_info_t *dip, ddi_acc_handle_t cfg_hdl)
1733 {
1734 	uint_t	n;
1735 
1736 	/* Initialize cache-line-size configuration register if needed */
1737 	if (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1738 	    "cache-line-size", 0) == 0) {
1739 		pci_config_put8(cfg_hdl, PCI_CONF_CACHE_LINESZ,
1740 		    PCIEB_CACHE_LINE_SIZE);
1741 		n = pci_config_get8(cfg_hdl, PCI_CONF_CACHE_LINESZ);
1742 		if (n != 0) {
1743 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
1744 			    "cache-line-size", n);
1745 		}
1746 	}
1747 
1748 	/* Initialize latency timer configuration registers if needed */
1749 	if (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1750 	    "latency-timer", 0) == 0) {
1751 		uchar_t	min_gnt, latency_timer;
1752 		uchar_t header_type;
1753 
1754 		/* Determine the configuration header type */
1755 		header_type = pci_config_get8(cfg_hdl, PCI_CONF_HEADER);
1756 
1757 		if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
1758 			latency_timer = PCIEB_LATENCY_TIMER;
1759 			pci_config_put8(cfg_hdl, PCI_BCNF_LATENCY_TIMER,
1760 			    latency_timer);
1761 		} else {
1762 			min_gnt = pci_config_get8(cfg_hdl, PCI_CONF_MIN_G);
1763 			latency_timer = min_gnt * 8;
1764 		}
1765 
1766 		pci_config_put8(cfg_hdl, PCI_CONF_LATENCY_TIMER,
1767 		    latency_timer);
1768 		n = pci_config_get8(cfg_hdl, PCI_CONF_LATENCY_TIMER);
1769 		if (n != 0) {
1770 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
1771 			    "latency-timer", n);
1772 		}
1773 	}
1774 }
1775