xref: /illumos-gate/usr/src/uts/common/io/pciex/pcie.c (revision 2f0b63d22a9a05291df8f83619060f9805eb7581)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2019 Joyent, Inc.
25  */
26 
27 #include <sys/sysmacros.h>
28 #include <sys/types.h>
29 #include <sys/kmem.h>
30 #include <sys/modctl.h>
31 #include <sys/ddi.h>
32 #include <sys/sunddi.h>
33 #include <sys/sunndi.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/fm/util.h>
36 #include <sys/promif.h>
37 #include <sys/disp.h>
38 #include <sys/stat.h>
39 #include <sys/file.h>
40 #include <sys/pci_cap.h>
41 #include <sys/pci_impl.h>
42 #include <sys/pcie_impl.h>
43 #include <sys/hotplug/pci/pcie_hp.h>
44 #include <sys/hotplug/pci/pciehpc.h>
45 #include <sys/hotplug/pci/pcishpc.h>
46 #include <sys/hotplug/pci/pcicfg.h>
47 #include <sys/pci_cfgacc.h>
48 #include <sys/sysevent.h>
49 #include <sys/sysevent/eventdefs.h>
50 #include <sys/sysevent/pcie.h>
51 
52 /* Local functions prototypes */
53 static void pcie_init_pfd(dev_info_t *);
54 static void pcie_fini_pfd(dev_info_t *);
55 
56 #if defined(__i386) || defined(__amd64)
57 static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *);
58 #endif /* defined(__i386) || defined(__amd64) */
59 
60 #ifdef DEBUG
61 uint_t pcie_debug_flags = 0;
62 static void pcie_print_bus(pcie_bus_t *bus_p);
63 void pcie_dbg(char *fmt, ...);
64 #endif /* DEBUG */
65 
66 /* Variable to control default PCI-Express config settings */
67 ushort_t pcie_command_default =
68     PCI_COMM_SERR_ENABLE |
69     PCI_COMM_WAIT_CYC_ENAB |
70     PCI_COMM_PARITY_DETECT |
71     PCI_COMM_ME |
72     PCI_COMM_MAE |
73     PCI_COMM_IO;
74 
75 /* xxx_fw are bits that are controlled by FW and should not be modified */
76 ushort_t pcie_command_default_fw =
77     PCI_COMM_SPEC_CYC |
78     PCI_COMM_MEMWR_INVAL |
79     PCI_COMM_PALETTE_SNOOP |
80     PCI_COMM_WAIT_CYC_ENAB |
81     0xF800; /* Reserved Bits */
82 
83 ushort_t pcie_bdg_command_default_fw =
84     PCI_BCNF_BCNTRL_ISA_ENABLE |
85     PCI_BCNF_BCNTRL_VGA_ENABLE |
86     0xF000; /* Reserved Bits */
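
/*
 * These combine in pcie_initchild() below: firmware-owned bits keep the
 * value read from the device while all other bits are forced to the
 * defaults, i.e.
 *
 *	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;
 */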
87 
88 /* PCI-Express Base error defaults */
89 ushort_t pcie_base_err_default =
90     PCIE_DEVCTL_CE_REPORTING_EN |
91     PCIE_DEVCTL_NFE_REPORTING_EN |
92     PCIE_DEVCTL_FE_REPORTING_EN |
93     PCIE_DEVCTL_UR_REPORTING_EN;
94 
95 /* PCI-Express Device Control Register */
96 uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
97     PCIE_DEVCTL_MAX_READ_REQ_512;
98 
99 /* PCI-Express AER Root Control Register */
100 #define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
101 				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
102 				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)
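
/*
 * PCIE_ROOT_SYS_ERR collects the same three bits as pcie_root_ctrl_default
 * below; pcie_enable_errors() clears them from the default whenever
 * pcie_serr_disable_flag is set.
 */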
103 
104 ushort_t pcie_root_ctrl_default =
105     PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
106     PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
107     PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;
108 
109 /* PCI-Express Root Error Command Register */
110 ushort_t pcie_root_error_cmd_default =
111     PCIE_AER_RE_CMD_CE_REP_EN |
112     PCIE_AER_RE_CMD_NFE_REP_EN |
113     PCIE_AER_RE_CMD_FE_REP_EN;
114 
115 /* ECRC settings in the PCIe AER Control Register */
116 uint32_t pcie_ecrc_value =
117     PCIE_AER_CTL_ECRC_GEN_ENA |
118     PCIE_AER_CTL_ECRC_CHECK_ENA;
119 
120 /*
121  * If a particular platform wants to disable certain errors such as UR/MA,
122  * instead of using #defines have the platform's PCIe Root Complex driver set
123  * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions.  For
124  * x86 the closest thing to a PCIe root complex driver is NPE.	For SPARC the
125  * closest PCIe root complex driver is PX.
126  *
127  * pcie_serr_disable_flag : disable SERR only (in RCR and command reg).  x86
128  * systems may want to disable SERR in general: for root ports, enabling SERR
129  * causes NMIs which are not handled and result in a watchdog timeout error.
130  */
131 uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
132 uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
133 uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
134 uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */
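
/*
 * Illustrative sketch only: a root complex driver that wants URs ignored
 * would arrange, via the pcie_get_XXX_mask/pcie_set_XXX_mask interfaces
 * mentioned above, for the net effect of
 *
 *	pcie_aer_uce_mask |= PCIE_AER_UCE_UR;
 *
 * before children attach; pcie_enable_errors() then writes this mask into
 * each device's AER UE Mask register.
 */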
135 
136 /* Default severities needed for eversholt.  Error handling doesn't care */
137 uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
138     PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
139     PCIE_AER_UCE_TRAINING;
140 uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
141     PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
142     PCIE_AER_SUCE_USC_MSG_DATA_ERR;
143 
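/*
 * pcie_max_mps holds the 3-bit Device Control Max Payload Size encoding
 * (bytes = 128 << encoding); the field lives in bits 7:5, so shifting
 * PCIE_DEVCTL_MAX_PAYLOAD_4096 right by 5 yields the encoding 5 (4096B).
 */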
144 int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
145 int pcie_disable_ari = 0;
146 
147 /*
148  * Amount of time to wait for an in-progress retraining. The default is to try
149  * 500 times in 10ms chunks, thus a total of 5s.
150  */
151 uint32_t pcie_link_retrain_count = 500;
152 uint32_t pcie_link_retrain_delay_ms = 10;
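
/*
 * Both tunables are plain globals, so (as with other kernel tunables) they
 * could in principle be adjusted from /etc/system, e.g. to double the
 * retrain timeout to 10s:
 *
 *	set pcie:pcie_link_retrain_count = 1000
 *
 * (Illustrative only; the "pcie" prefix assumes this module's name.)
 */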
153 
154 taskq_t *pcie_link_tq;
155 kmutex_t pcie_link_tq_mutex;
156 
157 static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
158 	int *max_supported);
159 static int pcie_get_max_supported(dev_info_t *dip, void *arg);
160 static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
161     caddr_t *addrp, ddi_acc_handle_t *handlep);
162 static void pcie_unmap_phys(ddi_acc_handle_t *handlep,	pci_regspec_t *ph);
163 static int pcie_link_bw_intr(dev_info_t *);
164 static void pcie_capture_speeds(dev_info_t *);
165 
166 dev_info_t *pcie_get_rc_dip(dev_info_t *dip);
167 
168 /*
169  * modload support
170  */
171 
172 static struct modlmisc modlmisc	= {
173 	&mod_miscops,	/* Type	of module */
174 	"PCI Express Framework Module"
175 };
176 
177 static struct modlinkage modlinkage = {
178 	MODREV_1,
179 	(void	*)&modlmisc,
180 	NULL
181 };
182 
183 /*
184  * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
185  * Currently used to send the pci.fabric ereports whose payload depends on the
186  * type of PCI device it is being sent for.
187  */
188 char		*pcie_nv_buf;
189 nv_alloc_t	*pcie_nvap;
190 nvlist_t	*pcie_nvl;
191 
192 int
193 _init(void)
194 {
195 	int rval;
196 
197 	pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
198 	pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
199 	pcie_nvl = fm_nvlist_create(pcie_nvap);
200 	mutex_init(&pcie_link_tq_mutex, NULL, MUTEX_DRIVER, NULL);
201 
202 	if ((rval = mod_install(&modlinkage)) != 0) {
203 		mutex_destroy(&pcie_link_tq_mutex);
204 		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
205 		fm_nva_xdestroy(pcie_nvap);
206 		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
207 	}
208 	return (rval);
209 }
210 
211 int
212 _fini()
213 {
214 	int		rval;
215 
216 	if ((rval = mod_remove(&modlinkage)) == 0) {
217 		if (pcie_link_tq != NULL) {
218 			taskq_destroy(pcie_link_tq);
219 		}
220 		mutex_destroy(&pcie_link_tq_mutex);
221 		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
222 		fm_nva_xdestroy(pcie_nvap);
223 		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
224 	}
225 	return (rval);
226 }
227 
228 int
229 _info(struct modinfo *modinfop)
230 {
231 	return (mod_info(&modlinkage, modinfop));
232 }
233 
234 /* ARGSUSED */
235 int
236 pcie_init(dev_info_t *dip, caddr_t arg)
237 {
238 	int	ret = DDI_SUCCESS;
239 
240 	/*
241 	 * Our _init function is too early to create a taskq. Create the pcie
242 	 * link management taskq here instead.
243 	 */
244 	mutex_enter(&pcie_link_tq_mutex);
245 	if (pcie_link_tq == NULL) {
246 		pcie_link_tq = taskq_create("pcie_link", 1, minclsyspri, 0, 0,
247 		    0);
248 	}
249 	mutex_exit(&pcie_link_tq_mutex);
250 
251 
252 	/*
253 	 * Create a "devctl" minor node to support DEVCTL_DEVICE_*
254 	 * and DEVCTL_BUS_* ioctls to this bus.
255 	 */
256 	if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
257 	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
258 	    DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
259 		PCIE_DBG("Failed to create devctl minor node for %s%d\n",
260 		    ddi_driver_name(dip), ddi_get_instance(dip));
261 
262 		return (ret);
263 	}
264 
265 	if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
266 		/*
267 		 * On some x86 platforms, we observed unexpected hotplug
268 		 * initialization failures in recent years. The known cause
269 		 * is a hardware issue: while the problem PCI bridges have
270 		 * the Hotplug Capable registers set, the machine actually
271 		 * does not implement the expected ACPI object.
272 		 *
273 		 * We don't want to stop PCI driver attach and system boot
274 		 * just because of this hotplug initialization failure.
275 		 * Continue with a debug message printed.
276 		 */
277 		PCIE_DBG("%s%d: Failed setting hotplug framework\n",
278 		    ddi_driver_name(dip), ddi_get_instance(dip));
279 
280 #if defined(__sparc)
281 		ddi_remove_minor_node(dip, "devctl");
282 
283 		return (ret);
284 #endif /* defined(__sparc) */
285 	}
286 
287 	return (DDI_SUCCESS);
288 }
289 
290 /* ARGSUSED */
291 int
292 pcie_uninit(dev_info_t *dip)
293 {
294 	int	ret = DDI_SUCCESS;
295 
296 	if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
297 		(void) pcie_ari_disable(dip);
298 
299 	if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
300 		PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
301 		    ddi_driver_name(dip), ddi_get_instance(dip));
302 
303 		return (ret);
304 	}
305 
306 	if (pcie_link_bw_supported(dip)) {
307 		(void) pcie_link_bw_disable(dip);
308 	}
309 
310 	ddi_remove_minor_node(dip, "devctl");
311 
312 	return (ret);
313 }
314 
315 /*
316  * PCIe module interface for enabling hotplug interrupt.
317  *
318  * It should be called after pcie_init() is done and bus driver's
319  * interrupt handlers have been attached.
320  */
321 int
322 pcie_hpintr_enable(dev_info_t *dip)
323 {
324 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
325 	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);
326 
327 	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
328 		(void) (ctrl_p->hc_ops.enable_hpc_intr)(ctrl_p);
329 	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
330 		(void) pcishpc_enable_irqs(ctrl_p);
331 	}
332 	return (DDI_SUCCESS);
333 }
334 
335 /*
336  * PCIe module interface for disabling hotplug interrupt.
337  *
338  * It should be called before pcie_uninit() is called and bus driver's
339  * interrupt handlers are detached.
340  */
341 int
342 pcie_hpintr_disable(dev_info_t *dip)
343 {
344 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
345 	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);
346 
347 	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
348 		(void) (ctrl_p->hc_ops.disable_hpc_intr)(ctrl_p);
349 	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
350 		(void) pcishpc_disable_irqs(ctrl_p);
351 	}
352 	return (DDI_SUCCESS);
353 }
354 
355 /* ARGSUSED */
356 int
357 pcie_intr(dev_info_t *dip)
358 {
359 	int hp, lbw;
360 
361 	hp = pcie_hp_intr(dip);
362 	lbw = pcie_link_bw_intr(dip);
363 
364 	if (hp == DDI_INTR_CLAIMED || lbw == DDI_INTR_CLAIMED) {
365 		return (DDI_INTR_CLAIMED);
366 	}
367 
368 	return (DDI_INTR_UNCLAIMED);
369 }
370 
371 /* ARGSUSED */
372 int
373 pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
374 {
375 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
376 
377 	/*
378 	 * Make sure the open is for the right file type.
379 	 */
380 	if (otyp != OTYP_CHR)
381 		return (EINVAL);
382 
383 	/*
384 	 * Handle the open by tracking the device state.
385 	 */
386 	if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
387 	    ((flags & FEXCL) &&
388 	    (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
389 		return (EBUSY);
390 	}
391 
392 	if (flags & FEXCL)
393 		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
394 	else
395 		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;
396 
397 	return (0);
398 }
399 
400 /* ARGSUSED */
401 int
402 pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
403 {
404 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
405 
406 	if (otyp != OTYP_CHR)
407 		return (EINVAL);
408 
409 	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
410 
411 	return (0);
412 }
413 
414 /* ARGSUSED */
415 int
416 pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
417     cred_t *credp, int *rvalp)
418 {
419 	struct devctl_iocdata	*dcp;
420 	uint_t			bus_state;
421 	int			rv = DDI_SUCCESS;
422 
423 	/*
424 	 * We can use the generic implementation for devctl ioctl
425 	 */
426 	switch (cmd) {
427 	case DEVCTL_DEVICE_GETSTATE:
428 	case DEVCTL_DEVICE_ONLINE:
429 	case DEVCTL_DEVICE_OFFLINE:
430 	case DEVCTL_BUS_GETSTATE:
431 		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
432 	default:
433 		break;
434 	}
435 
436 	/*
437 	 * read devctl ioctl data
438 	 */
439 	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
440 		return (EFAULT);
441 
442 	switch (cmd) {
443 	case DEVCTL_BUS_QUIESCE:
444 		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
445 			if (bus_state == BUS_QUIESCED)
446 				break;
447 		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
448 		break;
449 	case DEVCTL_BUS_UNQUIESCE:
450 		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
451 			if (bus_state == BUS_ACTIVE)
452 				break;
453 		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
454 		break;
455 	case DEVCTL_BUS_RESET:
456 	case DEVCTL_BUS_RESETALL:
457 	case DEVCTL_DEVICE_RESET:
458 		rv = ENOTSUP;
459 		break;
460 	default:
461 		rv = ENOTTY;
462 	}
463 
464 	ndi_dc_freehdl(dcp);
465 	return (rv);
466 }
467 
468 /* ARGSUSED */
469 int
470 pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
471     int flags, char *name, caddr_t valuep, int *lengthp)
472 {
473 	if (dev == DDI_DEV_T_ANY)
474 		goto skip;
475 
476 	if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
477 	    strcmp(name, "pci-occupant") == 0) {
478 		int	pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));
479 
480 		pcie_hp_create_occupant_props(dip, dev, pci_dev);
481 	}
482 
483 skip:
484 	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
485 }
486 
487 int
488 pcie_init_cfghdl(dev_info_t *cdip)
489 {
490 	pcie_bus_t		*bus_p;
491 	ddi_acc_handle_t	eh = NULL;
492 
493 	bus_p = PCIE_DIP2BUS(cdip);
494 	if (bus_p == NULL)
495 		return (DDI_FAILURE);
496 
497 	/* Create a config access handle dedicated to error handling */
498 	if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
499 		cmn_err(CE_WARN, "Cannot setup config access"
500 		    " for BDF 0x%x\n", bus_p->bus_bdf);
501 		return (DDI_FAILURE);
502 	}
503 
504 	bus_p->bus_cfg_hdl = eh;
505 	return (DDI_SUCCESS);
506 }
507 
508 void
509 pcie_fini_cfghdl(dev_info_t *cdip)
510 {
511 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(cdip);
512 
513 	pci_config_teardown(&bus_p->bus_cfg_hdl);
514 }
515 
516 void
517 pcie_determine_serial(dev_info_t *dip)
518 {
519 	pcie_bus_t		*bus_p = PCIE_DIP2BUS(dip);
520 	ddi_acc_handle_t	h;
521 	uint16_t		cap;
522 	uchar_t			serial[8];
523 	uint32_t		low, high;
524 
525 	if (!PCIE_IS_PCIE(bus_p))
526 		return;
527 
528 	h = bus_p->bus_cfg_hdl;
529 
530 	if ((PCI_CAP_LOCATE(h, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_SER), &cap)) ==
531 	    DDI_FAILURE)
532 		return;
533 
534 	high = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_UPPER_DW);
535 	low = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_LOWER_DW);
536 
537 	/*
538 	 * Here, we're trying to figure out if we had an invalid PCIe read. From
539 	 * looking at the contents of the value, it can be hard to tell the
540 	 * difference between a value that is legitimately all 1s and a failed
541 	 * read. In this case, we only assume it's invalid if both register
542 	 * reads are invalid. We also use only 32-bit reads, as we're not sure
543 	 * that all devices will support these as 64-bit reads, while we know
544 	 * they'll support them as 32-bit reads.
545 	 */
546 	if (high == PCI_EINVAL32 && low == PCI_EINVAL32)
547 		return;
548 
549 	serial[0] = low & 0xff;
550 	serial[1] = (low >> 8) & 0xff;
551 	serial[2] = (low >> 16) & 0xff;
552 	serial[3] = (low >> 24) & 0xff;
553 	serial[4] = high & 0xff;
554 	serial[5] = (high >> 8) & 0xff;
555 	serial[6] = (high >> 16) & 0xff;
556 	serial[7] = (high >> 24) & 0xff;
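
	/*
	 * Illustrative example: a (hypothetical) device returning
	 * high = 0x00000001 and low = 0x02030405 yields
	 * serial[] = { 05, 04, 03, 02, 01, 00, 00, 00 }, i.e. the 64-bit
	 * serial number stored least significant byte first.
	 */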
557 
558 	(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, "pcie-serial",
559 	    serial, sizeof (serial));
560 }
561 
562 /*
563  * PCI-Express child device initialization.
564  * This function enables generic pci-express interrupts and error
565  * handling.
566  *
568  * @param cdip		child's dip (device's dip)
569  * @return		DDI_SUCCESS or DDI_FAILURE
570  */
571 /* ARGSUSED */
572 int
573 pcie_initchild(dev_info_t *cdip)
574 {
575 	uint16_t		tmp16, reg16;
576 	pcie_bus_t		*bus_p;
577 	uint32_t		devid, venid;
578 
579 	bus_p = PCIE_DIP2BUS(cdip);
580 	if (bus_p == NULL) {
581 		PCIE_DBG("%s: BUS not found.\n",
582 		    ddi_driver_name(cdip));
583 
584 		return (DDI_FAILURE);
585 	}
586 
587 	if (pcie_init_cfghdl(cdip) != DDI_SUCCESS)
588 		return (DDI_FAILURE);
589 
590 	/*
591 	 * Update pcie_bus_t with the real Vendor ID and Device ID.
592 	 *
593 	 * For assigned devices in an IOV environment, OBP returns a faked
594 	 * device-id/vendor-id on configuration reads, and likewise for both
595 	 * properties, in the root domain. The translate_devid() function
596 	 * updates the properties with the real device-id/vendor-id on such
597 	 * platforms, so that we can use the properties here to get the real
598 	 * IDs and overwrite the faked ones.
599 	 *
600 	 * For unassigned devices, or devices in a non-IOV environment, the
601 	 * operation below won't make a difference.
602 	 *
603 	 * The IOV implementation only supports assignment of PCIe
604 	 * endpoint devices. Devices under PCI-PCI bridges don't need
605 	 * this treatment.
606 	 */
607 	devid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
608 	    "device-id", -1);
609 	venid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
610 	    "vendor-id", -1);
611 	bus_p->bus_dev_ven_id = (devid << 16) | (venid & 0xffff);
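
	/*
	 * bus_dev_ven_id packs the device-id into the upper 16 bits and the
	 * vendor-id into the lower 16; e.g. the NVIDIA bridge special-cased
	 * below has device 0x0370 and vendor 0x10DE, giving 0x037010DE.
	 */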
612 
613 	/* Clear the device's status register */
614 	reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
615 	PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);
616 
617 	/* Setup the device's command register */
618 	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
619 	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;
620 
621 #if defined(__i386) || defined(__amd64)
622 	boolean_t empty_io_range = B_FALSE;
623 	boolean_t empty_mem_range = B_FALSE;
624 	/*
625 	 * Check for empty I/O and memory ranges on bridges. If a range is
626 	 * empty, disable that access type, as enabling it can cause a hang.
627 	 */
628 	pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
629 	    &empty_mem_range);
630 	if ((empty_io_range == B_TRUE) &&
631 	    (pcie_command_default & PCI_COMM_IO)) {
632 		tmp16 &= ~PCI_COMM_IO;
633 		PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
634 		    ddi_driver_name(cdip), bus_p->bus_bdf);
635 	}
636 	if ((empty_mem_range == B_TRUE) &&
637 	    (pcie_command_default & PCI_COMM_MAE)) {
638 		tmp16 &= ~PCI_COMM_MAE;
639 		PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
640 		    ddi_driver_name(cdip), bus_p->bus_bdf);
641 	}
642 #endif /* defined(__i386) || defined(__amd64) */
643 
644 	if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
645 		tmp16 &= ~PCI_COMM_SERR_ENABLE;
646 
647 	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
648 	PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);
649 
650 	/*
651 	 * If the device has a bus control register then program it
652 	 * based on the settings in the command register.
653 	 */
654 	if (PCIE_IS_BDG(bus_p)) {
655 		/* Clear the device's secondary status register */
656 		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
657 		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);
658 
659 		/* Setup the device's secondary command register */
660 		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
661 		tmp16 = (reg16 & pcie_bdg_command_default_fw);
662 
663 		tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
664 		/*
665 		 * Workaround for the NVIDIA bridge checked below: don't set
666 		 * the SERR enable bit in the bridge control register, as it
667 		 * could lead to bogus NMIs.
668 		 */
669 		if (bus_p->bus_dev_ven_id == 0x037010DE)
670 			tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;
671 
672 		if (pcie_command_default & PCI_COMM_PARITY_DETECT)
673 			tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;
674 
675 		/*
676 		 * Enable Master Abort Mode only if URs have not been masked.
677 		 * For PCI and PCIe-PCI bridges, enabling this bit causes
678 		 * Master Aborts/URs to be forwarded as a UR/TA or SERR.  If this
679 		 * bit is masked, posted requests are dropped and non-posted
680 		 * requests are returned with -1.
681 		 */
682 		if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
683 			tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
684 		else
685 			tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
686 		PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
687 		PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
688 		    reg16);
689 	}
690 
691 	if (PCIE_IS_PCIE(bus_p)) {
692 		/* Setup PCIe device control register */
693 		reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
694 		/* note: MPS/MRRS are initialized in pcie_initchild_mps() */
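		/*
		 * Merge semantics: keep the device's current MRRS/MPS field
		 * values, and force every other Device Control bit to
		 * pcie_devctl_default.
		 */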
695 		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
696 		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
697 		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
698 		    PCIE_DEVCTL_MAX_PAYLOAD_MASK));
699 		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
700 		PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
701 
702 		/* Enable PCIe errors */
703 		pcie_enable_errors(cdip);
704 
705 		pcie_determine_serial(cdip);
706 
707 		pcie_capture_speeds(cdip);
708 	}
709 
710 	bus_p->bus_ari = B_FALSE;
711 	if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
712 	    == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
713 	    == PCIE_ARI_DEVICE)) {
714 		bus_p->bus_ari = B_TRUE;
715 	}
716 
717 	if (pcie_initchild_mps(cdip) == DDI_FAILURE) {
718 		pcie_fini_cfghdl(cdip);
719 		return (DDI_FAILURE);
720 	}
721 
722 	return (DDI_SUCCESS);
723 }
724 
725 static void
726 pcie_init_pfd(dev_info_t *dip)
727 {
728 	pf_data_t	*pfd_p = PCIE_ZALLOC(pf_data_t);
729 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
730 
731 	PCIE_DIP2PFD(dip) = pfd_p;
732 
733 	pfd_p->pe_bus_p = bus_p;
734 	pfd_p->pe_severity_flags = 0;
735 	pfd_p->pe_severity_mask = 0;
736 	pfd_p->pe_orig_severity_flags = 0;
737 	pfd_p->pe_lock = B_FALSE;
738 	pfd_p->pe_valid = B_FALSE;
739 
740 	/* Allocate the root fault struct for both RC and RP */
741 	if (PCIE_IS_ROOT(bus_p)) {
742 		PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
743 		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
744 		PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
745 	}
746 
747 	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
748 	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
749 	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
750 
751 	if (PCIE_IS_BDG(bus_p))
752 		PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
753 
754 	if (PCIE_IS_PCIE(bus_p)) {
755 		PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
756 
757 		if (PCIE_IS_RP(bus_p))
758 			PCIE_RP_REG(pfd_p) =
759 			    PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
760 
761 		PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
762 		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;
763 
764 		if (PCIE_IS_RP(bus_p)) {
765 			PCIE_ADV_RP_REG(pfd_p) =
766 			    PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
767 			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
768 			    PCIE_INVALID_BDF;
769 			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
770 			    PCIE_INVALID_BDF;
771 		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
772 			PCIE_ADV_BDG_REG(pfd_p) =
773 			    PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
774 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
775 			    PCIE_INVALID_BDF;
776 		}
777 
778 		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
779 			PCIX_BDG_ERR_REG(pfd_p) =
780 			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);
781 
782 			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
783 				PCIX_BDG_ECC_REG(pfd_p, 0) =
784 				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
785 				PCIX_BDG_ECC_REG(pfd_p, 1) =
786 				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
787 			}
788 		}
789 	} else if (PCIE_IS_PCIX(bus_p)) {
790 		if (PCIE_IS_BDG(bus_p)) {
791 			PCIX_BDG_ERR_REG(pfd_p) =
792 			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);
793 
794 			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
795 				PCIX_BDG_ECC_REG(pfd_p, 0) =
796 				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
797 				PCIX_BDG_ECC_REG(pfd_p, 1) =
798 				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
799 			}
800 		} else {
801 			PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);
802 
803 			if (PCIX_ECC_VERSION_CHECK(bus_p))
804 				PCIX_ECC_REG(pfd_p) =
805 				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
806 		}
807 	}
808 }
809 
810 static void
811 pcie_fini_pfd(dev_info_t *dip)
812 {
813 	pf_data_t	*pfd_p = PCIE_DIP2PFD(dip);
814 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
815 
816 	if (PCIE_IS_PCIE(bus_p)) {
817 		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
818 			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
819 				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
820 				    sizeof (pf_pcix_ecc_regs_t));
821 				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
822 				    sizeof (pf_pcix_ecc_regs_t));
823 			}
824 
825 			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
826 			    sizeof (pf_pcix_bdg_err_regs_t));
827 		}
828 
829 		if (PCIE_IS_RP(bus_p))
830 			kmem_free(PCIE_ADV_RP_REG(pfd_p),
831 			    sizeof (pf_pcie_adv_rp_err_regs_t));
832 		else if (PCIE_IS_PCIE_BDG(bus_p))
833 			kmem_free(PCIE_ADV_BDG_REG(pfd_p),
834 			    sizeof (pf_pcie_adv_bdg_err_regs_t));
835 
836 		kmem_free(PCIE_ADV_REG(pfd_p),
837 		    sizeof (pf_pcie_adv_err_regs_t));
838 
839 		if (PCIE_IS_RP(bus_p))
840 			kmem_free(PCIE_RP_REG(pfd_p),
841 			    sizeof (pf_pcie_rp_err_regs_t));
842 
843 		kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
844 	} else if (PCIE_IS_PCIX(bus_p)) {
845 		if (PCIE_IS_BDG(bus_p)) {
846 			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
847 				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
848 				    sizeof (pf_pcix_ecc_regs_t));
849 				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
850 				    sizeof (pf_pcix_ecc_regs_t));
851 			}
852 
853 			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
854 			    sizeof (pf_pcix_bdg_err_regs_t));
855 		} else {
856 			if (PCIX_ECC_VERSION_CHECK(bus_p))
857 				kmem_free(PCIX_ECC_REG(pfd_p),
858 				    sizeof (pf_pcix_ecc_regs_t));
859 
860 			kmem_free(PCIX_ERR_REG(pfd_p),
861 			    sizeof (pf_pcix_err_regs_t));
862 		}
863 	}
864 
865 	if (PCIE_IS_BDG(bus_p))
866 		kmem_free(PCI_BDG_ERR_REG(pfd_p),
867 		    sizeof (pf_pci_bdg_err_regs_t));
868 
869 	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
870 	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
871 
872 	if (PCIE_IS_ROOT(bus_p)) {
873 		kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
874 		kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
875 	}
876 
877 	kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));
878 
879 	PCIE_DIP2PFD(dip) = NULL;
880 }
881 
882 
883 /*
884  * Special functions to allocate pf_data_t's for PCIe root complexes.
885  * Note: Root Complex not Root Port
886  */
887 void
888 pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
889 {
890 	pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
891 	pfd_p->pe_severity_flags = 0;
892 	pfd_p->pe_severity_mask = 0;
893 	pfd_p->pe_orig_severity_flags = 0;
894 	pfd_p->pe_lock = B_FALSE;
895 	pfd_p->pe_valid = B_FALSE;
896 
897 	PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
898 	PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
899 	PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
900 	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
901 	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
902 	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
903 	PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
904 	PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
905 	PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
906 	PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
907 	PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
908 	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = PCIE_INVALID_BDF;
909 	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = PCIE_INVALID_BDF;
910 
911 	PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
912 }
913 
914 void
915 pcie_rc_fini_pfd(pf_data_t *pfd_p)
916 {
917 	kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
918 	kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
919 	kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
920 	kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
921 	kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
922 	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
923 	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
924 	kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
925 	kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
926 }
927 
928 /*
929  * init pcie_bus_t for root complex
930  *
931  * Only a few of the fields in bus_t are valid for a root complex.
932  * The fields that are bracketed are initialized in this routine:
933  *
934  * dev_info_t *		<bus_dip>
935  * dev_info_t *		bus_rp_dip
936  * ddi_acc_handle_t	bus_cfg_hdl
937  * uint_t		<bus_fm_flags>
938  * pcie_req_id_t	bus_bdf
939  * pcie_req_id_t	bus_rp_bdf
940  * uint32_t		bus_dev_ven_id
941  * uint8_t		bus_rev_id
942  * uint8_t		<bus_hdr_type>
943  * uint16_t		<bus_dev_type>
944  * uint8_t		bus_bdg_secbus
945  * uint16_t		bus_pcie_off
946  * uint16_t		<bus_aer_off>
947  * uint16_t		bus_pcix_off
948  * uint16_t		bus_ecc_ver
949  * pci_bus_range_t	bus_bus_range
950  * ppb_ranges_t	*	bus_addr_ranges
951  * int			bus_addr_entries
952  * pci_regspec_t *	bus_assigned_addr
953  * int			bus_assigned_entries
954  * pf_data_t *		bus_pfd
955  * pcie_domain_t *	<bus_dom>
956  * int			bus_mps
957  * uint64_t		bus_cfgacc_base
958  * void	*		bus_plat_private
959  */
960 void
961 pcie_rc_init_bus(dev_info_t *dip)
962 {
963 	pcie_bus_t *bus_p;
964 
965 	bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
966 	bus_p->bus_dip = dip;
967 	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
968 	bus_p->bus_hdr_type = PCI_HEADER_ONE;
969 
970 	/* Fake that there are AER logs */
971 	bus_p->bus_aer_off = (uint16_t)-1;
972 
973 	/* Needed only for handle lookup */
974 	atomic_or_uint(&bus_p->bus_fm_flags, PF_FM_READY);
975 
976 	ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);
977 
978 	PCIE_BUS2DOM(bus_p) = PCIE_ZALLOC(pcie_domain_t);
979 }
980 
981 void
982 pcie_rc_fini_bus(dev_info_t *dip)
983 {
984 	pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);
985 	ndi_set_bus_private(dip, B_FALSE, 0, NULL);
986 	kmem_free(PCIE_BUS2DOM(bus_p), sizeof (pcie_domain_t));
987 	kmem_free(bus_p, sizeof (pcie_bus_t));
988 }
989 
990 static int
991 pcie_width_to_int(pcie_link_width_t width)
992 {
993 	switch (width) {
994 	case PCIE_LINK_WIDTH_X1:
995 		return (1);
996 	case PCIE_LINK_WIDTH_X2:
997 		return (2);
998 	case PCIE_LINK_WIDTH_X4:
999 		return (4);
1000 	case PCIE_LINK_WIDTH_X8:
1001 		return (8);
1002 	case PCIE_LINK_WIDTH_X12:
1003 		return (12);
1004 	case PCIE_LINK_WIDTH_X16:
1005 		return (16);
1006 	case PCIE_LINK_WIDTH_X32:
1007 		return (32);
1008 	default:
1009 		return (0);
1010 	}
1011 }
1012 
1013 /*
1014  * Return the speed in Transfers / second. This is a signed quantity to match
1015  * the ndi/ddi property interfaces.
1016  */
1017 static int64_t
1018 pcie_speed_to_int(pcie_link_speed_t speed)
1019 {
1020 	switch (speed) {
1021 	case PCIE_LINK_SPEED_2_5:
1022 		return (2500000000LL);
1023 	case PCIE_LINK_SPEED_5:
1024 		return (5000000000LL);
1025 	case PCIE_LINK_SPEED_8:
1026 		return (8000000000LL);
1027 	case PCIE_LINK_SPEED_16:
1028 		return (16000000000LL);
1029 	default:
1030 		return (0);
1031 	}
1032 }
1033 
1034 /*
1035  * Translate the recorded speed information into devinfo properties.
1036  */
1037 static void
1038 pcie_speeds_to_devinfo(dev_info_t *dip, pcie_bus_t *bus_p)
1039 {
1040 	if (bus_p->bus_max_width != PCIE_LINK_WIDTH_UNKNOWN) {
1041 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
1042 		    "pcie-link-maximum-width",
1043 		    pcie_width_to_int(bus_p->bus_max_width));
1044 	}
1045 
1046 	if (bus_p->bus_cur_width != PCIE_LINK_WIDTH_UNKNOWN) {
1047 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
1048 		    "pcie-link-current-width",
1049 		    pcie_width_to_int(bus_p->bus_cur_width));
1050 	}
1051 
1052 	if (bus_p->bus_cur_speed != PCIE_LINK_SPEED_UNKNOWN) {
1053 		(void) ndi_prop_update_int64(DDI_DEV_T_NONE, dip,
1054 		    "pcie-link-current-speed",
1055 		    pcie_speed_to_int(bus_p->bus_cur_speed));
1056 	}
1057 
1058 	if (bus_p->bus_max_speed != PCIE_LINK_SPEED_UNKNOWN) {
1059 		(void) ndi_prop_update_int64(DDI_DEV_T_NONE, dip,
1060 		    "pcie-link-maximum-speed",
1061 		    pcie_speed_to_int(bus_p->bus_max_speed));
1062 	}
1063 
1064 	if (bus_p->bus_target_speed != PCIE_LINK_SPEED_UNKNOWN) {
1065 		(void) ndi_prop_update_int64(DDI_DEV_T_NONE, dip,
1066 		    "pcie-link-target-speed",
1067 		    pcie_speed_to_int(bus_p->bus_target_speed));
1068 	}
1069 
1070 	if ((bus_p->bus_speed_flags & PCIE_LINK_F_ADMIN_TARGET) != 0) {
1071 		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
1072 		    "pcie-link-admin-target-speed");
1073 	}
1074 
1075 	if (bus_p->bus_sup_speed != PCIE_LINK_SPEED_UNKNOWN) {
1076 		int64_t speeds[4];
1077 		uint_t nspeeds = 0;
1078 
1079 		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_2_5) {
1080 			speeds[nspeeds++] =
1081 			    pcie_speed_to_int(PCIE_LINK_SPEED_2_5);
1082 		}
1083 
1084 		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_5) {
1085 			speeds[nspeeds++] =
1086 			    pcie_speed_to_int(PCIE_LINK_SPEED_5);
1087 		}
1088 
1089 		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_8) {
1090 			speeds[nspeeds++] =
1091 			    pcie_speed_to_int(PCIE_LINK_SPEED_8);
1092 		}
1093 
1094 		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_16) {
1095 			speeds[nspeeds++] =
1096 			    pcie_speed_to_int(PCIE_LINK_SPEED_16);
1097 		}
1098 
1099 		(void) ndi_prop_update_int64_array(DDI_DEV_T_NONE, dip,
1100 		    "pcie-link-supported-speeds", speeds, nspeeds);
1101 	}
1102 }
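
/*
 * A minimal consumer sketch (hypothetical caller): the properties created
 * above can be read back with the standard DDI accessor, e.g.
 *
 *	int64_t cur = ddi_prop_get_int64(DDI_DEV_T_ANY, dip,
 *	    DDI_PROP_DONTPASS, "pcie-link-current-speed", 0);
 *
 * which returns the negotiated rate in transfers/second, or 0 if the
 * property was never created.
 */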
1103 
1104 /*
1105  * We need to capture the supported, maximum, and current device speed and
1106  * width. The way that this has been done has changed over time.
1107  *
1108  * Prior to PCIe Gen 3, there were only current and supported speed fields.
1109  * These were found in the link status and link capabilities registers of the
1110  * PCI express capability. With the change to PCIe Gen 3, the information in the
1111  * link capabilities changed to the maximum value. The supported speeds vector
1112  * was moved to the link capabilities 2 register.
1113  *
1114  * Now, a device may not implement some of these registers. To determine
1115  * whether a register is present, we first need to check the version of the
1116  * PCI express capability: the link capabilities 2 register did
1117  * not exist prior to version 2 of this capability. If a modern device does not
1118  * implement it, it is supposed to return zero for the register.
1119  */
1120 static void
1121 pcie_capture_speeds(dev_info_t *dip)
1122 {
1123 	uint16_t	vers, status;
1124 	uint32_t	cap, cap2, ctl2;
1125 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
1126 
1127 	if (!PCIE_IS_PCIE(bus_p))
1128 		return;
1129 
1130 	vers = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);
1131 	if (vers == PCI_EINVAL16)
1132 		return;
1133 	vers &= PCIE_PCIECAP_VER_MASK;
1134 
1135 	/*
1136 	 * Verify the capability's version.
1137 	 */
1138 	switch (vers) {
1139 	case PCIE_PCIECAP_VER_1_0:
1140 		cap2 = 0;
1141 		ctl2 = 0;
1142 		break;
1143 	case PCIE_PCIECAP_VER_2_0:
1144 		cap2 = PCIE_CAP_GET(32, bus_p, PCIE_LINKCAP2);
1145 		if (cap2 == PCI_EINVAL32)
1146 			cap2 = 0;
1147 		ctl2 = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL2);
1148 		if (ctl2 == PCI_EINVAL16)
1149 			ctl2 = 0;
1150 		break;
1151 	default:
1152 		/* Don't try and handle an unknown version */
1153 		return;
1154 	}
1155 
1156 	status = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
1157 	cap = PCIE_CAP_GET(32, bus_p, PCIE_LINKCAP);
1158 	if (status == PCI_EINVAL16 || cap == PCI_EINVAL32)
1159 		return;
1160 
1161 	mutex_enter(&bus_p->bus_speed_mutex);
1162 
1163 	switch (status & PCIE_LINKSTS_SPEED_MASK) {
1164 	case PCIE_LINKSTS_SPEED_2_5:
1165 		bus_p->bus_cur_speed = PCIE_LINK_SPEED_2_5;
1166 		break;
1167 	case PCIE_LINKSTS_SPEED_5:
1168 		bus_p->bus_cur_speed = PCIE_LINK_SPEED_5;
1169 		break;
1170 	case PCIE_LINKSTS_SPEED_8:
1171 		bus_p->bus_cur_speed = PCIE_LINK_SPEED_8;
1172 		break;
1173 	case PCIE_LINKSTS_SPEED_16:
1174 		bus_p->bus_cur_speed = PCIE_LINK_SPEED_16;
1175 		break;
1176 	default:
1177 		bus_p->bus_cur_speed = PCIE_LINK_SPEED_UNKNOWN;
1178 		break;
1179 	}
1180 
1181 	switch (status & PCIE_LINKSTS_NEG_WIDTH_MASK) {
1182 	case PCIE_LINKSTS_NEG_WIDTH_X1:
1183 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X1;
1184 		break;
1185 	case PCIE_LINKSTS_NEG_WIDTH_X2:
1186 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X2;
1187 		break;
1188 	case PCIE_LINKSTS_NEG_WIDTH_X4:
1189 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X4;
1190 		break;
1191 	case PCIE_LINKSTS_NEG_WIDTH_X8:
1192 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X8;
1193 		break;
1194 	case PCIE_LINKSTS_NEG_WIDTH_X12:
1195 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X12;
1196 		break;
1197 	case PCIE_LINKSTS_NEG_WIDTH_X16:
1198 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X16;
1199 		break;
1200 	case PCIE_LINKSTS_NEG_WIDTH_X32:
1201 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X32;
1202 		break;
1203 	default:
1204 		bus_p->bus_cur_width = PCIE_LINK_WIDTH_UNKNOWN;
1205 		break;
1206 	}
1207 
1208 	switch (cap & PCIE_LINKCAP_MAX_WIDTH_MASK) {
1209 	case PCIE_LINKCAP_MAX_WIDTH_X1:
1210 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X1;
1211 		break;
1212 	case PCIE_LINKCAP_MAX_WIDTH_X2:
1213 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X2;
1214 		break;
1215 	case PCIE_LINKCAP_MAX_WIDTH_X4:
1216 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X4;
1217 		break;
1218 	case PCIE_LINKCAP_MAX_WIDTH_X8:
1219 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X8;
1220 		break;
1221 	case PCIE_LINKCAP_MAX_WIDTH_X12:
1222 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X12;
1223 		break;
1224 	case PCIE_LINKCAP_MAX_WIDTH_X16:
1225 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X16;
1226 		break;
1227 	case PCIE_LINKCAP_MAX_WIDTH_X32:
1228 		bus_p->bus_max_width = PCIE_LINK_WIDTH_X32;
1229 		break;
1230 	default:
1231 		bus_p->bus_max_width = PCIE_LINK_WIDTH_UNKNOWN;
1232 		break;
1233 	}
1234 
1235 	/*
1236 	 * If we have the Link Capabilities 2, then we can get the supported
1237 	 * speeds from it and treat the bits in Link Capabilities 1 as the
1238 	 * maximum. If we don't, then we need to follow the Implementation Note
1239 	 * in the standard under Link Capabilities 2. Effectively, this means
1240 	 * that if the value of 10b is set in Link Capabilities register, that
1241 	 * it supports both 2.5 and 5 GT/s speeds.
1242 	 */
1243 	if (cap2 != 0) {
1244 		if (cap2 & PCIE_LINKCAP2_SPEED_2_5)
1245 			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_2_5;
1246 		if (cap2 & PCIE_LINKCAP2_SPEED_5)
1247 			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_5;
1248 		if (cap2 & PCIE_LINKCAP2_SPEED_8)
1249 			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_8;
1250 		if (cap2 & PCIE_LINKCAP2_SPEED_16)
1251 			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_16;
1252 
1253 		switch (cap & PCIE_LINKCAP_MAX_SPEED_MASK) {
1254 		case PCIE_LINKCAP_MAX_SPEED_2_5:
1255 			bus_p->bus_max_speed = PCIE_LINK_SPEED_2_5;
1256 			break;
1257 		case PCIE_LINKCAP_MAX_SPEED_5:
1258 			bus_p->bus_max_speed = PCIE_LINK_SPEED_5;
1259 			break;
1260 		case PCIE_LINKCAP_MAX_SPEED_8:
1261 			bus_p->bus_max_speed = PCIE_LINK_SPEED_8;
1262 			break;
1263 		case PCIE_LINKCAP_MAX_SPEED_16:
1264 			bus_p->bus_max_speed = PCIE_LINK_SPEED_16;
1265 			break;
1266 		default:
1267 			bus_p->bus_max_speed = PCIE_LINK_SPEED_UNKNOWN;
1268 			break;
1269 		}
1270 	} else {
1271 		if (cap & PCIE_LINKCAP_MAX_SPEED_5) {
1272 			bus_p->bus_max_speed = PCIE_LINK_SPEED_5;
1273 			bus_p->bus_sup_speed = PCIE_LINK_SPEED_2_5 |
1274 			    PCIE_LINK_SPEED_5;
1275 		} else if (cap & PCIE_LINKCAP_MAX_SPEED_2_5) {
1276 			bus_p->bus_max_speed = PCIE_LINK_SPEED_2_5;
1277 			bus_p->bus_sup_speed = PCIE_LINK_SPEED_2_5;
1278 		}
1279 	}
1280 
1281 	switch (ctl2 & PCIE_LINKCTL2_TARGET_SPEED_MASK) {
1282 	case PCIE_LINKCTL2_TARGET_SPEED_2_5:
1283 		bus_p->bus_target_speed = PCIE_LINK_SPEED_2_5;
1284 		break;
1285 	case PCIE_LINKCTL2_TARGET_SPEED_5:
1286 		bus_p->bus_target_speed = PCIE_LINK_SPEED_5;
1287 		break;
1288 	case PCIE_LINKCTL2_TARGET_SPEED_8:
1289 		bus_p->bus_target_speed = PCIE_LINK_SPEED_8;
1290 		break;
1291 	case PCIE_LINKCTL2_TARGET_SPEED_16:
1292 		bus_p->bus_target_speed = PCIE_LINK_SPEED_16;
1293 		break;
1294 	default:
1295 		bus_p->bus_target_speed = PCIE_LINK_SPEED_UNKNOWN;
1296 		break;
1297 	}
1298 
1299 	pcie_speeds_to_devinfo(dip, bus_p);
1300 	mutex_exit(&bus_p->bus_speed_mutex);
1301 }
1302 
1303 /*
1304  * partially init pcie_bus_t for device (dip,bdf) for accessing pci
1305  * config space
1306  *
1307  * This routine is invoked during boot, either after creating a devinfo node
1308  * (x86 case) or during px driver attach (sparc case); it is also invoked
1309  * in hotplug context after a devinfo node is created.
1310  *
1311  * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
1312  * is set:
1313  *
1314  * dev_info_t *		<bus_dip>
1315  * dev_info_t *		<bus_rp_dip>
1316  * ddi_acc_handle_t	bus_cfg_hdl
1317  * uint_t		bus_fm_flags
1318  * pcie_req_id_t	<bus_bdf>
1319  * pcie_req_id_t	<bus_rp_bdf>
1320  * uint32_t		<bus_dev_ven_id>
1321  * uint8_t		<bus_rev_id>
1322  * uint8_t		<bus_hdr_type>
1323  * uint16_t		<bus_dev_type>
1324  * uint8_t		bus_bdg_secbus
1325  * uint16_t		<bus_pcie_off>
1326  * uint16_t		<bus_aer_off>
1327  * uint16_t		<bus_pcix_off>
1328  * uint16_t		<bus_ecc_ver>
1329  * pci_bus_range_t	bus_bus_range
1330  * ppb_ranges_t	*	bus_addr_ranges
1331  * int			bus_addr_entries
1332  * pci_regspec_t *	bus_assigned_addr
1333  * int			bus_assigned_entries
1334  * pf_data_t *		bus_pfd
1335  * pcie_domain_t *	bus_dom
1336  * int			bus_mps
1337  * uint64_t		bus_cfgacc_base
1338  * void	*		bus_plat_private
1339  *
1340  * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
1341  * is set:
1342  *
1343  * dev_info_t *		bus_dip
1344  * dev_info_t *		bus_rp_dip
1345  * ddi_acc_handle_t	bus_cfg_hdl
1346  * uint_t		bus_fm_flags
1347  * pcie_req_id_t	bus_bdf
1348  * pcie_req_id_t	bus_rp_bdf
1349  * uint32_t		bus_dev_ven_id
1350  * uint8_t		bus_rev_id
1351  * uint8_t		bus_hdr_type
1352  * uint16_t		bus_dev_type
1353  * uint8_t		<bus_bdg_secbus>
1354  * uint16_t		bus_pcie_off
1355  * uint16_t		bus_aer_off
1356  * uint16_t		bus_pcix_off
1357  * uint16_t		bus_ecc_ver
1358  * pci_bus_range_t	<bus_bus_range>
1359  * ppb_ranges_t	*	<bus_addr_ranges>
1360  * int			<bus_addr_entries>
1361  * pci_regspec_t *	<bus_assigned_addr>
1362  * int			<bus_assigned_entries>
1363  * pf_data_t *		<bus_pfd>
1364  * pcie_domain_t *	bus_dom
1365  * int			bus_mps
1366  * uint64_t		bus_cfgacc_base
1367  * void	*		<bus_plat_private>
1368  */
1369 
1370 pcie_bus_t *
1371 pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
1372 {
1373 	uint16_t	status, base, baseptr, num_cap;
1374 	uint32_t	capid;
1375 	int		range_size;
1376 	pcie_bus_t	*bus_p = NULL;
1377 	dev_info_t	*rcdip;
1378 	dev_info_t	*pdip;
1379 	const char	*errstr = NULL;
1380 
1381 	if (!(flags & PCIE_BUS_INITIAL))
1382 		goto initial_done;
1383 
1384 	bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
1385 
1386 	bus_p->bus_dip = dip;
1387 	bus_p->bus_bdf = bdf;
1388 
1389 	rcdip = pcie_get_rc_dip(dip);
1390 	ASSERT(rcdip != NULL);
1391 
1392 	/* Save the Vendor ID, Device ID and revision ID */
1393 	bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
1394 	bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
1395 	/* Save the Header Type */
1396 	bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
1397 	bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;
1398 
1399 	/*
1400 	 * Figure out the device type and all the relevant capability offsets
1401 	 */
1402 	/* set default value */
1403 	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;
1404 
1405 	status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
1406 	if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
1407 		goto caps_done; /* capability not supported */
1408 
1409 	/* Relevant conventional capabilities first */
1410 
1411 	/* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
1412 	num_cap = 2;
1413 
1414 	switch (bus_p->bus_hdr_type) {
1415 	case PCI_HEADER_ZERO:
1416 		baseptr = PCI_CONF_CAP_PTR;
1417 		break;
1418 	case PCI_HEADER_PPB:
1419 		baseptr = PCI_BCNF_CAP_PTR;
1420 		break;
1421 	case PCI_HEADER_CARDBUS:
1422 		baseptr = PCI_CBUS_CAP_PTR;
1423 		break;
1424 	default:
1425 		cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
1426 		    __func__, bus_p->bus_hdr_type);
1427 		goto caps_done;
1428 	}
1429 
1430 	base = baseptr;
1431 	for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
1432 	    base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
1433 		capid = pci_cfgacc_get8(rcdip, bdf, base);
1434 		switch (capid) {
1435 		case PCI_CAP_ID_PCI_E:
1436 			bus_p->bus_pcie_off = base;
1437 			bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
1438 			    base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;
1439 
1440 			/* Check and save PCIe hotplug capability information */
1441 			if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
1442 			    (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
1443 			    & PCIE_PCIECAP_SLOT_IMPL) &&
1444 			    (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
1445 			    & PCIE_SLOTCAP_HP_CAPABLE))
1446 				bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;
1447 
1448 			num_cap--;
1449 			break;
1450 		case PCI_CAP_ID_PCIX:
1451 			bus_p->bus_pcix_off = base;
1452 			if (PCIE_IS_BDG(bus_p))
1453 				bus_p->bus_ecc_ver =
1454 				    pci_cfgacc_get16(rcdip, bdf, base +
1455 				    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
1456 			else
1457 				bus_p->bus_ecc_ver =
1458 				    pci_cfgacc_get16(rcdip, bdf, base +
1459 				    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
1460 			num_cap--;
1461 			break;
1462 		default:
1463 			break;
1464 		}
1465 	}
1466 
1467 	/* Check and save PCI hotplug (SHPC) capability information */
1468 	if (PCIE_IS_BDG(bus_p)) {
1469 		base = baseptr;
1470 		for (base = pci_cfgacc_get8(rcdip, bdf, base);
1471 		    base; base = pci_cfgacc_get8(rcdip, bdf,
1472 		    base + PCI_CAP_NEXT_PTR)) {
1473 			capid = pci_cfgacc_get8(rcdip, bdf, base);
1474 			if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
1475 				bus_p->bus_pci_hp_off = base;
1476 				bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
1477 				break;
1478 			}
1479 		}
1480 	}
1481 
1482 	/* Then, relevant extended capabilities */
1483 
1484 	if (!PCIE_IS_PCIE(bus_p))
1485 		goto caps_done;
1486 
1487 	/* Extended caps: PCIE_EXT_CAP_ID_AER */
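	/*
	 * Each extended capability begins with a 32-bit header: capability
	 * ID in bits 15:0, version in bits 19:16, and the offset of the
	 * next capability in bits 31:20 (0 terminates the list); the shifts
	 * below unpack that header.
	 */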
1488 	for (base = PCIE_EXT_CAP; base; base = (capid >>
1489 	    PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
1490 		capid = pci_cfgacc_get32(rcdip, bdf, base);
1491 		if (capid == PCI_CAP_EINVAL32)
1492 			break;
1493 		if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
1494 		    == PCIE_EXT_CAP_ID_AER) {
1495 			bus_p->bus_aer_off = base;
1496 			break;
1497 		}
1498 	}
1499 
1500 	/*
1501 	 * Save and record speed information about the device.
1502 	 */
1503 
1504 caps_done:
1505 	/* save RP dip and RP bdf */
1506 	if (PCIE_IS_RP(bus_p)) {
1507 		bus_p->bus_rp_dip = dip;
1508 		bus_p->bus_rp_bdf = bus_p->bus_bdf;
1509 	} else {
1510 		for (pdip = ddi_get_parent(dip); pdip;
1511 		    pdip = ddi_get_parent(pdip)) {
1512 			pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);
1513 
1514 			/*
1515 			 * If RP dip and RP bdf in parent's bus_t have
1516 			 * been initialized, simply use these instead of
1517 			 * continuing up to the RC.
1518 			 */
1519 			if (parent_bus_p->bus_rp_dip != NULL) {
1520 				bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
1521 				bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
1522 				break;
1523 			}
1524 
1525 			/*
1526 			 * When debugging be aware that some NVIDIA x86
1527 			 * architectures have 2 nodes for each RP, one at bus
1528 			 * 0x0 and one at bus 0x80.  The requester is from bus
1529 			 * 0x80.
1530 			 */
1531 			if (PCIE_IS_ROOT(parent_bus_p)) {
1532 				bus_p->bus_rp_dip = pdip;
1533 				bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
1534 				break;
1535 			}
1536 		}
1537 	}
1538 
1539 	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
1540 	(void) atomic_swap_uint(&bus_p->bus_fm_flags, 0);
1541 	bus_p->bus_mps = 0;
1542 
1543 	ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);
1544 
1545 	if (PCIE_IS_HOTPLUG_CAPABLE(dip))
1546 		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
1547 		    "hotplug-capable");
1548 
1549 initial_done:
1550 	if (!(flags & PCIE_BUS_FINAL))
1551 		goto final_done;
1552 
1553 	/* fetch the pcie_bus_t set up during the PCIE_BUS_INITIAL pass */
1554 	bus_p = PCIE_DIP2BUS(dip);
1555 
1556 	/* Save the Range information if device is a switch/bridge */
1557 	if (PCIE_IS_BDG(bus_p)) {
1558 		/* get "bus_range" property */
1559 		range_size = sizeof (pci_bus_range_t);
1560 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1561 		    "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
1562 		    != DDI_PROP_SUCCESS) {
1563 			errstr = "Cannot find \"bus-range\" property";
1564 			cmn_err(CE_WARN,
1565 			    "PCIE init err info failed BDF 0x%x:%s\n",
1566 			    bus_p->bus_bdf, errstr);
1567 		}
1568 
1569 		/* get secondary bus number */
1570 		rcdip = pcie_get_rc_dip(dip);
1571 		ASSERT(rcdip != NULL);
1572 
1573 		bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
1574 		    bus_p->bus_bdf, PCI_BCNF_SECBUS);
1575 
1576 		/* Get "ranges" property */
1577 		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1578 		    "ranges", (caddr_t)&bus_p->bus_addr_ranges,
1579 		    &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
1580 			bus_p->bus_addr_entries = 0;
1581 		bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
1582 	}
1583 
1584 	/* save "assigned-addresses" property array, ignore failures */
1585 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1586 	    "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
1587 	    &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
1588 		bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
1589 	else
1590 		bus_p->bus_assigned_entries = 0;
1591 
1592 	pcie_init_pfd(dip);
1593 
1594 	pcie_init_plat(dip);
1595 
1596 final_done:
1597 
1598 	PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
1599 	    ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
1600 	    bus_p->bus_bdg_secbus);
1601 #ifdef DEBUG
1602 	if (bus_p != NULL) {
1603 		pcie_print_bus(bus_p);
1604 	}
1605 #endif
1606 
1607 	return (bus_p);
1608 }
1609 
1610 /*
1611  * Invoked before destroying a devinfo node, mostly during hotplug
1612  * operations, to free the pcie_bus_t data structure
1613  */
1614 /* ARGSUSED */
1615 void
1616 pcie_fini_bus(dev_info_t *dip, uint8_t flags)
1617 {
1618 	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
1619 	ASSERT(bus_p);
1620 
1621 	if (flags & PCIE_BUS_INITIAL) {
1622 		pcie_fini_plat(dip);
1623 		pcie_fini_pfd(dip);
1624 
1625 		kmem_free(bus_p->bus_assigned_addr,
1626 		    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
1627 		kmem_free(bus_p->bus_addr_ranges,
1628 		    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
1629 		/* zero out the fields that have been destroyed */
1630 		bus_p->bus_assigned_addr = NULL;
1631 		bus_p->bus_addr_ranges = NULL;
1632 		bus_p->bus_assigned_entries = 0;
1633 		bus_p->bus_addr_entries = 0;
1634 	}
1635 
1636 	if (flags & PCIE_BUS_FINAL) {
1637 		if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
1638 			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
1639 			    "hotplug-capable");
1640 		}
1641 
1642 		ndi_set_bus_private(dip, B_TRUE, 0, NULL);
1643 		kmem_free(bus_p, sizeof (pcie_bus_t));
1644 	}
1645 }
1646 
1647 int
1648 pcie_postattach_child(dev_info_t *cdip)
1649 {
1650 	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);
1651 
1652 	if (!bus_p)
1653 		return (DDI_FAILURE);
1654 
1655 	return (pcie_enable_ce(cdip));
1656 }
1657 
1658 /*
1659  * PCI-Express child device de-initialization.
1660  * This function disables generic pci-express interrupts and error
1661  * handling.
1662  */
1663 void
1664 pcie_uninitchild(dev_info_t *cdip)
1665 {
1666 	pcie_disable_errors(cdip);
1667 	pcie_fini_cfghdl(cdip);
1668 	pcie_fini_dom(cdip);
1669 }
1670 
1671 /*
1672  * find the root complex dip
1673  */
1674 dev_info_t *
1675 pcie_get_rc_dip(dev_info_t *dip)
1676 {
1677 	dev_info_t *rcdip;
1678 	pcie_bus_t *rc_bus_p;
1679 
1680 	for (rcdip = ddi_get_parent(dip); rcdip;
1681 	    rcdip = ddi_get_parent(rcdip)) {
1682 		rc_bus_p = PCIE_DIP2BUS(rcdip);
1683 		if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
1684 			break;
1685 	}
1686 
1687 	return (rcdip);
1688 }
1689 
1690 boolean_t
1691 pcie_is_pci_device(dev_info_t *dip)
1692 {
1693 	dev_info_t	*pdip;
1694 	char		*device_type;
1695 
1696 	pdip = ddi_get_parent(dip);
1697 	if (pdip == NULL)
1698 		return (B_FALSE);
1699 
1700 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
1701 	    "device_type", &device_type) != DDI_PROP_SUCCESS)
1702 		return (B_FALSE);
1703 
1704 	if (strcmp(device_type, "pciex") != 0 &&
1705 	    strcmp(device_type, "pci") != 0) {
1706 		ddi_prop_free(device_type);
1707 		return (B_FALSE);
1708 	}
1709 
1710 	ddi_prop_free(device_type);
1711 	return (B_TRUE);
1712 }
1713 
1714 typedef struct {
1715 	boolean_t	init;
1716 	uint8_t		flags;
1717 } pcie_bus_arg_t;
1718 
1719 /*ARGSUSED*/
1720 static int
1721 pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
1722 {
1723 	pcie_req_id_t	bdf;
1724 	pcie_bus_arg_t	*bus_arg = (pcie_bus_arg_t *)arg;
1725 
1726 	if (!pcie_is_pci_device(dip))
1727 		goto out;
1728 
1729 	if (bus_arg->init) {
1730 		if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
1731 			goto out;
1732 
1733 		(void) pcie_init_bus(dip, bdf, bus_arg->flags);
1734 	} else {
1735 		(void) pcie_fini_bus(dip, bus_arg->flags);
1736 	}
1737 
1738 	return (DDI_WALK_CONTINUE);
1739 
1740 out:
1741 	return (DDI_WALK_PRUNECHILD);
1742 }
1743 
1744 void
1745 pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
1746 {
1747 	int		circular_count;
1748 	dev_info_t	*dip = ddi_get_child(rcdip);
1749 	pcie_bus_arg_t	arg;
1750 
1751 	arg.init = B_TRUE;
1752 	arg.flags = flags;
1753 
1754 	ndi_devi_enter(rcdip, &circular_count);
1755 	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1756 	ndi_devi_exit(rcdip, circular_count);
1757 }
1758 
1759 void
1760 pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
1761 {
1762 	int		circular_count;
1763 	dev_info_t	*dip = ddi_get_child(rcdip);
1764 	pcie_bus_arg_t	arg;
1765 
1766 	arg.init = B_FALSE;
1767 	arg.flags = flags;
1768 
1769 	ndi_devi_enter(rcdip, &circular_count);
1770 	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1771 	ndi_devi_exit(rcdip, circular_count);
1772 }
1773 
1774 void
1775 pcie_enable_errors(dev_info_t *dip)
1776 {
1777 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
1778 	uint16_t	reg16, tmp16;
1779 	uint32_t	reg32, tmp32;
1780 
1781 	ASSERT(bus_p);
1782 
1783 	/*
1784 	 * Clear any pending errors
1785 	 */
1786 	pcie_clear_errors(dip);
1787 
1788 	if (!PCIE_IS_PCIE(bus_p))
1789 		return;
1790 
1791 	/*
1792 	 * Enable Baseline Error Handling but leave CE reporting off (poweron
1793 	 * default).
1794 	 */
1795 	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
1796 	    PCI_CAP_EINVAL16) {
1797 		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
1798 		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
1799 		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
1800 		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
1801 		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));
1802 
1803 		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
1804 		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
1805 	}
1806 
1807 	/* Enable Root Port Baseline Error Receiving */
1808 	if (PCIE_IS_ROOT(bus_p) &&
1809 	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
1810 	    PCI_CAP_EINVAL16) {
1811 
1812 		tmp16 = pcie_serr_disable_flag ?
1813 		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
1814 		    pcie_root_ctrl_default;
1815 		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
1816 		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
1817 		    reg16);
1818 	}
1819 
1820 	/*
1821  * Enable PCI-Express Advanced Error Handling if it exists.
1822 	 */
1823 	if (!PCIE_HAS_AER(bus_p))
1824 		return;
1825 
1826 	/* Set Uncorrectable Severity */
1827 	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
1828 	    PCI_CAP_EINVAL32) {
1829 		tmp32 = pcie_aer_uce_severity;
1830 
1831 		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
1832 		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
1833 		    reg32);
1834 	}
1835 
1836 	/* Enable Uncorrectable errors */
1837 	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
1838 	    PCI_CAP_EINVAL32) {
1839 		tmp32 = pcie_aer_uce_mask;
1840 
1841 		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
1842 		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
1843 		    reg32);
1844 	}
1845 
1846 	/* Enable ECRC generation and checking */
1847 	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
1848 	    PCI_CAP_EINVAL32) {
1849 		tmp32 = reg32 | pcie_ecrc_value;
1850 		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
1851 		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
1852 	}
1853 
1854 	/* Enable Secondary Uncorrectable errors if this is a bridge */
1855 	if (!PCIE_IS_PCIE_BDG(bus_p))
1856 		goto root;
1857 
1858 	/* Set Uncorrectable Severity */
1859 	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
1860 	    PCI_CAP_EINVAL32) {
1861 		tmp32 = pcie_aer_suce_severity;
1862 
1863 		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
1864 		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
1865 		    reg32);
1866 	}
1867 
1868 	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
1869 	    PCI_CAP_EINVAL32) {
1870 		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
1871 		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
1872 		    PCIE_AER_SUCE_MASK, reg32);
1873 	}
1874 
1875 root:
1876 	/*
1877 	 * Enable Root Control if this is a Root device.
1878 	 */
1879 	if (!PCIE_IS_ROOT(bus_p))
1880 		return;
1881 
1882 	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
1883 	    PCI_CAP_EINVAL16) {
1884 		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
1885 		    pcie_root_error_cmd_default);
1886 		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
1887 		    PCIE_AER_RE_CMD, reg16);
1888 	}
1889 }
1890 
1891 /*
1892  * This function is used for enabling CE reporting and setting the AER CE mask.
1893  * When called from outside the pcie module it should always be preceded by
1894  * a call to pcie_enable_errors.
1895  */
1896 int
1897 pcie_enable_ce(dev_info_t *dip)
1898 {
1899 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
1900 	uint16_t	device_sts, device_ctl;
1901 	uint32_t	tmp_pcie_aer_ce_mask;
1902 
1903 	if (!PCIE_IS_PCIE(bus_p))
1904 		return (DDI_SUCCESS);
1905 
1906 	/*
1907 	 * The "pcie_ce_mask" property is used to control both the CE reporting
1908 	 * enable field in the device control register and the AER CE mask. We
1909 	 * leave CE reporting disabled if pcie_ce_mask is set to -1.
1910 	 */
1911 
1912 	tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1913 	    DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);
1914 
1915 	if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
1916 		/*
1917 		 * Nothing to do since CE reporting has already been disabled.
1918 		 */
1919 		return (DDI_SUCCESS);
1920 	}
1921 
1922 	if (PCIE_HAS_AER(bus_p)) {
1923 		/* Enable AER CE */
1924 		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask);
1925 		PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
1926 		    0);
1927 
1928 		/* Clear any pending AER CE errors */
1929 		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
1930 	}
1931 
1932 	/* clear any pending CE errors */
1933 	if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
1934 	    PCI_CAP_EINVAL16)
1935 		PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
1936 		    device_sts & (~PCIE_DEVSTS_CE_DETECTED));
1937 
1938 	/* Enable CE reporting */
1939 	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
1940 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
1941 	    (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
1942 	PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);
1943 
1944 	return (DDI_SUCCESS);
1945 }
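/*
 * Example (illustrative only): per the comment above, a caller outside
 * this module enables baseline/AER error handling first and then turns
 * on CE reporting:
 *
 *	pcie_enable_errors(cdip);
 *	(void) pcie_enable_ce(cdip);
 *
 * CE reporting for a given device can be left disabled by setting its
 * "pcie_ce_mask" property to -1, as handled above.
 */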
1946 
1947 /* ARGSUSED */
1948 void
1949 pcie_disable_errors(dev_info_t *dip)
1950 {
1951 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
1952 	uint16_t	device_ctl;
1953 	uint32_t	aer_reg;
1954 
1955 	if (!PCIE_IS_PCIE(bus_p))
1956 		return;
1957 
1958 	/*
1959 	 * Disable PCI-Express Baseline Error Handling
1960 	 */
1961 	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
1962 	device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
1963 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);
1964 
1965 	/*
1966 	 * Disable PCI-Express Advanced Error Handling if it exists.
1967 	 */
1968 	if (!PCIE_HAS_AER(bus_p))
1969 		goto root;
1970 
1971 	/* Disable Uncorrectable errors */
1972 	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);
1973 
1974 	/* Disable Correctable errors */
1975 	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);
1976 
1977 	/* Disable ECRC generation and checking */
1978 	if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
1979 	    PCI_CAP_EINVAL32) {
1980 		aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
1981 		    PCIE_AER_CTL_ECRC_CHECK_ENA);
1982 
1983 		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
1984 	}
1985 	/*
1986 	 * Disable Secondary Uncorrectable errors if this is a bridge
1987 	 */
1988 	if (!PCIE_IS_PCIE_BDG(bus_p))
1989 		goto root;
1990 
1991 	PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);
1992 
1993 root:
1994 	/*
1995 	 * Disable Root Control if this is a Root device.
1996 	 */
1997 	if (!PCIE_IS_ROOT(bus_p))
1998 		return;
1999 
2000 	if (!pcie_serr_disable_flag) {
2001 		device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
2002 		device_ctl &= ~PCIE_ROOT_SYS_ERR;
2003 		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
2004 	}
2005 
2006 	if (!PCIE_HAS_AER(bus_p))
2007 		return;
2008 
2009 	if ((device_ctl = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
2010 	    PCI_CAP_EINVAL16) {
2011 		device_ctl &= ~pcie_root_error_cmd_default;
2012 		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
2013 	}
2014 }
2015 
2016 /*
2017  * Extract bdf from "reg" property.
2018  */
2019 int
2020 pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
2021 {
2022 	pci_regspec_t	*regspec;
2023 	int		reglen;
2024 
2025 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2026 	    "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
2027 		return (DDI_FAILURE);
2028 
2029 	if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
2030 		ddi_prop_free(regspec);
2031 		return (DDI_FAILURE);
2032 	}
2033 
2034 	/* Get phys_hi from first element.  All have same bdf. */
2035 	*bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;
2036 
2037 	ddi_prop_free(regspec);
2038 	return (DDI_SUCCESS);
2039 }
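/*
 * Example (illustrative only): the returned bdf packs bus, device and
 * function in the usual PCI layout, so a caller can decode it as
 *
 *	pcie_req_id_t bdf;
 *
 *	if (pcie_get_bdf_from_dip(dip, &bdf) == DDI_SUCCESS) {
 *		uint_t bus = (bdf >> 8) & 0xff;
 *		uint_t dev = (bdf >> 3) & 0x1f;
 *		uint_t func = bdf & 0x7;
 *		...
 *	}
 */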
2040 
2041 dev_info_t *
2042 pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
2043 {
2044 	dev_info_t *cdip = rdip;
2045 
2046 	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
2047 		;
2048 
2049 	return (cdip);
2050 }
2051 
2052 uint32_t
2053 pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
2054 {
2055 	dev_info_t *cdip;
2056 
2057 	/*
2058 	 * As part of probing, the PCI fcode interpreter may set up a DMA
2059 	 * request if a given card has fcode on it, using the dip and rdip of
2060 	 * the hotplug connector, i.e., the dip and rdip of the px/pcieb
2061 	 * driver. In this case, return an invalid bdf value since we cannot
2062 	 * get to the bdf of the actual device that will be initiating the DMA.
2063 	 */
2064 	if (rdip == dip)
2065 		return (PCIE_INVALID_BDF);
2066 
2067 	cdip = pcie_get_my_childs_dip(dip, rdip);
2068 
2069 	/*
2070 	 * For a given rdip, return the bdf value of dip's (px or pcieb)
2071 	 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
2072 	 *
2073 	 * XXX - For now, return an invalid bdf value for all PCI and PCI-X
2074 	 * devices since this needs more work.
2075 	 */
2076 	return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
2077 	    PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
2078 }
2079 
2080 uint32_t
2081 pcie_get_aer_uce_mask()
2082 {
2083 	return (pcie_aer_uce_mask);
2084 }
2085 uint32_t
2086 pcie_get_aer_ce_mask()
2087 {
2088 	return (pcie_aer_ce_mask);
2089 }
2090 uint32_t
2091 pcie_get_aer_suce_mask()
2092 {
2093 	return (pcie_aer_suce_mask);
2094 }
2095 uint32_t
2096 pcie_get_serr_mask()
2097 {
2098 	return (pcie_serr_disable_flag);
2099 }
2100 
2101 void
2102 pcie_set_aer_uce_mask(uint32_t mask)
2103 {
2104 	pcie_aer_uce_mask = mask;
2105 	if (mask & PCIE_AER_UCE_UR)
2106 		pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
2107 	else
2108 		pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;
2109 
2110 	if (mask & PCIE_AER_UCE_ECRC)
2111 		pcie_ecrc_value = 0;
2112 }
2113 
2114 void
2115 pcie_set_aer_ce_mask(uint32_t mask)
2116 {
2117 	pcie_aer_ce_mask = mask;
2118 }
2119 void
2120 pcie_set_aer_suce_mask(uint32_t mask)
2121 {
2122 	pcie_aer_suce_mask = mask;
2123 }
2124 void
2125 pcie_set_serr_mask(uint32_t mask)
2126 {
2127 	pcie_serr_disable_flag = mask;
2128 }
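/*
 * Example (illustrative only): the masks returned and set by the
 * accessors above are patchable kernel variables, so the same policy
 * can also be applied from /etc/system, e.g.
 *
 *	set pcie:pcie_aer_ce_mask = 0x1
 *
 * The set routines let platform code apply such tuning programmatically.
 */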
2129 
2130 /*
2131  * Is the rdip a child of dip?  Used to keep certain CTLOPS from bubbling
2132  * up erroneously, e.g. ISA ctlops to a PCI-PCI Bridge.
2133  */
2134 boolean_t
2135 pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
2136 {
2137 	dev_info_t	*cdip = ddi_get_child(dip);
2138 	for (; cdip; cdip = ddi_get_next_sibling(cdip))
2139 		if (cdip == rdip)
2140 			break;
2141 	return (cdip != NULL);
2142 }
2143 
2144 boolean_t
2145 pcie_is_link_disabled(dev_info_t *dip)
2146 {
2147 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2148 
2149 	if (PCIE_IS_PCIE(bus_p)) {
2150 		if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
2151 		    PCIE_LINKCTL_LINK_DISABLE)
2152 			return (B_TRUE);
2153 	}
2154 	return (B_FALSE);
2155 }
2156 
2157 /*
2158  * Initialize the MPS for a root port.
2159  *
2160  * dip - dip of root port device.
2161  */
2162 void
2163 pcie_init_root_port_mps(dev_info_t *dip)
2164 {
2165 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
2166 	int rp_cap, max_supported = pcie_max_mps;
2167 
2168 	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
2169 	    ddi_get_child(dip), &max_supported);
2170 
2171 	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, 0,
2172 	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
2173 	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;
2174 
2175 	if (rp_cap < max_supported)
2176 		max_supported = rp_cap;
2177 
2178 	bus_p->bus_mps = max_supported;
2179 	(void) pcie_initchild_mps(dip);
2180 }
2181 
2182 /*
2183  * Initialize the Maximum Payload Size of a device.
2184  *
2185  * cdip - dip of device.
2186  *
2187  * returns - DDI_SUCCESS or DDI_FAILURE
2188  */
2189 int
2190 pcie_initchild_mps(dev_info_t *cdip)
2191 {
2192 	pcie_bus_t	*bus_p;
2193 	dev_info_t	*pdip = ddi_get_parent(cdip);
2194 	uint8_t		dev_type;
2195 
2196 	bus_p = PCIE_DIP2BUS(cdip);
2197 	if (bus_p == NULL) {
2198 		PCIE_DBG("%s: BUS not found.\n",
2199 		    ddi_driver_name(cdip));
2200 		return (DDI_FAILURE);
2201 	}
2202 
2203 	dev_type = bus_p->bus_dev_type;
2204 
2205 	/*
2206 	 * For ARI Devices, only function zero's MPS needs to be set.
2207 	 */
2208 	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
2209 	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
2210 		pcie_req_id_t child_bdf;
2211 
2212 		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
2213 			return (DDI_FAILURE);
2214 		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
2215 			return (DDI_SUCCESS);
2216 	}
2217 
2218 	if (PCIE_IS_PCIE(bus_p)) {
2219 		int suggested_mrrs, fabric_mps;
2220 		uint16_t device_mps, device_mps_cap, device_mrrs, dev_ctrl;
2221 
2222 		dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
2223 		if ((fabric_mps = (PCIE_IS_RP(bus_p) ? bus_p :
2224 		    PCIE_DIP2BUS(pdip))->bus_mps) < 0) {
2225 			dev_ctrl = (dev_ctrl & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
2226 			    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
2227 			    (pcie_devctl_default &
2228 			    (PCIE_DEVCTL_MAX_READ_REQ_MASK |
2229 			    PCIE_DEVCTL_MAX_PAYLOAD_MASK));
2230 
2231 			PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
2232 			return (DDI_SUCCESS);
2233 		}
2234 
2235 		device_mps_cap = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
2236 		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;
2237 
2238 		device_mrrs = (dev_ctrl & PCIE_DEVCTL_MAX_READ_REQ_MASK) >>
2239 		    PCIE_DEVCTL_MAX_READ_REQ_SHIFT;
2240 
2241 		if (device_mps_cap < fabric_mps)
2242 			device_mrrs = device_mps = device_mps_cap;
2243 		else
2244 			device_mps = (uint16_t)fabric_mps;
2245 
2246 		suggested_mrrs = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
2247 		    cdip, DDI_PROP_DONTPASS, "suggested-mrrs", device_mrrs);
2248 
2249 		if ((device_mps == fabric_mps) ||
2250 		    (suggested_mrrs < device_mrrs))
2251 			device_mrrs = (uint16_t)suggested_mrrs;
2252 
2253 		/*
2254 		 * Replace MPS and MRRS settings.
2255 		 */
2256 		dev_ctrl &= ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
2257 		    PCIE_DEVCTL_MAX_PAYLOAD_MASK);
2258 
2259 		dev_ctrl |= ((device_mrrs << PCIE_DEVCTL_MAX_READ_REQ_SHIFT) |
2260 		    device_mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);
2261 
2262 		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
2263 
2264 		bus_p->bus_mps = device_mps;
2265 	}
2266 
2267 	return (DDI_SUCCESS);
2268 }
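/*
 * Example (illustrative only): the MPS and MRRS fields written above are
 * encoded as powers of two starting at 128 bytes:
 *
 *	bytes = 128 << encoded_value
 *
 * so, e.g., an encoded value of 2 corresponds to a 512-byte maximum
 * payload.
 */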
2269 
2270 /*
2271  * Scans a device tree/branch for maximum payload size capabilities.
2272  *
2273  * rc_dip - dip of Root Complex.
2274  * dip - dip of device where scan will begin.
2275  * max_supported (IN) - maximum allowable MPS.
2276  * max_supported (OUT) - maximum payload size capability of fabric.
2277  */
2278 void
2279 pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
2280 {
2281 	if (dip == NULL)
2282 		return;
2283 
2284 	/*
2285 	 * Perform a fabric scan to obtain Maximum Payload Capabilities
2286 	 */
2287 	(void) pcie_scan_mps(rc_dip, dip, max_supported);
2288 
2289 	PCIE_DBG("MPS: Highest Common MPS= %x\n", *max_supported);
2290 }
2291 
2292 /*
2293  * Scans the fabric and determines the Maximum Payload Size based on a
2294  * highest common denominator algorithm.
2295  */
2296 static void
2297 pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
2298 {
2299 	int circular_count;
2300 	pcie_max_supported_t max_pay_load_supported;
2301 
2302 	max_pay_load_supported.dip = rc_dip;
2303 	max_pay_load_supported.highest_common_mps = *max_supported;
2304 
2305 	ndi_devi_enter(ddi_get_parent(dip), &circular_count);
2306 	ddi_walk_devs(dip, pcie_get_max_supported,
2307 	    (void *)&max_pay_load_supported);
2308 	ndi_devi_exit(ddi_get_parent(dip), circular_count);
2309 
2310 	*max_supported = max_pay_load_supported.highest_common_mps;
2311 }
2312 
2313 /*
2314  * Called as part of the Maximum Payload Size scan.
2315  */
2316 static int
2317 pcie_get_max_supported(dev_info_t *dip, void *arg)
2318 {
2319 	uint32_t max_supported;
2320 	uint16_t cap_ptr;
2321 	pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
2322 	pci_regspec_t *reg;
2323 	int rlen;
2324 	caddr_t virt;
2325 	ddi_acc_handle_t config_handle;
2326 
2327 	if (ddi_get_child(current->dip) == NULL) {
2328 		goto fail1;
2329 	}
2330 
2331 	if (pcie_dev(dip) == DDI_FAILURE) {
2332 		PCIE_DBG("MPS: pcie_get_max_supported: %s:  "
2333 		    "Not a PCIe dev\n", ddi_driver_name(dip));
2334 		goto fail1;
2335 	}
2336 
2337 	/*
2338 	 * If the suggested-mrrs property exists, then don't include this
2339 	 * device in the MPS capabilities scan.
2340 	 */
2341 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2342 	    "suggested-mrrs") != 0)
2343 		goto fail1;
2344 
2345 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
2346 	    (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
2347 		PCIE_DBG("MPS: pcie_get_max_supported: %s:  "
2348 		    "Can not read reg\n", ddi_driver_name(dip));
2349 		goto fail1;
2350 	}
2351 
2352 	if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
2353 	    &config_handle) != DDI_SUCCESS) {
2354 		PCIE_DBG("MPS: pcie_get_max_supported: %s:  pcie_map_phys "
2355 		    "failed\n", ddi_driver_name(dip));
2356 		goto fail2;
2357 	}
2358 
2359 	if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
2360 	    DDI_FAILURE) {
2361 		goto fail3;
2362 	}
2363 
2364 	max_supported = PCI_CAP_GET16(config_handle, 0, cap_ptr,
2365 	    PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;
2366 
2367 	PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
2368 	    max_supported);
2369 
2370 	if (max_supported < current->highest_common_mps)
2371 		current->highest_common_mps = max_supported;
2372 
2373 fail3:
2374 	pcie_unmap_phys(&config_handle, reg);
2375 fail2:
2376 	kmem_free(reg, rlen);
2377 fail1:
2378 	return (DDI_WALK_CONTINUE);
2379 }
2380 
2381 /*
2382  * Determines if there are any root ports attached to a root complex.
2383  *
2384  * dip - dip of root complex
2385  *
2386  * Returns - DDI_SUCCESS if there is at least one root port, otherwise
2387  *	     DDI_FAILURE.
2388  */
2389 int
2390 pcie_root_port(dev_info_t *dip)
2391 {
2392 	int port_type;
2393 	uint16_t cap_ptr;
2394 	ddi_acc_handle_t config_handle;
2395 	dev_info_t *cdip = ddi_get_child(dip);
2396 
2397 	/*
2398 	 * Determine if any of the children of the passed-in dip
2399 	 * are root ports.
2400 	 */
2401 	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {
2402 
2403 		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS)
2404 			continue;
2405 
2406 		if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E,
2407 		    &cap_ptr)) == DDI_FAILURE) {
2408 			pci_config_teardown(&config_handle);
2409 			continue;
2410 		}
2411 
2412 		port_type = PCI_CAP_GET16(config_handle, 0, cap_ptr,
2413 		    PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;
2414 
2415 		pci_config_teardown(&config_handle);
2416 
2417 		if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
2418 			return (DDI_SUCCESS);
2419 	}
2420 
2421 	/* No root ports were found */
2422 
2423 	return (DDI_FAILURE);
2424 }
2425 
2426 /*
2427  * Function that determines if a device is a PCIe device.
2428  *
2429  * dip - dip of device.
2430  *
2431  * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
2432  */
2433 int
2434 pcie_dev(dev_info_t *dip)
2435 {
2436 	/* get parent device's device_type property */
2437 	char *device_type;
2438 	int rc = DDI_FAILURE;
2439 	dev_info_t *pdip = ddi_get_parent(dip);
2440 
2441 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
2442 	    DDI_PROP_DONTPASS, "device_type", &device_type)
2443 	    != DDI_PROP_SUCCESS) {
2444 		return (DDI_FAILURE);
2445 	}
2446 
2447 	if (strcmp(device_type, "pciex") == 0)
2448 		rc = DDI_SUCCESS;
2449 	else
2450 		rc = DDI_FAILURE;
2451 
2452 	ddi_prop_free(device_type);
2453 	return (rc);
2454 }
2455 
2456 /*
2457  * Function to map in a device's memory space.
2458  */
2459 static int
2460 pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
2461     caddr_t *addrp, ddi_acc_handle_t *handlep)
2462 {
2463 	ddi_map_req_t mr;
2464 	ddi_acc_hdl_t *hp;
2465 	int result;
2466 	ddi_device_acc_attr_t attr;
2467 
2468 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2469 	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2470 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2471 	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;
2472 
2473 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
2474 	hp = impl_acc_hdl_get(*handlep);
2475 	hp->ah_vers = VERS_ACCHDL;
2476 	hp->ah_dip = dip;
2477 	hp->ah_rnumber = 0;
2478 	hp->ah_offset = 0;
2479 	hp->ah_len = 0;
2480 	hp->ah_acc = attr;
2481 
2482 	mr.map_op = DDI_MO_MAP_LOCKED;
2483 	mr.map_type = DDI_MT_REGSPEC;
2484 	mr.map_obj.rp = (struct regspec *)phys_spec;
2485 	mr.map_prot = PROT_READ | PROT_WRITE;
2486 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
2487 	mr.map_handlep = hp;
2488 	mr.map_vers = DDI_MAP_VERSION;
2489 
2490 	result = ddi_map(dip, &mr, 0, 0, addrp);
2491 
2492 	if (result != DDI_SUCCESS) {
2493 		impl_acc_hdl_free(*handlep);
2494 		*handlep = (ddi_acc_handle_t)NULL;
2495 	} else {
2496 		hp->ah_addr = *addrp;
2497 	}
2498 
2499 	return (result);
2500 }
2501 
2502 /*
2503  * Map out memory that was mapped in with pcie_map_phys().
2504  */
2505 static void
2506 pcie_unmap_phys(ddi_acc_handle_t *handlep,  pci_regspec_t *ph)
2507 {
2508 	ddi_map_req_t mr;
2509 	ddi_acc_hdl_t *hp;
2510 
2511 	hp = impl_acc_hdl_get(*handlep);
2512 	ASSERT(hp);
2513 
2514 	mr.map_op = DDI_MO_UNMAP;
2515 	mr.map_type = DDI_MT_REGSPEC;
2516 	mr.map_obj.rp = (struct regspec *)ph;
2517 	mr.map_prot = PROT_READ | PROT_WRITE;
2518 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
2519 	mr.map_handlep = hp;
2520 	mr.map_vers = DDI_MAP_VERSION;
2521 
2522 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
2523 	    hp->ah_len, &hp->ah_addr);
2524 
2525 	impl_acc_hdl_free(*handlep);
2526 	*handlep = (ddi_acc_handle_t)NULL;
2527 }
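/*
 * Example (illustrative only): pcie_map_phys() and pcie_unmap_phys() are
 * used as a bracketed pair around raw config space access, as in
 * pcie_get_max_supported() above:
 *
 *	if (pcie_map_phys(cdip, reg, &virt, &hdl) == DDI_SUCCESS) {
 *		val = PCI_CAP_GET16(hdl, 0, cap_ptr, PCIE_DEVCAP);
 *		pcie_unmap_phys(&hdl, reg);
 *	}
 */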
2528 
2529 void
2530 pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
2531 {
2532 	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
2533 	bus_p->bus_pfd->pe_rber_fatal = val;
2534 }
2535 
2536 /*
2537  * Return parent Root Port's pe_rber_fatal value.
2538  */
2539 boolean_t
2540 pcie_get_rber_fatal(dev_info_t *dip)
2541 {
2542 	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
2543 	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);
2544 	return (rp_bus_p->bus_pfd->pe_rber_fatal);
2545 }
2546 
2547 int
2548 pcie_ari_supported(dev_info_t *dip)
2549 {
2550 	uint32_t devcap2;
2551 	uint16_t pciecap;
2552 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2553 	uint8_t dev_type;
2554 
2555 	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);
2556 
2557 	if (bus_p == NULL)
2558 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2559 
2560 	dev_type = bus_p->bus_dev_type;
2561 
2562 	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
2563 	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
2564 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2565 
2566 	if (pcie_disable_ari) {
2567 		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
2568 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2569 	}
2570 
2571 	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);
2572 
2573 	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
2574 		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
2575 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2576 	}
2577 
2578 	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);
2579 
2580 	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
2581 	    dip, devcap2);
2582 
2583 	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
2584 		PCIE_DBG("pcie_ari_supported: "
2585 		    "dip=%p: ARI Forwarding is supported\n", dip);
2586 		return (PCIE_ARI_FORW_SUPPORTED);
2587 	}
2588 	return (PCIE_ARI_FORW_NOT_SUPPORTED);
2589 }
2590 
2591 int
2592 pcie_ari_enable(dev_info_t *dip)
2593 {
2594 	uint16_t devctl2;
2595 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2596 
2597 	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);
2598 
2599 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2600 		return (DDI_FAILURE);
2601 
2602 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2603 	devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
2604 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2605 
2606 	PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
2607 	    dip, devctl2);
2608 
2609 	return (DDI_SUCCESS);
2610 }
2611 
2612 int
2613 pcie_ari_disable(dev_info_t *dip)
2614 {
2615 	uint16_t devctl2;
2616 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2617 
2618 	PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);
2619 
2620 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2621 		return (DDI_FAILURE);
2622 
2623 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2624 	devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
2625 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2626 
2627 	PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
2628 	    dip, devctl2);
2629 
2630 	return (DDI_SUCCESS);
2631 }
2632 
2633 int
2634 pcie_ari_is_enabled(dev_info_t *dip)
2635 {
2636 	uint16_t devctl2;
2637 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2638 
2639 	PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);
2640 
2641 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2642 		return (PCIE_ARI_FORW_DISABLED);
2643 
2644 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2645 
2646 	PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
2647 	    dip, devctl2);
2648 
2649 	if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
2650 		PCIE_DBG("pcie_ari_is_enabled: "
2651 		    "dip=%p: ARI Forwarding is enabled\n", dip);
2652 		return (PCIE_ARI_FORW_ENABLED);
2653 	}
2654 
2655 	return (PCIE_ARI_FORW_DISABLED);
2656 }
2657 
2658 int
2659 pcie_ari_device(dev_info_t *dip)
2660 {
2661 	ddi_acc_handle_t handle;
2662 	uint16_t cap_ptr;
2663 
2664 	PCIE_DBG("pcie_ari_device: dip=%p\n", dip);
2665 
2666 	/*
2667 	 * XXX - This function may be called before the bus_p structure
2668 	 * has been populated.  This code can be changed to remove
2669 	 * pci_config_setup()/pci_config_teardown() when the RFE
2670 	 * to populate the bus_p structures early in boot is putback.
2671 	 */
2672 
2673 	/* First make sure it is a PCIe device */
2674 
2675 	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
2676 		return (PCIE_NOT_ARI_DEVICE);
2677 
2678 	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
2679 	    != DDI_SUCCESS) {
2680 		pci_config_teardown(&handle);
2681 		return (PCIE_NOT_ARI_DEVICE);
2682 	}
2683 
2684 	/* Locate the ARI Capability */
2685 
2686 	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
2687 	    &cap_ptr)) == DDI_FAILURE) {
2688 		pci_config_teardown(&handle);
2689 		return (PCIE_NOT_ARI_DEVICE);
2690 	}
2691 
2692 	/* ARI Capability was found so it must be an ARI device */
2693 	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);
2694 
2695 	pci_config_teardown(&handle);
2696 	return (PCIE_ARI_DEVICE);
2697 }
2698 
2699 int
2700 pcie_ari_get_next_function(dev_info_t *dip, int *func)
2701 {
2702 	uint32_t val;
2703 	uint16_t cap_ptr, next_function;
2704 	ddi_acc_handle_t handle;
2705 
2706 	/*
2707 	 * XXX - This function may be called before the bus_p structure
2708 	 * has been populated.  This code can be changed to remove
2709 	 * pci_config_setup()/pci_config_teardown() when the RFE
2710 	 * to populate the bus_p structures early in boot is putback.
2711 	 */
2712 
2713 	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
2714 		return (DDI_FAILURE);
2715 
2716 	if ((PCI_CAP_LOCATE(handle,
2717 	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
2718 		pci_config_teardown(&handle);
2719 		return (DDI_FAILURE);
2720 	}
2721 
2722 	val = PCI_CAP_GET32(handle, 0, cap_ptr, PCIE_ARI_CAP);
2723 
2724 	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
2725 	    PCIE_ARI_CAP_NEXT_FUNC_MASK;
2726 
2727 	pci_config_teardown(&handle);
2728 
2729 	*func = next_function;
2730 
2731 	return (DDI_SUCCESS);
2732 }
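/*
 * Example (illustrative only): ARI functions are chained through the
 * next-function field read above, so an enumerator starts at function 0
 * and follows the chain until it wraps back to 0:
 *
 *	int func = 0;
 *	do {
 *		cdip = pcie_func_to_dip(dip, func);
 *		...
 *		if (pcie_ari_get_next_function(cdip, &func) != DDI_SUCCESS)
 *			break;
 *	} while (func != 0);
 */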
2733 
2734 dev_info_t *
2735 pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
2736 {
2737 	pcie_req_id_t child_bdf;
2738 	dev_info_t *cdip;
2739 
2740 	for (cdip = ddi_get_child(dip); cdip;
2741 	    cdip = ddi_get_next_sibling(cdip)) {
2742 
2743 		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
2744 			return (NULL);
2745 
2746 		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
2747 			return (cdip);
2748 	}
2749 	return (NULL);
2750 }
2751 
2752 #ifdef	DEBUG
2753 
2754 static void
2755 pcie_print_bus(pcie_bus_t *bus_p)
2756 {
2757 	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
2758 	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);
2759 
2760 	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
2761 	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
2762 	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
2763 	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
2764 	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
2765 	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
2766 	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
2767 	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
2768 	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
2769 	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
2770 }
2771 
2772 /*
2773  * For debugging purposes, set pcie_dbg_print != 0 to see printf messages
2774  * during interrupt handling.
2775  *
2776  * When a proper solution is in place this code will disappear.
2777  * Potential solutions are:
2778  * o circular buffers
2779  * o taskq to print at lower pil
2780  */
2781 int pcie_dbg_print = 0;
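/*
 * Example (illustrative only): on a DEBUG kernel both knobs can be set
 * from /etc/system, e.g.
 *
 *	set pcie:pcie_debug_flags = 1
 *	set pcie:pcie_dbg_print = 1
 *
 * to get pcie_dbg() output even while servicing an interrupt.
 */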
2782 void
2783 pcie_dbg(char *fmt, ...)
2784 {
2785 	va_list ap;
2786 
2787 	if (!pcie_debug_flags) {
2788 		return;
2789 	}
2790 	va_start(ap, fmt);
2791 	if (servicing_interrupt()) {
2792 		if (pcie_dbg_print) {
2793 			prom_vprintf(fmt, ap);
2794 		}
2795 	} else {
2796 		prom_vprintf(fmt, ap);
2797 	}
2798 	va_end(ap);
2799 }
2800 #endif	/* DEBUG */
2801 
2802 #if defined(__i386) || defined(__amd64)
2803 static void
2804 pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
2805     boolean_t *empty_mem_range)
2806 {
2807 	uint8_t	class, subclass;
2808 	uint_t	val;
2809 
2810 	class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
2811 	subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);
2812 
2813 	if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
2814 		val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
2815 		    PCI_BCNF_IO_MASK) << 8);
2816 		/*
2817 		 * Assume that an I/O base of zero implies an invalid
2818 		 * I/O range.  Likewise for the memory base.
2819 		 */
2820 		if (val == 0)
2821 			*empty_io_range = B_TRUE;
2822 		val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
2823 		    PCI_BCNF_MEM_MASK) << 16);
2824 		if (val == 0)
2825 			*empty_mem_range = B_TRUE;
2826 	}
2827 }
2828 
2829 #endif /* defined(__i386) || defined(__amd64) */
2830 
2831 boolean_t
2832 pcie_link_bw_supported(dev_info_t *dip)
2833 {
2834 	uint32_t linkcap;
2835 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2836 
2837 	if (!PCIE_IS_PCIE(bus_p)) {
2838 		return (B_FALSE);
2839 	}
2840 
2841 	if (!PCIE_IS_RP(bus_p) && !PCIE_IS_SWD(bus_p)) {
2842 		return (B_FALSE);
2843 	}
2844 
2845 	linkcap = PCIE_CAP_GET(32, bus_p, PCIE_LINKCAP);
2846 	return ((linkcap & PCIE_LINKCAP_LINK_BW_NOTIFY_CAP) != 0);
2847 }
2848 
2849 int
2850 pcie_link_bw_enable(dev_info_t *dip)
2851 {
2852 	uint16_t linkctl;
2853 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2854 
2855 	if (!pcie_link_bw_supported(dip)) {
2856 		return (DDI_FAILURE);
2857 	}
2858 
2859 	mutex_init(&bus_p->bus_lbw_mutex, NULL, MUTEX_DRIVER, NULL);
2860 	cv_init(&bus_p->bus_lbw_cv, NULL, CV_DRIVER, NULL);
2861 	linkctl = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL);
2862 	linkctl |= PCIE_LINKCTL_LINK_BW_INTR_EN;
2863 	linkctl |= PCIE_LINKCTL_LINK_AUTO_BW_INTR_EN;
2864 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL, linkctl);
2865 
2866 	bus_p->bus_lbw_pbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
2867 	bus_p->bus_lbw_cbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
2868 	bus_p->bus_lbw_state |= PCIE_LBW_S_ENABLED;
2869 
2870 	return (DDI_SUCCESS);
2871 }
2872 
2873 int
2874 pcie_link_bw_disable(dev_info_t *dip)
2875 {
2876 	uint16_t linkctl;
2877 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2878 
2879 	if ((bus_p->bus_lbw_state & PCIE_LBW_S_ENABLED) == 0) {
2880 		return (DDI_FAILURE);
2881 	}
2882 
2883 	mutex_enter(&bus_p->bus_lbw_mutex);
2884 	while ((bus_p->bus_lbw_state &
2885 	    (PCIE_LBW_S_DISPATCHED | PCIE_LBW_S_RUNNING)) != 0) {
2886 		cv_wait(&bus_p->bus_lbw_cv, &bus_p->bus_lbw_mutex);
2887 	}
2888 	mutex_exit(&bus_p->bus_lbw_mutex);
2889 
2890 	linkctl = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL);
2891 	linkctl &= ~PCIE_LINKCTL_LINK_BW_INTR_EN;
2892 	linkctl &= ~PCIE_LINKCTL_LINK_AUTO_BW_INTR_EN;
2893 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL, linkctl);
2894 
2895 	bus_p->bus_lbw_state &= ~PCIE_LBW_S_ENABLED;
2896 	kmem_free(bus_p->bus_lbw_pbuf, MAXPATHLEN);
2897 	kmem_free(bus_p->bus_lbw_cbuf, MAXPATHLEN);
2898 	bus_p->bus_lbw_pbuf = NULL;
2899 	bus_p->bus_lbw_cbuf = NULL;
2900 
2901 	mutex_destroy(&bus_p->bus_lbw_mutex);
2902 	cv_destroy(&bus_p->bus_lbw_cv);
2903 
2904 	return (DDI_SUCCESS);
2905 }
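/*
 * Example (illustrative only): a root port driver brackets its use of
 * the bandwidth notification machinery with the two routines above and
 * forwards the relevant interrupt to pcie_link_bw_intr():
 *
 *	attach:	(void) pcie_link_bw_enable(dip);
 *	ISR:	rv = pcie_link_bw_intr(dip);
 *	detach:	(void) pcie_link_bw_disable(dip);
 */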
2906 
2907 void
2908 pcie_link_bw_taskq(void *arg)
2909 {
2910 	dev_info_t *dip = arg;
2911 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2912 	dev_info_t *cdip;
2913 	boolean_t again;
2914 	sysevent_t *se;
2915 	sysevent_value_t se_val;
2916 	sysevent_id_t eid;
2917 	sysevent_attr_list_t *ev_attr_list;
2918 	int circular;
2919 
2920 top:
2921 	ndi_devi_enter(dip, &circular);
2922 	se = NULL;
2923 	ev_attr_list = NULL;
2924 	mutex_enter(&bus_p->bus_lbw_mutex);
2925 	bus_p->bus_lbw_state &= ~PCIE_LBW_S_DISPATCHED;
2926 	bus_p->bus_lbw_state |= PCIE_LBW_S_RUNNING;
2927 	mutex_exit(&bus_p->bus_lbw_mutex);
2928 
2929 	/*
2930 	 * Update our own speeds as we've likely changed something.
2931 	 */
2932 	pcie_capture_speeds(dip);
2933 
2934 	/*
2935 	 * Walk our children. We only care about updating this on function 0
2936 	 * because the PCIe specification requires that these settings be the
2937 	 * same across all functions of a device.
2938 	 */
2939 	for (cdip = ddi_get_child(dip); cdip != NULL;
2940 	    cdip = ddi_get_next_sibling(cdip)) {
2941 		pcie_bus_t *cbus_p = PCIE_DIP2BUS(cdip);
2942 
2943 		if (cbus_p == NULL) {
2944 			continue;
2945 		}
2946 
2947 		if ((cbus_p->bus_bdf & PCIE_REQ_ID_FUNC_MASK) != 0) {
2948 			continue;
2949 		}
2950 
2951 		/*
2952 		 * It's possible that this can fire while a child is otherwise
2953 		 * only partially constructed. Therefore, if we don't have the
2954 		 * config handle, don't bother updating the child.
2955 		 */
2956 		if (cbus_p->bus_cfg_hdl == NULL) {
2957 			continue;
2958 		}
2959 
2960 		pcie_capture_speeds(cdip);
2961 		break;
2962 	}
2963 
2964 	se = sysevent_alloc(EC_PCIE, ESC_PCIE_LINK_STATE,
2965 	    ILLUMOS_KERN_PUB "pcie", SE_SLEEP);
2966 
2967 	(void) ddi_pathname(dip, bus_p->bus_lbw_pbuf);
2968 	se_val.value_type = SE_DATA_TYPE_STRING;
2969 	se_val.value.sv_string = bus_p->bus_lbw_pbuf;
2970 	if (sysevent_add_attr(&ev_attr_list, PCIE_EV_DETECTOR_PATH, &se_val,
2971 	    SE_SLEEP) != 0) {
2972 		ndi_devi_exit(dip, circular);
2973 		goto err;
2974 	}
2975 
2976 	if (cdip != NULL) {
2977 		(void) ddi_pathname(cdip, bus_p->bus_lbw_cbuf);
2978 
2979 		se_val.value_type = SE_DATA_TYPE_STRING;
2980 		se_val.value.sv_string = bus_p->bus_lbw_cbuf;
2981 
2982 		/*
2983 		 * If this fails, that's OK. We'd rather get the event off and
2984 		 * there's a chance that there may not be anything there for us.
2985 		 */
2986 		(void) sysevent_add_attr(&ev_attr_list, PCIE_EV_CHILD_PATH,
2987 		    &se_val, SE_SLEEP);
2988 	}
2989 
2990 	ndi_devi_exit(dip, circular);
2991 
2992 	/*
2993 	 * Before we generate and send down a sysevent, we need to tell the
2994 	 * system that parts of the devinfo cache need to be invalidated. While
2995 	 * the function below takes several args, it ignores them all. Because
2996 	 * this is a global invalidation, we don't bother trying to do much more
2997 	 * than requesting a global invalidation, lest we accidentally kick off
2998 	 * several in a row.
2999 	 */
3000 	ddi_prop_cache_invalidate(DDI_DEV_T_NONE, NULL, NULL, 0);
3001 
3002 	if (sysevent_attach_attributes(se, ev_attr_list) != 0) {
3003 		goto err;
3004 	}
3005 	ev_attr_list = NULL;
3006 
3007 	if (log_sysevent(se, SE_SLEEP, &eid) != 0) {
3008 		goto err;
3009 	}
3010 
3011 err:
3012 	sysevent_free_attr(ev_attr_list);
3013 	sysevent_free(se);
3014 
3015 	mutex_enter(&bus_p->bus_lbw_mutex);
3016 	bus_p->bus_lbw_state &= ~PCIE_LBW_S_RUNNING;
3017 	cv_broadcast(&bus_p->bus_lbw_cv);
3018 	again = (bus_p->bus_lbw_state & PCIE_LBW_S_DISPATCHED) != 0;
3019 	mutex_exit(&bus_p->bus_lbw_mutex);
3020 
3021 	if (again) {
3022 		goto top;
3023 	}
3024 }
3025 
3026 int
3027 pcie_link_bw_intr(dev_info_t *dip)
3028 {
3029 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
3030 	uint16_t linksts;
3031 	uint16_t flags = PCIE_LINKSTS_LINK_BW_MGMT | PCIE_LINKSTS_AUTO_BW;
3032 
3033 	if ((bus_p->bus_lbw_state & PCIE_LBW_S_ENABLED) == 0) {
3034 		return (DDI_INTR_UNCLAIMED);
3035 	}
3036 
3037 	linksts = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
3038 	if ((linksts & flags) == 0) {
3039 		return (DDI_INTR_UNCLAIMED);
3040 	}
3041 
3042 	/*
3043 	 * Check if we've already dispatched this event. If we have already
3044 	 * dispatched it, then there's nothing else to do, we coalesce multiple
3045 	 * events.
3046 	 */
3047 	mutex_enter(&bus_p->bus_lbw_mutex);
3048 	bus_p->bus_lbw_nevents++;
3049 	if ((bus_p->bus_lbw_state & PCIE_LBW_S_DISPATCHED) == 0) {
3050 		if ((bus_p->bus_lbw_state & PCIE_LBW_S_RUNNING) == 0) {
3051 			taskq_dispatch_ent(pcie_link_tq, pcie_link_bw_taskq,
3052 			    dip, 0, &bus_p->bus_lbw_ent);
3053 		}
3054 
3055 		bus_p->bus_lbw_state |= PCIE_LBW_S_DISPATCHED;
3056 	}
3057 	mutex_exit(&bus_p->bus_lbw_mutex);
3058 
3059 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKSTS, flags);
3060 	return (DDI_INTR_CLAIMED);
3061 }
3062 
3063 int
3064 pcie_link_set_target(dev_info_t *dip, pcie_link_speed_t speed)
3065 {
3066 	uint16_t ctl2, rval;
3067 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
3068 
3069 	if (!PCIE_IS_PCIE(bus_p)) {
3070 		return (ENOTSUP);
3071 	}
3072 
3073 	if (!PCIE_IS_RP(bus_p) && !PCIE_IS_SWD(bus_p)) {
3074 		return (ENOTSUP);
3075 	}
3076 
3077 	switch (speed) {
3078 	case PCIE_LINK_SPEED_2_5:
3079 		rval = PCIE_LINKCTL2_TARGET_SPEED_2_5;
3080 		break;
3081 	case PCIE_LINK_SPEED_5:
3082 		rval = PCIE_LINKCTL2_TARGET_SPEED_5;
3083 		break;
3084 	case PCIE_LINK_SPEED_8:
3085 		rval = PCIE_LINKCTL2_TARGET_SPEED_8;
3086 		break;
3087 	case PCIE_LINK_SPEED_16:
3088 		rval = PCIE_LINKCTL2_TARGET_SPEED_16;
3089 		break;
3090 	default:
3091 		return (EINVAL);
3092 	}
3093 
3094 	mutex_enter(&bus_p->bus_speed_mutex);
3095 	bus_p->bus_target_speed = speed;
3096 	bus_p->bus_speed_flags |= PCIE_LINK_F_ADMIN_TARGET;
3097 
3098 	ctl2 = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL2);
3099 	ctl2 &= ~PCIE_LINKCTL2_TARGET_SPEED_MASK;
3100 	ctl2 |= rval;
3101 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL2, ctl2);
3102 	mutex_exit(&bus_p->bus_speed_mutex);
3103 
3104 	/*
3105 	 * Make sure our updates have been reflected in devinfo.
3106 	 */
3107 	pcie_capture_speeds(dip);
3108 
3109 	return (0);
3110 }
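/*
 * Example (illustrative only): changing a link's speed is a two-step
 * operation; the new target takes effect only once the link retrains:
 *
 *	if (pcie_link_set_target(dip, PCIE_LINK_SPEED_8) == 0)
 *		(void) pcie_link_retrain(dip);
 */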
3111 
3112 int
3113 pcie_link_retrain(dev_info_t *dip)
3114 {
3115 	uint16_t ctl;
3116 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
3117 
3118 	if (!PCIE_IS_PCIE(bus_p)) {
3119 		return (ENOTSUP);
3120 	}
3121 
3122 	if (!PCIE_IS_RP(bus_p) && !PCIE_IS_SWD(bus_p)) {
3123 		return (ENOTSUP);
3124 	}
3125 
3126 	/*
3127 	 * The PCIe specification suggests that we make sure that the link isn't
3128 	 * in training before issuing this command in case there was a state
3129 	 * machine transition prior to when we got here. We wait and then go
3130 	 * ahead and issue the command anyway.
3131 	 */
3132 	for (uint32_t i = 0; i < pcie_link_retrain_count; i++) {
3133 		uint16_t sts;
3134 
3135 		sts = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
3136 		if ((sts & PCIE_LINKSTS_LINK_TRAINING) == 0)
3137 			break;
3138 		delay(drv_usectohz(pcie_link_retrain_delay_ms * 1000));
3139 	}
3140 
3141 	ctl = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL);
3142 	ctl |= PCIE_LINKCTL_RETRAIN_LINK;
3143 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL, ctl);
3144 
3145 	/*
3146 	 * Wait again to see if it clears before returning to the user.
3147 	 */
3148 	for (uint32_t i = 0; i < pcie_link_retrain_count; i++) {
3149 		uint16_t sts;
3150 
3151 		sts = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
3152 		if ((sts & PCIE_LINKSTS_LINK_TRAINING) == 0)
3153 			break;
3154 		delay(drv_usectohz(pcie_link_retrain_delay_ms * 1000));
3155 	}
3156 
3157 	return (0);
3158 }
3159