/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 */

#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/promif.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/pci_cap.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/hotplug/pci/pcie_hp.h>
#include <sys/hotplug/pci/pciehpc.h>
#include <sys/hotplug/pci/pcishpc.h>
#include <sys/hotplug/pci/pcicfg.h>
#include <sys/pci_cfgacc.h>
#include <sys/sysevent.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/pcie.h>

/* Local function prototypes */
static void pcie_init_pfd(dev_info_t *);
static void pcie_fini_pfd(dev_info_t *);

#if defined(__i386) || defined(__amd64)
static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *);
#endif /* defined(__i386) || defined(__amd64) */

#ifdef DEBUG
uint_t pcie_debug_flags = 0;
static void pcie_print_bus(pcie_bus_t *bus_p);
void pcie_dbg(char *fmt, ...);
#endif /* DEBUG */

/* Variable to control default PCI-Express config settings */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/* xxx_fw are bits that are controlled by FW and should not be modified */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800; /* Reserved Bits */

ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000; /* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)
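/* (pcie_root_ctrl_default below enables these same three bits.) */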

ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;

/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors, such as UR/MA,
 * then instead of using #defines, have the platform's PCIe Root Complex driver
 * set these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions.
 * For x86 the closest thing to a PCIe root complex driver is NPE.  For SPARC
 * the closest PCIe root complex driver is PX.
 *
 * pcie_serr_disable_flag : disable SERR only (in the RCR and command register).
 * x86 systems may want to disable SERR in general.  For root ports, enabling
 * SERR causes NMIs which are not handled and result in a watchdog timeout
 * error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */
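
/*
 * For example (a sketch only; the exact hook is platform-specific), a root
 * complex driver that wants UR errors ignored could mask them at boot with:
 *
 *	pcie_set_aer_uce_mask(pcie_get_aer_uce_mask() | PCIE_AER_UCE_UR);
 */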

/* Default severities needed for eversholt.  Error handling doesn't care */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

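/*
 * Note: PCIE_DEVCTL_MAX_PAYLOAD_4096 is the value as encoded in the Device
 * Control register's Max_Payload_Size field (bits 7:5), so shifting right
 * by 5 yields the raw 3-bit encoding.
 */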
int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
int pcie_disable_ari = 0;

/*
 * On some platforms, such as the AMD B450 chipset, we've seen an odd
 * relationship between enabling link bandwidth notifications and AERs about
 * ECRC errors. This provides a mechanism to disable it.
 */
int pcie_disable_lbw = 0;

/*
 * Amount of time to wait for an in-progress retraining. The default is to try
 * 500 times in 10ms chunks, thus a total of 5s.
 */
uint32_t pcie_link_retrain_count = 500;
uint32_t pcie_link_retrain_delay_ms = 10;

taskq_t *pcie_link_tq;
kmutex_t pcie_link_tq_mutex;

static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
	int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep,	pci_regspec_t *ph);
static int pcie_link_bw_intr(dev_info_t *);
static void pcie_capture_speeds(dev_info_t *);

dev_info_t *pcie_get_rc_dip(dev_info_t *dip);

/*
 * modload support
 */

static struct modlmisc modlmisc	= {
	&mod_miscops,	/* Type	of module */
	"PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void	*)&modlmisc,
	NULL
};

/*
 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on the
 * type of PCI device it is being sent for.
 */
char		*pcie_nv_buf;
nv_alloc_t	*pcie_nvap;
nvlist_t	*pcie_nvl;

int
_init(void)
{
	int rval;

	pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
	pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
	pcie_nvl = fm_nvlist_create(pcie_nvap);
	mutex_init(&pcie_link_tq_mutex, NULL, MUTEX_DRIVER, NULL);

	if ((rval = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&pcie_link_tq_mutex);
		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
		fm_nva_xdestroy(pcie_nvap);
		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
	}
	return (rval);
}

int
_fini()
{
	int		rval;

	if ((rval = mod_remove(&modlinkage)) == 0) {
		if (pcie_link_tq != NULL) {
			taskq_destroy(pcie_link_tq);
		}
		mutex_destroy(&pcie_link_tq_mutex);
		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
		fm_nva_xdestroy(pcie_nvap);
		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
	}
	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
	int	ret = DDI_SUCCESS;

	/*
	 * Our _init function is too early to create a taskq. Create the pcie
	 * link management taskq here instead.
	 */
	mutex_enter(&pcie_link_tq_mutex);
	if (pcie_link_tq == NULL) {
		pcie_link_tq = taskq_create("pcie_link", 1, minclsyspri, 0, 0,
		    0);
	}
	mutex_exit(&pcie_link_tq_mutex);

	/*
	 * Create a "devctl" minor node to support DEVCTL_DEVICE_*
	 * and DEVCTL_BUS_* ioctls to this bus.
	 */
	if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
	    DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to create devctl minor node for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
		/*
		 * On some x86 platforms, we have observed unexpected hotplug
		 * initialization failures in recent years. The known cause
		 * is a hardware issue: although the problematic PCI bridges
		 * have the Hotplug Capable bit set in their registers, the
		 * machine does not actually implement the expected ACPI
		 * object.
		 *
		 * We don't want to stop PCI driver attach and system boot
		 * just because of this hotplug initialization failure, so we
		 * continue, printing only a debug message.
		 */
		PCIE_DBG("%s%d: Failed setting hotplug framework\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

#if defined(__sparc)
		ddi_remove_minor_node(dip, "devctl");

		return (ret);
#endif /* defined(__sparc) */
	}

	return (DDI_SUCCESS);
}

/* ARGSUSED */
int
pcie_uninit(dev_info_t *dip)
{
	int	ret = DDI_SUCCESS;

	if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
		(void) pcie_ari_disable(dip);

	if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	if (pcie_link_bw_supported(dip)) {
		(void) pcie_link_bw_disable(dip);
	}

	ddi_remove_minor_node(dip, "devctl");

	return (ret);
}

/*
 * PCIe module interface for enabling hotplug interrupt.
 *
 * It should be called after pcie_init() is done and the bus driver's
 * interrupt handlers have been attached.
 */
int
pcie_hpintr_enable(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);

	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
		(void) (ctrl_p->hc_ops.enable_hpc_intr)(ctrl_p);
	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
		(void) pcishpc_enable_irqs(ctrl_p);
	}
	return (DDI_SUCCESS);
}

/*
 * PCIe module interface for disabling hotplug interrupt.
 *
 * It should be called before pcie_uninit() is called and the bus driver's
 * interrupt handlers are detached.
 */
int
pcie_hpintr_disable(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);

	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
		(void) (ctrl_p->hc_ops.disable_hpc_intr)(ctrl_p);
	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
		(void) pcishpc_disable_irqs(ctrl_p);
	}
	return (DDI_SUCCESS);
}

/* ARGSUSED */
int
pcie_intr(dev_info_t *dip)
{
	int hp, lbw;

	hp = pcie_hp_intr(dip);
	lbw = pcie_link_bw_intr(dip);

	if (hp == DDI_INTR_CLAIMED || lbw == DDI_INTR_CLAIMED) {
		return (DDI_INTR_CLAIMED);
	}

	return (DDI_INTR_UNCLAIMED);
}

/* ARGSUSED */
int
pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	/*
	 * Make sure the open is for the right file type.
	 */
	if (otyp != OTYP_CHR)
		return (EINVAL);

	/*
	 * Handle the open by tracking the device state.
	 */
	if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
	    ((flags & FEXCL) &&
	    (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
		return (EBUSY);
	}

	if (flags & FEXCL)
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
	else
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;

	return (0);
}

/* ARGSUSED */
int
pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (otyp != OTYP_CHR)
		return (EINVAL);

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;

	return (0);
}

/* ARGSUSED */
int
pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	struct devctl_iocdata	*dcp;
	uint_t			bus_state;
	int			rv = DDI_SUCCESS;

	/*
	 * We can use the generic implementation for devctl ioctl
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
	default:
		break;
	}

	/*
	 * read devctl ioctl data
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {
	case DEVCTL_BUS_QUIESCE:
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_QUIESCED)
				break;
		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
		break;
	case DEVCTL_BUS_UNQUIESCE:
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_ACTIVE)
				break;
		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
		break;
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
	case DEVCTL_DEVICE_RESET:
		rv = ENOTSUP;
		break;
	default:
		rv = ENOTTY;
	}

	ndi_dc_freehdl(dcp);
	return (rv);
}

/* ARGSUSED */
int
pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
	if (dev == DDI_DEV_T_ANY)
		goto skip;

	if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
	    strcmp(name, "pci-occupant") == 0) {
		int	pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));

		pcie_hp_create_occupant_props(dip, dev, pci_dev);
	}

skip:
	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
}

int
pcie_init_cfghdl(dev_info_t *cdip)
{
	pcie_bus_t		*bus_p;
	ddi_acc_handle_t	eh = NULL;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL)
		return (DDI_FAILURE);

	/* Create a config access handle dedicated to error handling */
	if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Cannot setup config access"
		    " for BDF 0x%x\n", bus_p->bus_bdf);
		return (DDI_FAILURE);
	}

	bus_p->bus_cfg_hdl = eh;
	return (DDI_SUCCESS);
}

void
pcie_fini_cfghdl(dev_info_t *cdip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(cdip);

	pci_config_teardown(&bus_p->bus_cfg_hdl);
}

void
pcie_determine_serial(dev_info_t *dip)
{
	pcie_bus_t		*bus_p = PCIE_DIP2BUS(dip);
	ddi_acc_handle_t	h;
	uint16_t		cap;
	uchar_t			serial[8];
	uint32_t		low, high;

	if (!PCIE_IS_PCIE(bus_p))
		return;

	h = bus_p->bus_cfg_hdl;

	if ((PCI_CAP_LOCATE(h, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_SER), &cap)) ==
	    DDI_FAILURE)
		return;

	high = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_UPPER_DW);
	low = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_LOWER_DW);

	/*
	 * Here, we're trying to figure out if we had an invalid PCIe read.
	 * From looking at the contents of the value alone, it can be hard to
	 * tell the difference between a value that is correctly all 1s and
	 * one that indicates an error. Therefore, we only consider the serial
	 * number invalid if both register reads are invalid. We also use only
	 * 32-bit reads, as we're not sure that all devices will support these
	 * registers as 64-bit reads, while we know that they'll support them
	 * as 32-bit reads.
	 */
	if (high == PCI_EINVAL32 && low == PCI_EINVAL32)
		return;

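	/* Assemble the 64-bit serial number least-significant byte first. */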
	serial[0] = low & 0xff;
	serial[1] = (low >> 8) & 0xff;
	serial[2] = (low >> 16) & 0xff;
	serial[3] = (low >> 24) & 0xff;
	serial[4] = high & 0xff;
	serial[5] = (high >> 8) & 0xff;
	serial[6] = (high >> 16) & 0xff;
	serial[7] = (high >> 24) & 0xff;

	(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, "pcie-serial",
	    serial, sizeof (serial));
}

static void
pcie_determine_aspm(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint32_t	linkcap;
	uint16_t	linkctl;

	if (!PCIE_IS_PCIE(bus_p))
		return;

	linkcap = PCIE_CAP_GET(32, bus_p, PCIE_LINKCAP);
	linkctl = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL);

	switch (linkcap & PCIE_LINKCAP_ASPM_SUP_MASK) {
	case PCIE_LINKCAP_ASPM_SUP_L0S:
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "pcie-aspm-support", "l0s");
		break;
	case PCIE_LINKCAP_ASPM_SUP_L1:
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "pcie-aspm-support", "l1");
		break;
	case PCIE_LINKCAP_ASPM_SUP_L0S_L1:
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "pcie-aspm-support", "l0s,l1");
		break;
	default:
		return;
	}

	switch (linkctl & PCIE_LINKCTL_ASPM_CTL_MASK) {
	case PCIE_LINKCTL_ASPM_CTL_DIS:
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "pcie-aspm-state", "disabled");
		break;
	case PCIE_LINKCTL_ASPM_CTL_L0S:
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "pcie-aspm-state", "l0s");
		break;
	case PCIE_LINKCTL_ASPM_CTL_L1:
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "pcie-aspm-state", "l1");
		break;
	case PCIE_LINKCTL_ASPM_CTL_L0S_L1:
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "pcie-aspm-state", "l0s,l1");
		break;
	}
}

/*
 * PCI-Express child device initialization.
 * This function enables generic pci-express interrupts and error
 * handling.
 *
 * @param cdip		child's dip (device's dip)
 * @return		DDI_SUCCESS or DDI_FAILURE
 */
/* ARGSUSED */
int
pcie_initchild(dev_info_t *cdip)
{
	uint16_t		tmp16, reg16;
	pcie_bus_t		*bus_p;
	uint32_t		devid, venid;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));

		return (DDI_FAILURE);
	}

	if (pcie_init_cfghdl(cdip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Update pcie_bus_t with the real Vendor ID and Device ID.
	 *
	 * For assigned devices in an IOV environment, OBP will return
	 * faked device-id/vendor-id values on configuration reads, and
	 * likewise for both properties in the root domain. The
	 * translate_devid() function updates the properties with the real
	 * device-id/vendor-id on such platforms, so we can use the
	 * properties here to get the real IDs and overwrite the faked ones.
	 *
	 * For unassigned devices, or devices in a non-IOV environment, the
	 * operation below won't make a difference.
	 *
	 * The IOV implementation only supports assignment of PCIe
	 * endpoint devices. Devices under pci-pci bridges don't need an
	 * operation like this.
	 */
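	/* (Layout: device-id in the upper 16 bits, vendor-id in the lower 16.) */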
	devid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "device-id", -1);
	venid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	bus_p->bus_dev_ven_id = (devid << 16) | (venid & 0xffff);

	/* Clear the device's status register */
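	/* (The status bits are RW1C; writing back the value read clears them.) */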
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
	PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);

	/* Setup the device's command register */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;

#if defined(__i386) || defined(__amd64)
	boolean_t empty_io_range = B_FALSE;
	boolean_t empty_mem_range = B_FALSE;
	/*
	 * Check for empty IO and Mem ranges on bridges. If a range is empty,
	 * disable the corresponding IO/Mem access, as leaving it enabled can
	 * cause a hang.
	 */
	pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
	    &empty_mem_range);
	if ((empty_io_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_IO)) {
		tmp16 &= ~PCI_COMM_IO;
		PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
	if ((empty_mem_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_MAE)) {
		tmp16 &= ~PCI_COMM_MAE;
		PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
#endif /* defined(__i386) || defined(__amd64) */

	if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
		tmp16 &= ~PCI_COMM_SERR_ENABLE;

	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
	PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if (PCIE_IS_BDG(bus_p)) {
		/* Clear the device's secondary status register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);

		/* Setup the device's secondary command register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
		tmp16 = (reg16 & pcie_bdg_command_default_fw);

		tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		/*
		 * Workaround for this Nvidia bridge. Don't enable the SERR
		 * enable bit in the bridge control register as it could lead to
		 * bogus NMIs.
		 */
		if (bus_p->bus_dev_ven_id == 0x037010DE)
			tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;

		if (pcie_command_default & PCI_COMM_PARITY_DETECT)
			tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;

		/*
		 * Enable Master Abort Mode only if URs have not been masked.
		 * For PCI and PCIe-PCI bridges, enabling this bit causes
		 * Master Aborts/URs to be forwarded as a UR/TA or SERR.  If
		 * this bit is masked, posted requests are dropped and
		 * non-posted requests are returned with -1.
		 */
		if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
			tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
		else
			tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
		PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
		    reg16);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		/* Setup PCIe device control register */
		reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		/* note: MPS/MRRS are initialized in pcie_initchild_mps() */
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK));
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);

		/* Enable PCIe errors */
		pcie_enable_errors(cdip);

		pcie_determine_serial(cdip);

		pcie_determine_aspm(cdip);

		pcie_capture_speeds(cdip);
	}

	bus_p->bus_ari = B_FALSE;
	if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
	    == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
	    == PCIE_ARI_DEVICE)) {
		bus_p->bus_ari = B_TRUE;
	}

	if (pcie_initchild_mps(cdip) == DDI_FAILURE) {
		pcie_fini_cfghdl(cdip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
pcie_init_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_ZALLOC(pf_data_t);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	PCIE_DIP2PFD(dip) = pfd_p;

	pfd_p->pe_bus_p = bus_p;
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_severity_mask = 0;
	pfd_p->pe_orig_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* Allocate the root fault struct for both RC and RP */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
		PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
	}

	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;

	if (PCIE_IS_BDG(bus_p))
		PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);

	if (PCIE_IS_PCIE(bus_p)) {
		PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);

		if (PCIE_IS_RP(bus_p))
			PCIE_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_rp_err_regs_t);

		PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		if (PCIE_IS_RP(bus_p)) {
			PCIE_ADV_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			PCIE_ADV_BDG_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		}

		PCIE_SLOT_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_slot_regs_t);
		PCIE_SLOT_REG(pfd_p)->pcie_slot_regs_valid = B_FALSE;
		PCIE_SLOT_REG(pfd_p)->pcie_slot_cap = 0;
		PCIE_SLOT_REG(pfd_p)->pcie_slot_control = 0;
		PCIE_SLOT_REG(pfd_p)->pcie_slot_status = 0;

	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		} else {
			PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p))
				PCIX_ECC_REG(pfd_p) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
		}
	}
}

static void
pcie_fini_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_DIP2PFD(dip);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		}

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
		else if (PCIE_IS_PCIE_BDG(bus_p))
			kmem_free(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));

		kmem_free(PCIE_ADV_REG(pfd_p),
		    sizeof (pf_pcie_adv_err_regs_t));

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));

		kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p))
				kmem_free(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));

			kmem_free(PCIX_ERR_REG(pfd_p),
			    sizeof (pf_pcix_err_regs_t));
		}
	}

	if (PCIE_IS_BDG(bus_p))
		kmem_free(PCI_BDG_ERR_REG(pfd_p),
		    sizeof (pf_pci_bdg_err_regs_t));

	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

	if (PCIE_IS_ROOT(bus_p)) {
		kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
		kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
	}

	kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

	PCIE_DIP2PFD(dip) = NULL;
}

/*
 * Special functions to allocate pf_data_t's for PCIe root complexes.
 * Note: Root Complex, not Root Port.
 */
void
pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
{
	pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_severity_mask = 0;
	pfd_p->pe_orig_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
	PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
	PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
	PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
	PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
	PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = PCIE_INVALID_BDF;
	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = PCIE_INVALID_BDF;

	PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
}

void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
	kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
	kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
	kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
	kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
	kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
	kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
}

/*
 * init pcie_bus_t for root complex
 *
 * Only a few of the fields in bus_t are valid for a root complex.
 * The fields that are bracketed are initialized in this routine:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		<bus_fm_flags>
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		bus_bdg_secbus
 * uint16_t		bus_pcie_off
 * uint16_t		<bus_aer_off>
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t	*	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * pcie_domain_t *	<bus_dom>
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		bus_plat_private
 */
void
pcie_rc_init_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p;

	bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
	bus_p->bus_dip = dip;
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
	bus_p->bus_hdr_type = PCI_HEADER_ONE;

	/* Fake that there are AER logs */
	bus_p->bus_aer_off = (uint16_t)-1;

	/* Needed only for handle lookup */
	atomic_or_uint(&bus_p->bus_fm_flags, PF_FM_READY);

	ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);

	PCIE_BUS2DOM(bus_p) = PCIE_ZALLOC(pcie_domain_t);
}

void
pcie_rc_fini_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);
	ndi_set_bus_private(dip, B_FALSE, 0, NULL);
	kmem_free(PCIE_BUS2DOM(bus_p), sizeof (pcie_domain_t));
	kmem_free(bus_p, sizeof (pcie_bus_t));
}

static int
pcie_width_to_int(pcie_link_width_t width)
{
	switch (width) {
	case PCIE_LINK_WIDTH_X1:
		return (1);
	case PCIE_LINK_WIDTH_X2:
		return (2);
	case PCIE_LINK_WIDTH_X4:
		return (4);
	case PCIE_LINK_WIDTH_X8:
		return (8);
	case PCIE_LINK_WIDTH_X12:
		return (12);
	case PCIE_LINK_WIDTH_X16:
		return (16);
	case PCIE_LINK_WIDTH_X32:
		return (32);
	default:
		return (0);
	}
}

/*
 * Return the speed in Transfers / second. This is a signed quantity to match
 * the ndi/ddi property interfaces.
 */
static int64_t
pcie_speed_to_int(pcie_link_speed_t speed)
{
	switch (speed) {
	case PCIE_LINK_SPEED_2_5:
		return (2500000000LL);
	case PCIE_LINK_SPEED_5:
		return (5000000000LL);
	case PCIE_LINK_SPEED_8:
		return (8000000000LL);
	case PCIE_LINK_SPEED_16:
		return (16000000000LL);
	default:
		return (0);
	}
}

/*
 * Translate the recorded speed information into devinfo properties.
 */
static void
pcie_speeds_to_devinfo(dev_info_t *dip, pcie_bus_t *bus_p)
{
	if (bus_p->bus_max_width != PCIE_LINK_WIDTH_UNKNOWN) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
		    "pcie-link-maximum-width",
		    pcie_width_to_int(bus_p->bus_max_width));
	}

	if (bus_p->bus_cur_width != PCIE_LINK_WIDTH_UNKNOWN) {
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
		    "pcie-link-current-width",
		    pcie_width_to_int(bus_p->bus_cur_width));
	}

	if (bus_p->bus_cur_speed != PCIE_LINK_SPEED_UNKNOWN) {
		(void) ndi_prop_update_int64(DDI_DEV_T_NONE, dip,
		    "pcie-link-current-speed",
		    pcie_speed_to_int(bus_p->bus_cur_speed));
	}

	if (bus_p->bus_max_speed != PCIE_LINK_SPEED_UNKNOWN) {
		(void) ndi_prop_update_int64(DDI_DEV_T_NONE, dip,
		    "pcie-link-maximum-speed",
		    pcie_speed_to_int(bus_p->bus_max_speed));
	}

	if (bus_p->bus_target_speed != PCIE_LINK_SPEED_UNKNOWN) {
		(void) ndi_prop_update_int64(DDI_DEV_T_NONE, dip,
		    "pcie-link-target-speed",
		    pcie_speed_to_int(bus_p->bus_target_speed));
	}

	if ((bus_p->bus_speed_flags & PCIE_LINK_F_ADMIN_TARGET) != 0) {
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
		    "pcie-link-admin-target-speed");
	}

	if (bus_p->bus_sup_speed != PCIE_LINK_SPEED_UNKNOWN) {
		int64_t speeds[4];
		uint_t nspeeds = 0;

		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_2_5) {
			speeds[nspeeds++] =
			    pcie_speed_to_int(PCIE_LINK_SPEED_2_5);
		}

		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_5) {
			speeds[nspeeds++] =
			    pcie_speed_to_int(PCIE_LINK_SPEED_5);
		}

		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_8) {
			speeds[nspeeds++] =
			    pcie_speed_to_int(PCIE_LINK_SPEED_8);
		}

		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_16) {
			speeds[nspeeds++] =
			    pcie_speed_to_int(PCIE_LINK_SPEED_16);
		}

		(void) ndi_prop_update_int64_array(DDI_DEV_T_NONE, dip,
		    "pcie-link-supported-speeds", speeds, nspeeds);
	}
}

/*
 * We need to capture the supported, maximum, and current device speed and
 * width. The way that this has been done has changed over time.
 *
 * Prior to PCIe Gen 3, there were only current and supported speed fields.
 * These were found in the link status and link capabilities registers of the
 * PCI express capability. With the change to PCIe Gen 3, the information in
 * the link capabilities register changed to the maximum value. The supported
 * speeds vector was moved to the link capabilities 2 register.
 *
 * Now, a device may not implement some of these registers. To determine
 * whether or not a register is present, we first need to check the revision
 * of the PCI express capability: the link capabilities 2 register did not
 * exist prior to version 2 of this capability. If a modern device does not
 * implement it, it is supposed to return zero for the register.
 */
static void
pcie_capture_speeds(dev_info_t *dip)
{
	uint16_t	vers, status;
	uint32_t	cap, cap2, ctl2;
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	dev_info_t	*rcdip;

	if (!PCIE_IS_PCIE(bus_p))
		return;

	rcdip = pcie_get_rc_dip(dip);
	if (bus_p->bus_cfg_hdl == NULL) {
		vers = pci_cfgacc_get16(rcdip, bus_p->bus_bdf,
		    bus_p->bus_pcie_off + PCIE_PCIECAP);
	} else {
		vers = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);
	}
	if (vers == PCI_EINVAL16)
		return;
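	/* The version field is the low four bits of the PCIe Capabilities register. */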
	vers &= PCIE_PCIECAP_VER_MASK;

	/*
	 * Verify the capability's version.
	 */
	switch (vers) {
	case PCIE_PCIECAP_VER_1_0:
		cap2 = 0;
		ctl2 = 0;
		break;
	case PCIE_PCIECAP_VER_2_0:
		if (bus_p->bus_cfg_hdl == NULL) {
			cap2 = pci_cfgacc_get32(rcdip, bus_p->bus_bdf,
			    bus_p->bus_pcie_off + PCIE_LINKCAP2);
			ctl2 = pci_cfgacc_get16(rcdip, bus_p->bus_bdf,
			    bus_p->bus_pcie_off + PCIE_LINKCTL2);
		} else {
			cap2 = PCIE_CAP_GET(32, bus_p, PCIE_LINKCAP2);
			ctl2 = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL2);
		}
		if (cap2 == PCI_EINVAL32)
			cap2 = 0;
		if (ctl2 == PCI_EINVAL16)
			ctl2 = 0;
		break;
	default:
		/* Don't try and handle an unknown version */
		return;
	}

	if (bus_p->bus_cfg_hdl == NULL) {
		status = pci_cfgacc_get16(rcdip, bus_p->bus_bdf,
		    bus_p->bus_pcie_off + PCIE_LINKSTS);
		cap = pci_cfgacc_get32(rcdip, bus_p->bus_bdf,
		    bus_p->bus_pcie_off + PCIE_LINKCAP);
	} else {
		status = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
		cap = PCIE_CAP_GET(32, bus_p, PCIE_LINKCAP);
	}
	if (status == PCI_EINVAL16 || cap == PCI_EINVAL32)
		return;

	mutex_enter(&bus_p->bus_speed_mutex);

	switch (status & PCIE_LINKSTS_SPEED_MASK) {
	case PCIE_LINKSTS_SPEED_2_5:
		bus_p->bus_cur_speed = PCIE_LINK_SPEED_2_5;
		break;
	case PCIE_LINKSTS_SPEED_5:
		bus_p->bus_cur_speed = PCIE_LINK_SPEED_5;
		break;
	case PCIE_LINKSTS_SPEED_8:
		bus_p->bus_cur_speed = PCIE_LINK_SPEED_8;
		break;
	case PCIE_LINKSTS_SPEED_16:
		bus_p->bus_cur_speed = PCIE_LINK_SPEED_16;
		break;
	default:
		bus_p->bus_cur_speed = PCIE_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (status & PCIE_LINKSTS_NEG_WIDTH_MASK) {
	case PCIE_LINKSTS_NEG_WIDTH_X1:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X1;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X2:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X2;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X4:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X4;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X8:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X8;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X12:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X12;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X16:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X16;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X32:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X32;
		break;
	default:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_UNKNOWN;
		break;
	}

	switch (cap & PCIE_LINKCAP_MAX_WIDTH_MASK) {
	case PCIE_LINKCAP_MAX_WIDTH_X1:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X1;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X2:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X2;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X4:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X4;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X8:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X8;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X12:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X12;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X16:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X16;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X32:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X32;
		break;
	default:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_UNKNOWN;
		break;
	}

	/*
	 * If we have the Link Capabilities 2 register, then we can get the
	 * supported speeds from it and treat the bits in Link Capabilities 1
	 * as the maximum. If we don't, then we need to follow the
	 * Implementation Note in the standard under Link Capabilities 2.
	 * Effectively, this means that if the value 10b is set in the Link
	 * Capabilities register, the device supports both 2.5 and 5 GT/s
	 * speeds.
	 */
	if (cap2 != 0) {
		if (cap2 & PCIE_LINKCAP2_SPEED_2_5)
			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_2_5;
		if (cap2 & PCIE_LINKCAP2_SPEED_5)
			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_5;
		if (cap2 & PCIE_LINKCAP2_SPEED_8)
			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_8;
		if (cap2 & PCIE_LINKCAP2_SPEED_16)
			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_16;

		switch (cap & PCIE_LINKCAP_MAX_SPEED_MASK) {
		case PCIE_LINKCAP_MAX_SPEED_2_5:
			bus_p->bus_max_speed = PCIE_LINK_SPEED_2_5;
			break;
		case PCIE_LINKCAP_MAX_SPEED_5:
			bus_p->bus_max_speed = PCIE_LINK_SPEED_5;
			break;
		case PCIE_LINKCAP_MAX_SPEED_8:
			bus_p->bus_max_speed = PCIE_LINK_SPEED_8;
			break;
		case PCIE_LINKCAP_MAX_SPEED_16:
			bus_p->bus_max_speed = PCIE_LINK_SPEED_16;
			break;
		default:
			bus_p->bus_max_speed = PCIE_LINK_SPEED_UNKNOWN;
			break;
		}
	} else {
		if (cap & PCIE_LINKCAP_MAX_SPEED_5) {
			bus_p->bus_max_speed = PCIE_LINK_SPEED_5;
			bus_p->bus_sup_speed = PCIE_LINK_SPEED_2_5 |
			    PCIE_LINK_SPEED_5;
		} else if (cap & PCIE_LINKCAP_MAX_SPEED_2_5) {
			bus_p->bus_max_speed = PCIE_LINK_SPEED_2_5;
			bus_p->bus_sup_speed = PCIE_LINK_SPEED_2_5;
		}
	}

	switch (ctl2 & PCIE_LINKCTL2_TARGET_SPEED_MASK) {
	case PCIE_LINKCTL2_TARGET_SPEED_2_5:
		bus_p->bus_target_speed = PCIE_LINK_SPEED_2_5;
		break;
	case PCIE_LINKCTL2_TARGET_SPEED_5:
		bus_p->bus_target_speed = PCIE_LINK_SPEED_5;
		break;
	case PCIE_LINKCTL2_TARGET_SPEED_8:
		bus_p->bus_target_speed = PCIE_LINK_SPEED_8;
		break;
	case PCIE_LINKCTL2_TARGET_SPEED_16:
		bus_p->bus_target_speed = PCIE_LINK_SPEED_16;
		break;
	default:
		bus_p->bus_target_speed = PCIE_LINK_SPEED_UNKNOWN;
		break;
	}

	pcie_speeds_to_devinfo(dip, bus_p);
	mutex_exit(&bus_p->bus_speed_mutex);
}

/*
 * partially init pcie_bus_t for device (dip,bdf) for accessing pci
 * config space
 *
 * This routine is invoked during boot, either after creating a devinfo node
 * (x86 case) or during px driver attach (sparc case); it is also invoked
 * in hotplug context after a devinfo node is created.
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
 * is set:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		<bus_rp_dip>
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	<bus_bdf>
 * pcie_req_id_t	<bus_rp_bdf>
 * uint32_t		<bus_dev_ven_id>
 * uint8_t		<bus_rev_id>
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		bus_bdg_secbus
 * uint16_t		<bus_pcie_off>
 * uint16_t		<bus_aer_off>
 * uint16_t		<bus_pcix_off>
 * uint16_t		<bus_ecc_ver>
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t	*	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * pcie_domain_t *	bus_dom
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		bus_plat_private
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
 * is set:
 *
 * dev_info_t *		bus_dip
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		bus_hdr_type
 * uint16_t		bus_dev_type
 * uint8_t		<bus_bdg_secbus>
 * uint16_t		bus_pcie_off
 * uint16_t		bus_aer_off
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	<bus_bus_range>
 * ppb_ranges_t	*	<bus_addr_ranges>
 * int			<bus_addr_entries>
 * pci_regspec_t *	<bus_assigned_addr>
 * int			<bus_assigned_entries>
 * pf_data_t *		<bus_pfd>
 * pcie_domain_t *	bus_dom
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		<bus_plat_private>
 */

pcie_bus_t *
pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
{
	uint16_t	status, base, baseptr, num_cap;
	uint32_t	capid;
	int		range_size;
	pcie_bus_t	*bus_p = NULL;
	dev_info_t	*rcdip;
	dev_info_t	*pdip;
	const char	*errstr = NULL;

	if (!(flags & PCIE_BUS_INITIAL))
		goto initial_done;

	bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

	bus_p->bus_dip = dip;
	bus_p->bus_bdf = bdf;

	rcdip = pcie_get_rc_dip(dip);
	ASSERT(rcdip != NULL);

	/* Save the Vendor ID, Device ID and revision ID */
	bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
	bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
	/* Save the Header Type */
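	/* (Masking with PCI_HEADER_TYPE_M strips the multi-function bit.) */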
	bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
	bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

	/*
	 * Figure out the device type and all the relevant capability offsets
	 */
	/* set default value */
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;

	status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
	if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
		goto caps_done; /* capability not supported */

	/* Relevant conventional capabilities first */

	/* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
	num_cap = 2;

	switch (bus_p->bus_hdr_type) {
	case PCI_HEADER_ZERO:
		baseptr = PCI_CONF_CAP_PTR;
		break;
	case PCI_HEADER_PPB:
		baseptr = PCI_BCNF_CAP_PTR;
		break;
	case PCI_HEADER_CARDBUS:
		baseptr = PCI_CBUS_CAP_PTR;
		break;
	default:
		cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
		    __func__, bus_p->bus_hdr_type);
		goto caps_done;
	}

	base = baseptr;
	for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
	    base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
		capid = pci_cfgacc_get8(rcdip, bdf, base);
		switch (capid) {
		case PCI_CAP_ID_PCI_E:
			bus_p->bus_pcie_off = base;
			bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
			    base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

			/* Check and save PCIe hotplug capability information */
			if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
			    (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
			    & PCIE_PCIECAP_SLOT_IMPL) &&
			    (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
			    & PCIE_SLOTCAP_HP_CAPABLE))
				bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;

			num_cap--;
			break;
		case PCI_CAP_ID_PCIX:
			bus_p->bus_pcix_off = base;
			if (PCIE_IS_BDG(bus_p))
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
			else
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
			num_cap--;
			break;
		default:
			break;
		}
	}

	/* Check and save PCI hotplug (SHPC) capability information */
	if (PCIE_IS_BDG(bus_p)) {
		base = baseptr;
		for (base = pci_cfgacc_get8(rcdip, bdf, base);
		    base; base = pci_cfgacc_get8(rcdip, bdf,
		    base + PCI_CAP_NEXT_PTR)) {
			capid = pci_cfgacc_get8(rcdip, bdf, base);
			if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
				bus_p->bus_pci_hp_off = base;
				bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
				break;
			}
		}
	}

	/* Then, relevant extended capabilities */

	if (!PCIE_IS_PCIE(bus_p))
		goto caps_done;

	/* Extended caps: PCIE_EXT_CAP_ID_AER */
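	/* (Each 32-bit extended capability header packs ID, version and next pointer.) */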
	for (base = PCIE_EXT_CAP; base; base = (capid >>
	    PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
		capid = pci_cfgacc_get32(rcdip, bdf, base);
		if (capid == PCI_CAP_EINVAL32)
			break;
		if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
		    == PCIE_EXT_CAP_ID_AER) {
			bus_p->bus_aer_off = base;
			break;
		}
	}

	/*
	 * Save and record speed information about the device.
	 */

caps_done:
	/* save RP dip and RP bdf */
	if (PCIE_IS_RP(bus_p)) {
		bus_p->bus_rp_dip = dip;
		bus_p->bus_rp_bdf = bus_p->bus_bdf;
	} else {
		for (pdip = ddi_get_parent(dip); pdip;
		    pdip = ddi_get_parent(pdip)) {
			pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

			/*
			 * If RP dip and RP bdf in parent's bus_t have
			 * been initialized, simply use these instead of
			 * continuing up to the RC.
			 */
			if (parent_bus_p->bus_rp_dip != NULL) {
				bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
				break;
			}

			/*
			 * When debugging, be aware that some NVIDIA x86
			 * architectures have two nodes for each RP, one at
			 * bus 0x0 and one at bus 0x80. The requester is from
			 * bus 0x80.
			 */
			if (PCIE_IS_ROOT(parent_bus_p)) {
				bus_p->bus_rp_dip = pdip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
				break;
			}
		}
	}

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
	(void) atomic_swap_uint(&bus_p->bus_fm_flags, 0);
	bus_p->bus_mps = 0;

	ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(dip))
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
		    "hotplug-capable");

initial_done:
	if (!(flags & PCIE_BUS_FINAL))
		goto final_done;

	/* already initialized? */
	bus_p = PCIE_DIP2BUS(dip);

	/* Save the Range information if device is a switch/bridge */
	if (PCIE_IS_BDG(bus_p)) {
		/* get "bus-range" property */
		range_size = sizeof (pci_bus_range_t);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
		    != DDI_PROP_SUCCESS) {
			errstr = "Cannot find \"bus-range\" property";
			cmn_err(CE_WARN,
			    "PCIE init err info failed BDF 0x%x:%s\n",
			    bus_p->bus_bdf, errstr);
		}

		/* get secondary bus number */
		rcdip = pcie_get_rc_dip(dip);
		ASSERT(rcdip != NULL);

		bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
		    bus_p->bus_bdf, PCI_BCNF_SECBUS);

		/* Get "ranges" property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&bus_p->bus_addr_ranges,
		    &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
			bus_p->bus_addr_entries = 0;
		bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
	}

	/* save the "assigned-addresses" property array; ignore failures */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
	    &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
		bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
	else
		bus_p->bus_assigned_entries = 0;

	pcie_init_pfd(dip);

	pcie_init_plat(dip);

	pcie_capture_speeds(dip);

final_done:

	PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
	    ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
	    bus_p->bus_bdg_secbus);
#ifdef DEBUG
	if (bus_p != NULL) {
		pcie_print_bus(bus_p);
	}
#endif

	return (bus_p);
}

/*
 * Invoked before destroying a devinfo node, mostly during a hotplug
 * operation, to free the pcie_bus_t data structure.
 */
/* ARGSUSED */
void
pcie_fini_bus(dev_info_t *dip, uint8_t flags)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	ASSERT(bus_p);

	if (flags & PCIE_BUS_INITIAL) {
		pcie_fini_plat(dip);
		pcie_fini_pfd(dip);

		kmem_free(bus_p->bus_assigned_addr,
		    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
		kmem_free(bus_p->bus_addr_ranges,
		    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
		/* zero out the fields that have been destroyed */
		bus_p->bus_assigned_addr = NULL;
		bus_p->bus_addr_ranges = NULL;
		bus_p->bus_assigned_entries = 0;
		bus_p->bus_addr_entries = 0;
	}

	if (flags & PCIE_BUS_FINAL) {
		if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
			    "hotplug-capable");
		}

		ndi_set_bus_private(dip, B_TRUE, 0, NULL);
		kmem_free(bus_p, sizeof (pcie_bus_t));
	}
}

int
pcie_postattach_child(dev_info_t *cdip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

	if (!bus_p)
		return (DDI_FAILURE);

	return (pcie_enable_ce(cdip));
}

/*
 * PCI-Express child device de-initialization.
 * This function disables generic pci-express interrupts and error
 * handling.
 */
void
pcie_uninitchild(dev_info_t *cdip)
{
	pcie_disable_errors(cdip);
	pcie_fini_cfghdl(cdip);
	pcie_fini_dom(cdip);
}

1761 /*
1762  * find the root complex dip
1763  */
1764 dev_info_t *
1765 pcie_get_rc_dip(dev_info_t *dip)
1766 {
1767 	dev_info_t *rcdip;
1768 	pcie_bus_t *rc_bus_p;
1769 
1770 	for (rcdip = ddi_get_parent(dip); rcdip;
1771 	    rcdip = ddi_get_parent(rcdip)) {
1772 		rc_bus_p = PCIE_DIP2BUS(rcdip);
1773 		if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
1774 			break;
1775 	}
1776 
1777 	return (rcdip);
1778 }
1779 
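/*
 * Determine whether a device lives on a PCI or PCI-Express bus by looking
 * at its parent's "device_type" property.
 */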
1780 boolean_t
1781 pcie_is_pci_device(dev_info_t *dip)
1782 {
1783 	dev_info_t	*pdip;
1784 	char		*device_type;
1785 
1786 	pdip = ddi_get_parent(dip);
1787 	if (pdip == NULL)
1788 		return (B_FALSE);
1789 
1790 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
1791 	    "device_type", &device_type) != DDI_PROP_SUCCESS)
1792 		return (B_FALSE);
1793 
1794 	if (strcmp(device_type, "pciex") != 0 &&
1795 	    strcmp(device_type, "pci") != 0) {
1796 		ddi_prop_free(device_type);
1797 		return (B_FALSE);
1798 	}
1799 
1800 	ddi_prop_free(device_type);
1801 	return (B_TRUE);
1802 }
1803 
1804 typedef struct {
1805 	boolean_t	init;
1806 	uint8_t		flags;
1807 } pcie_bus_arg_t;
1808 
1809 /*ARGSUSED*/
1810 static int
1811 pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
1812 {
1813 	pcie_req_id_t	bdf;
1814 	pcie_bus_arg_t	*bus_arg = (pcie_bus_arg_t *)arg;
1815 
1816 	if (!pcie_is_pci_device(dip))
1817 		goto out;
1818 
1819 	if (bus_arg->init) {
1820 		if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
1821 			goto out;
1822 
1823 		(void) pcie_init_bus(dip, bdf, bus_arg->flags);
1824 	} else {
1825 		(void) pcie_fini_bus(dip, bus_arg->flags);
1826 	}
1827 
1828 	return (DDI_WALK_CONTINUE);
1829 
1830 out:
1831 	return (DDI_WALK_PRUNECHILD);
1832 }
1833 
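/*
 * Walk the fabric below a root complex and initialize the pcie_bus_t of
 * every PCI/PCIe device found; pcie_fab_fini_bus() below performs the
 * matching teardown walk.
 */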
1834 void
1835 pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
1836 {
1837 	int		circular_count;
1838 	dev_info_t	*dip = ddi_get_child(rcdip);
1839 	pcie_bus_arg_t	arg;
1840 
1841 	arg.init = B_TRUE;
1842 	arg.flags = flags;
1843 
1844 	ndi_devi_enter(rcdip, &circular_count);
1845 	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1846 	ndi_devi_exit(rcdip, circular_count);
1847 }
1848 
1849 void
1850 pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
1851 {
1852 	int		circular_count;
1853 	dev_info_t	*dip = ddi_get_child(rcdip);
1854 	pcie_bus_arg_t	arg;
1855 
1856 	arg.init = B_FALSE;
1857 	arg.flags = flags;
1858 
1859 	ndi_devi_enter(rcdip, &circular_count);
1860 	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1861 	ndi_devi_exit(rcdip, circular_count);
1862 }
1863 
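/*
 * Enable generic PCI-Express error handling on a device: baseline error
 * reporting in the Device Control register (with CE reporting left off),
 * system error forwarding on root ports, and, if the device has AER, the
 * uncorrectable/secondary uncorrectable masks and severities, ECRC
 * generation/checking and the Root Error Command register.
 */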
1864 void
1865 pcie_enable_errors(dev_info_t *dip)
1866 {
1867 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
1868 	uint16_t	reg16, tmp16;
1869 	uint32_t	reg32, tmp32;
1870 
1871 	ASSERT(bus_p);
1872 
1873 	/*
1874 	 * Clear any pending errors
1875 	 */
1876 	pcie_clear_errors(dip);
1877 
1878 	if (!PCIE_IS_PCIE(bus_p))
1879 		return;
1880 
1881 	/*
1882 	 * Enable Baseline Error Handling but leave CE reporting off (poweron
1883 	 * default).
1884 	 */
1885 	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
1886 	    PCI_CAP_EINVAL16) {
1887 		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
1888 		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
1889 		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
1890 		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
1891 		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));
1892 
1893 		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
1894 		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
1895 	}
1896 
1897 	/* Enable Root Port Baseline Error Receiving */
1898 	if (PCIE_IS_ROOT(bus_p) &&
1899 	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
1900 	    PCI_CAP_EINVAL16) {
1901 
1902 		tmp16 = pcie_serr_disable_flag ?
1903 		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
1904 		    pcie_root_ctrl_default;
1905 		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
1906 		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
1907 		    reg16);
1908 	}
1909 
1910 	/*
1911 	 * Enable PCI-Express Advanced Error Handling if it exists
1912 	 */
1913 	if (!PCIE_HAS_AER(bus_p))
1914 		return;
1915 
1916 	/* Set Uncorrectable Severity */
1917 	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
1918 	    PCI_CAP_EINVAL32) {
1919 		tmp32 = pcie_aer_uce_severity;
1920 
1921 		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
1922 		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
1923 		    reg32);
1924 	}
1925 
1926 	/* Enable Uncorrectable errors */
1927 	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
1928 	    PCI_CAP_EINVAL32) {
1929 		tmp32 = pcie_aer_uce_mask;
1930 
1931 		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
1932 		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
1933 		    reg32);
1934 	}
1935 
1936 	/* Enable ECRC generation and checking */
1937 	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
1938 	    PCI_CAP_EINVAL32) {
1939 		tmp32 = reg32 | pcie_ecrc_value;
1940 		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
1941 		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
1942 	}
1943 
1944 	/* Enable Secondary Uncorrectable errors if this is a bridge */
1945 	if (!PCIE_IS_PCIE_BDG(bus_p))
1946 		goto root;
1947 
1948 	/* Set Uncorrectable Severity */
1949 	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
1950 	    PCI_CAP_EINVAL32) {
1951 		tmp32 = pcie_aer_suce_severity;
1952 
1953 		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
1954 		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
1955 		    reg32);
1956 	}
1957 
1958 	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
1959 	    PCI_CAP_EINVAL32) {
1960 		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
1961 		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
1962 		    PCIE_AER_SUCE_MASK, reg32);
1963 	}
1964 
1965 root:
1966 	/*
1967 	 * Enable Root Control if this is a Root device
1968 	 */
1969 	if (!PCIE_IS_ROOT(bus_p))
1970 		return;
1971 
1972 	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
1973 	    PCI_CAP_EINVAL16) {
1974 		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
1975 		    pcie_root_error_cmd_default);
1976 		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
1977 		    PCIE_AER_RE_CMD, reg16);
1978 	}
1979 }
1980 
1981 /*
1982  * This function is used for enabling CE reporting and setting the AER CE mask.
1983  * When called from outside the pcie module it should always be preceded by
1984  * a call to pcie_enable_errors.
1985  */
1986 int
1987 pcie_enable_ce(dev_info_t *dip)
1988 {
1989 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
1990 	uint16_t	device_sts, device_ctl;
1991 	uint32_t	tmp_pcie_aer_ce_mask;
1992 
1993 	if (!PCIE_IS_PCIE(bus_p))
1994 		return (DDI_SUCCESS);
1995 
1996 	/*
1997 	 * The "pcie_ce_mask" property is used to control both the CE reporting
1998 	 * enable field in the device control register and the AER CE mask. We
1999 	 * leave CE reporting disabled if pcie_ce_mask is set to -1.
2000 	 */
2001 
2002 	tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2003 	    DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);
2004 
2005 	if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
2006 		/*
2007 		 * Nothing to do since CE reporting has already been disabled.
2008 		 */
2009 		return (DDI_SUCCESS);
2010 	}
2011 
2012 	if (PCIE_HAS_AER(bus_p)) {
2013 		/* Enable AER CE */
2014 		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask);
2015 		PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
2016 		    0);
2017 
2018 		/* Clear any pending AER CE errors */
2019 		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
2020 	}
2021 
2022 	/* clear any pending CE errors */
2023 	if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
2024 	    PCI_CAP_EINVAL16)
2025 		PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
2026 		    device_sts & (~PCIE_DEVSTS_CE_DETECTED));
2027 
2028 	/* Enable CE reporting */
2029 	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
2030 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
2031 	    (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
2032 	PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);
2033 
2034 	return (DDI_SUCCESS);
2035 }
2036 
2037 /* ARGSUSED */
2038 void
2039 pcie_disable_errors(dev_info_t *dip)
2040 {
2041 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
2042 	uint16_t	device_ctl;
2043 	uint32_t	aer_reg;
2044 
2045 	if (!PCIE_IS_PCIE(bus_p))
2046 		return;
2047 
2048 	/*
2049 	 * Disable PCI-Express Baseline Error Handling
2050 	 */
2051 	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
2052 	device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
2053 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);
2054 
2055 	/*
2056 	 * Disable PCI-Express Advanced Error Handling if it exists
2057 	 */
2058 	if (!PCIE_HAS_AER(bus_p))
2059 		goto root;
2060 
2061 	/* Disable Uncorrectable errors */
2062 	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);
2063 
2064 	/* Disable Correctable errors */
2065 	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);
2066 
2067 	/* Disable ECRC generation and checking */
2068 	if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
2069 	    PCI_CAP_EINVAL32) {
2070 		aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
2071 		    PCIE_AER_CTL_ECRC_CHECK_ENA);
2072 
2073 		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
2074 	}
2075 	/*
2076 	 * Disable Secondary Uncorrectable errors if this is a bridge
2077 	 */
2078 	if (!PCIE_IS_PCIE_BDG(bus_p))
2079 		goto root;
2080 
2081 	PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);
2082 
2083 root:
2084 	/*
2085 	 * Disable Root Control if this is a Root device
2086 	 */
2087 	if (!PCIE_IS_ROOT(bus_p))
2088 		return;
2089 
2090 	if (!pcie_serr_disable_flag) {
2091 		device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
2092 		device_ctl &= ~PCIE_ROOT_SYS_ERR;
2093 		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
2094 	}
2095 
2096 	if (!PCIE_HAS_AER(bus_p))
2097 		return;
2098 
2099 	if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
2100 	    PCI_CAP_EINVAL16) {
2101 		device_ctl &= ~pcie_root_error_cmd_default;
2102 		PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
2103 	}
2104 }
2105 
2106 /*
2107  * Extract bdf from "reg" property.
2108  */
2109 int
2110 pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
2111 {
2112 	pci_regspec_t	*regspec;
2113 	int		reglen;
2114 
2115 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2116 	    "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
2117 		return (DDI_FAILURE);
2118 
2119 	if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
2120 		ddi_prop_free(regspec);
2121 		return (DDI_FAILURE);
2122 	}
2123 
2124 	/* Get phys_hi from first element.  All have same bdf. */
2125 	*bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;
2126 
2127 	ddi_prop_free(regspec);
2128 	return (DDI_SUCCESS);
2129 }
2130 
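/*
 * Given rdip somewhere below dip, walk up the devinfo tree and return the
 * ancestor of rdip that is an immediate child of dip.
 */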
2131 dev_info_t *
2132 pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
2133 {
2134 	dev_info_t *cdip = rdip;
2135 
2136 	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
2137 		;
2138 
2139 	return (cdip);
2140 }
2141 
2142 uint32_t
2143 pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
2144 {
2145 	dev_info_t *cdip;
2146 
2147 	/*
2148 	 * As part of probing, the PCI fcode interpreter may set up a DMA
2149 	 * request for a card that has fcode on it, using the dip and rdip of
2150 	 * the hotplug connector, i.e. the dip and rdip of the px/pcieb driver.
2151 	 * In this case, return an invalid bdf since we cannot get to the bdf
2152 	 * of the actual device that will be initiating this DMA.
2153 	 */
2154 	if (rdip == dip)
2155 		return (PCIE_INVALID_BDF);
2156 
2157 	cdip = pcie_get_my_childs_dip(dip, rdip);
2158 
2159 	/*
2160 	 * For a given rdip, return the bdf value of dip's (px or pcieb)
2161 	 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
2162 	 *
2163 	 * XXX - For now, return an invalid bdf value for all PCI and PCI-X
2164 	 * devices since this needs more work.
2165 	 */
2166 	return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
2167 	    PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
2168 }
2169 
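/*
 * Accessors for the global error-mask tunables used above. Note that
 * pcie_set_aer_uce_mask() also keeps the baseline UR reporting and ECRC
 * defaults consistent with the requested mask.
 */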
2170 uint32_t
2171 pcie_get_aer_uce_mask()
2172 {
2173 	return (pcie_aer_uce_mask);
2174 }
2175 uint32_t
2176 pcie_get_aer_ce_mask()
2177 {
2178 	return (pcie_aer_ce_mask);
2179 }
2180 uint32_t
2181 pcie_get_aer_suce_mask()
2182 {
2183 	return (pcie_aer_suce_mask);
2184 }
2185 uint32_t
2186 pcie_get_serr_mask()
2187 {
2188 	return (pcie_serr_disable_flag);
2189 }
2190 
2191 void
2192 pcie_set_aer_uce_mask(uint32_t mask)
2193 {
2194 	pcie_aer_uce_mask = mask;
2195 	if (mask & PCIE_AER_UCE_UR)
2196 		pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
2197 	else
2198 		pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;
2199 
2200 	if (mask & PCIE_AER_UCE_ECRC)
2201 		pcie_ecrc_value = 0;
2202 }
2203 
2204 void
2205 pcie_set_aer_ce_mask(uint32_t mask)
2206 {
2207 	pcie_aer_ce_mask = mask;
2208 }
2209 void
2210 pcie_set_aer_suce_mask(uint32_t mask)
2211 {
2212 	pcie_aer_suce_mask = mask;
2213 }
2214 void
2215 pcie_set_serr_mask(uint32_t mask)
2216 {
2217 	pcie_serr_disable_flag = mask;
2218 }
2219 
2220 /*
2221  * Is the rdip a child of dip?  Used to keep certain CTLOPS from bubbling
2222  * up erroneously, e.g. ISA ctlops to a PCI-PCI Bridge.
2223  */
2224 boolean_t
2225 pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
2226 {
2227 	dev_info_t	*cdip = ddi_get_child(dip);
2228 	for (; cdip; cdip = ddi_get_next_sibling(cdip))
2229 		if (cdip == rdip)
2230 			break;
2231 	return (cdip != NULL);
2232 }
2233 
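/*
 * Report whether the Link Disable bit is set in a PCIe device's Link
 * Control register.
 */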
2234 boolean_t
2235 pcie_is_link_disabled(dev_info_t *dip)
2236 {
2237 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2238 
2239 	if (PCIE_IS_PCIE(bus_p)) {
2240 		if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
2241 		    PCIE_LINKCTL_LINK_DISABLE)
2242 			return (B_TRUE);
2243 	}
2244 	return (B_FALSE);
2245 }
2246 
2247 /*
2248  * Initialize the MPS for a root port.
2249  *
2250  * dip - dip of root port device.
2251  */
2252 void
2253 pcie_init_root_port_mps(dev_info_t *dip)
2254 {
2255 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
2256 	int rp_cap, max_supported = pcie_max_mps;
2257 
2258 	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
2259 	    ddi_get_child(dip), &max_supported);
2260 
2261 	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, 0,
2262 	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
2263 	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;
2264 
2265 	if (rp_cap < max_supported)
2266 		max_supported = rp_cap;
2267 
2268 	bus_p->bus_mps = max_supported;
2269 	(void) pcie_initchild_mps(dip);
2270 }
2271 
2272 /*
2273  * Initialize the Maximum Payload Size of a device.
2274  *
2275  * cdip - dip of device.
2276  *
2277  * returns - DDI_SUCCESS or DDI_FAILURE
2278  */
2279 int
2280 pcie_initchild_mps(dev_info_t *cdip)
2281 {
2282 	pcie_bus_t	*bus_p;
2283 	dev_info_t	*pdip = ddi_get_parent(cdip);
2284 	uint8_t		dev_type;
2285 
2286 	bus_p = PCIE_DIP2BUS(cdip);
2287 	if (bus_p == NULL) {
2288 		PCIE_DBG("%s: BUS not found.\n",
2289 		    ddi_driver_name(cdip));
2290 		return (DDI_FAILURE);
2291 	}
2292 
2293 	dev_type = bus_p->bus_dev_type;
2294 
2295 	/*
2296 	 * For ARI Devices, only function zero's MPS needs to be set.
2297 	 */
2298 	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
2299 	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
2300 		pcie_req_id_t child_bdf;
2301 
2302 		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
2303 			return (DDI_FAILURE);
2304 		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
2305 			return (DDI_SUCCESS);
2306 	}
2307 
2308 	if (PCIE_IS_PCIE(bus_p)) {
2309 		int suggested_mrrs, fabric_mps;
2310 		uint16_t device_mps, device_mps_cap, device_mrrs, dev_ctrl;
2311 
2312 		dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
2313 		if ((fabric_mps = (PCIE_IS_RP(bus_p) ? bus_p :
2314 		    PCIE_DIP2BUS(pdip))->bus_mps) < 0) {
2315 			dev_ctrl = (dev_ctrl & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
2316 			    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
2317 			    (pcie_devctl_default &
2318 			    (PCIE_DEVCTL_MAX_READ_REQ_MASK |
2319 			    PCIE_DEVCTL_MAX_PAYLOAD_MASK));
2320 
2321 			PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
2322 			return (DDI_SUCCESS);
2323 		}
2324 
2325 		device_mps_cap = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
2326 		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;
2327 
2328 		device_mrrs = (dev_ctrl & PCIE_DEVCTL_MAX_READ_REQ_MASK) >>
2329 		    PCIE_DEVCTL_MAX_READ_REQ_SHIFT;
2330 
2331 		if (device_mps_cap < fabric_mps)
2332 			device_mrrs = device_mps = device_mps_cap;
2333 		else
2334 			device_mps = (uint16_t)fabric_mps;
2335 
2336 		suggested_mrrs = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
2337 		    cdip, DDI_PROP_DONTPASS, "suggested-mrrs", device_mrrs);
2338 
2339 		if ((device_mps == fabric_mps) ||
2340 		    (suggested_mrrs < device_mrrs))
2341 			device_mrrs = (uint16_t)suggested_mrrs;
2342 
2343 		/*
2344 		 * Replace MPS and MRRS settings.
2345 		 */
2346 		dev_ctrl &= ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
2347 		    PCIE_DEVCTL_MAX_PAYLOAD_MASK);
2348 
2349 		dev_ctrl |= ((device_mrrs << PCIE_DEVCTL_MAX_READ_REQ_SHIFT) |
2350 		    device_mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);
2351 
2352 		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
2353 
2354 		bus_p->bus_mps = device_mps;
2355 	}
2356 
2357 	return (DDI_SUCCESS);
2358 }
2359 
2360 /*
2361  * Scans a device tree/branch for maximum payload size capabilities.
2362  *
2363  * rc_dip - dip of Root Complex.
2364  * dip - dip of device where scan will begin.
2365  * max_supported (IN) - maximum allowable MPS.
2366  * max_supported (OUT) - maximum payload size capability of fabric.
2367  */
2368 void
2369 pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
2370 {
2371 	if (dip == NULL)
2372 		return;
2373 
2374 	/*
2375 	 * Perform a fabric scan to obtain Maximum Payload Capabilities
2376 	 */
2377 	(void) pcie_scan_mps(rc_dip, dip, max_supported);
2378 
2379 	PCIE_DBG("MPS: Highest Common MPS= %x\n", *max_supported);
2380 }
2381 
2382 /*
2383  * Scans the fabric and determines the Maximum Payload Size based on the
2384  * highest common denominator algorithm.
2385  */
2386 static void
2387 pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
2388 {
2389 	int circular_count;
2390 	pcie_max_supported_t max_pay_load_supported;
2391 
2392 	max_pay_load_supported.dip = rc_dip;
2393 	max_pay_load_supported.highest_common_mps = *max_supported;
2394 
2395 	ndi_devi_enter(ddi_get_parent(dip), &circular_count);
2396 	ddi_walk_devs(dip, pcie_get_max_supported,
2397 	    (void *)&max_pay_load_supported);
2398 	ndi_devi_exit(ddi_get_parent(dip), circular_count);
2399 
2400 	*max_supported = max_pay_load_supported.highest_common_mps;
2401 }
2402 
2403 /*
2404  * Called as part of the Maximum Payload Size scan.
2405  */
2406 static int
2407 pcie_get_max_supported(dev_info_t *dip, void *arg)
2408 {
2409 	uint32_t max_supported;
2410 	uint16_t cap_ptr;
2411 	pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
2412 	pci_regspec_t *reg;
2413 	int rlen;
2414 	caddr_t virt;
2415 	ddi_acc_handle_t config_handle;
2416 
2417 	if (ddi_get_child(current->dip) == NULL) {
2418 		goto fail1;
2419 	}
2420 
2421 	if (pcie_dev(dip) == DDI_FAILURE) {
2422 		PCIE_DBG("MPS: pcie_get_max_supported: %s:  "
2423 		    "Not a PCIe dev\n", ddi_driver_name(dip));
2424 		goto fail1;
2425 	}
2426 
2427 	/*
2428 	 * If the suggested-mrrs property exists, then don't include this
2429 	 * device in the MPS capabilities scan.
2430 	 */
2431 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2432 	    "suggested-mrrs") != 0)
2433 		goto fail1;
2434 
2435 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
2436 	    (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
2437 		PCIE_DBG("MPS: pcie_get_max_supported: %s:  "
2438 		    "Can not read reg\n", ddi_driver_name(dip));
2439 		goto fail1;
2440 	}
2441 
2442 	if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
2443 	    &config_handle) != DDI_SUCCESS) {
2444 		PCIE_DBG("MPS: pcie_get_max_supported: %s:  pcie_map_phys "
2445 		    "failed\n", ddi_driver_name(dip));
2446 		goto fail2;
2447 	}
2448 
2449 	if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
2450 	    DDI_FAILURE) {
2451 		goto fail3;
2452 	}
2453 
2454 	max_supported = PCI_CAP_GET16(config_handle, 0, cap_ptr,
2455 	    PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;
2456 
2457 	PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
2458 	    max_supported);
2459 
2460 	if (max_supported < current->highest_common_mps)
2461 		current->highest_common_mps = max_supported;
2462 
2463 fail3:
2464 	pcie_unmap_phys(&config_handle, reg);
2465 fail2:
2466 	kmem_free(reg, rlen);
2467 fail1:
2468 	return (DDI_WALK_CONTINUE);
2469 }
2470 
2471 /*
2472  * Determines if there are any root ports attached to a root complex.
2473  *
2474  * dip - dip of root complex
2475  *
2476  * Returns - DDI_SUCCESS if there is at least one root port otherwise
2477  *	     DDI_FAILURE.
2478  */
2479 int
2480 pcie_root_port(dev_info_t *dip)
2481 {
2482 	int port_type;
2483 	uint16_t cap_ptr;
2484 	ddi_acc_handle_t config_handle;
2485 	dev_info_t *cdip = ddi_get_child(dip);
2486 
2487 	/*
2488 	 * Determine if any of the children of the passed in dip
2489 	 * are root ports.
2490 	 */
2491 	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {
2492 
2493 		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS)
2494 			continue;
2495 
2496 		if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E,
2497 		    &cap_ptr)) == DDI_FAILURE) {
2498 			pci_config_teardown(&config_handle);
2499 			continue;
2500 		}
2501 
2502 		port_type = PCI_CAP_GET16(config_handle, 0, cap_ptr,
2503 		    PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;
2504 
2505 		pci_config_teardown(&config_handle);
2506 
2507 		if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
2508 			return (DDI_SUCCESS);
2509 	}
2510 
2511 	/* No root ports were found */
2512 
2513 	return (DDI_FAILURE);
2514 }
2515 
2516 /*
2517  * Function that determines if a device is a PCIe device.
2518  *
2519  * dip - dip of device.
2520  *
2521  * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
2522  */
2523 int
2524 pcie_dev(dev_info_t *dip)
2525 {
2526 	/* get parent device's device_type property */
2527 	char *device_type;
2528 	int rc = DDI_FAILURE;
2529 	dev_info_t *pdip = ddi_get_parent(dip);
2530 
2531 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
2532 	    DDI_PROP_DONTPASS, "device_type", &device_type)
2533 	    != DDI_PROP_SUCCESS) {
2534 		return (DDI_FAILURE);
2535 	}
2536 
2537 	if (strcmp(device_type, "pciex") == 0)
2538 		rc = DDI_SUCCESS;
2539 	else
2540 		rc = DDI_FAILURE;
2541 
2542 	ddi_prop_free(device_type);
2543 	return (rc);
2544 }
2545 
2546 /*
2547  * Function to map in a device's memory space.
2548  */
2549 static int
2550 pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
2551     caddr_t *addrp, ddi_acc_handle_t *handlep)
2552 {
2553 	ddi_map_req_t mr;
2554 	ddi_acc_hdl_t *hp;
2555 	int result;
2556 	ddi_device_acc_attr_t attr;
2557 
2558 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2559 	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2560 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2561 	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;
2562 
2563 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
2564 	hp = impl_acc_hdl_get(*handlep);
2565 	hp->ah_vers = VERS_ACCHDL;
2566 	hp->ah_dip = dip;
2567 	hp->ah_rnumber = 0;
2568 	hp->ah_offset = 0;
2569 	hp->ah_len = 0;
2570 	hp->ah_acc = attr;
2571 
2572 	mr.map_op = DDI_MO_MAP_LOCKED;
2573 	mr.map_type = DDI_MT_REGSPEC;
2574 	mr.map_obj.rp = (struct regspec *)phys_spec;
2575 	mr.map_prot = PROT_READ | PROT_WRITE;
2576 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
2577 	mr.map_handlep = hp;
2578 	mr.map_vers = DDI_MAP_VERSION;
2579 
2580 	result = ddi_map(dip, &mr, 0, 0, addrp);
2581 
2582 	if (result != DDI_SUCCESS) {
2583 		impl_acc_hdl_free(*handlep);
2584 		*handlep = (ddi_acc_handle_t)NULL;
2585 	} else {
2586 		hp->ah_addr = *addrp;
2587 	}
2588 
2589 	return (result);
2590 }
2591 
2592 /*
2593  * Unmap memory that was mapped in with pcie_map_phys().
2594  */
2595 static void
2596 pcie_unmap_phys(ddi_acc_handle_t *handlep,  pci_regspec_t *ph)
2597 {
2598 	ddi_map_req_t mr;
2599 	ddi_acc_hdl_t *hp;
2600 
2601 	hp = impl_acc_hdl_get(*handlep);
2602 	ASSERT(hp);
2603 
2604 	mr.map_op = DDI_MO_UNMAP;
2605 	mr.map_type = DDI_MT_REGSPEC;
2606 	mr.map_obj.rp = (struct regspec *)ph;
2607 	mr.map_prot = PROT_READ | PROT_WRITE;
2608 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
2609 	mr.map_handlep = hp;
2610 	mr.map_vers = DDI_MAP_VERSION;
2611 
2612 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
2613 	    hp->ah_len, &hp->ah_addr);
2614 
2615 	impl_acc_hdl_free(*handlep);
2616 	*handlep = (ddi_acc_handle_t)NULL;
2617 }
2618 
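/*
 * Set the pe_rber_fatal flag in this device's fault data; consumers read
 * the parent Root Port's value via pcie_get_rber_fatal() below.
 */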
2619 void
2620 pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
2621 {
2622 	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
2623 	bus_p->bus_pfd->pe_rber_fatal = val;
2624 }
2625 
2626 /*
2627  * Return parent Root Port's pe_rber_fatal value.
2628  */
2629 boolean_t
2630 pcie_get_rber_fatal(dev_info_t *dip)
2631 {
2632 	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
2633 	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);
2634 	return (rp_bus_p->bus_pfd->pe_rber_fatal);
2635 }
2636 
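/*
 * Determine whether a Downstream Port or Root Port supports ARI
 * forwarding. This requires a version 2.0 (or later) PCIe capability and
 * the ARI Forwarding bit in Device Capabilities 2, and may be overridden
 * globally with pcie_disable_ari.
 */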
2637 int
2638 pcie_ari_supported(dev_info_t *dip)
2639 {
2640 	uint32_t devcap2;
2641 	uint16_t pciecap;
2642 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2643 	uint8_t dev_type;
2644 
2645 	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);
2646 
2647 	if (bus_p == NULL)
2648 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2649 
2650 	dev_type = bus_p->bus_dev_type;
2651 
2652 	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
2653 	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
2654 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2655 
2656 	if (pcie_disable_ari) {
2657 		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
2658 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2659 	}
2660 
2661 	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);
2662 
2663 	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
2664 		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
2665 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2666 	}
2667 
2668 	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);
2669 
2670 	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
2671 	    dip, devcap2);
2672 
2673 	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
2674 		PCIE_DBG("pcie_ari_supported: "
2675 		    "dip=%p: ARI Forwarding is supported\n", dip);
2676 		return (PCIE_ARI_FORW_SUPPORTED);
2677 	}
2678 	return (PCIE_ARI_FORW_NOT_SUPPORTED);
2679 }
2680 
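/*
 * Turn ARI forwarding on in Device Control 2 of a port that supports it;
 * pcie_ari_disable() below clears it again.
 */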
2681 int
2682 pcie_ari_enable(dev_info_t *dip)
2683 {
2684 	uint16_t devctl2;
2685 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2686 
2687 	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);
2688 
2689 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2690 		return (DDI_FAILURE);
2691 
2692 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2693 	devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
2694 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2695 
2696 	PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
2697 	    dip, devctl2);
2698 
2699 	return (DDI_SUCCESS);
2700 }
2701 
2702 int
2703 pcie_ari_disable(dev_info_t *dip)
2704 {
2705 	uint16_t devctl2;
2706 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2707 
2708 	PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);
2709 
2710 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2711 		return (DDI_FAILURE);
2712 
2713 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2714 	devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
2715 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2716 
2717 	PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
2718 	    dip, devctl2);
2719 
2720 	return (DDI_SUCCESS);
2721 }
2722 
2723 int
2724 pcie_ari_is_enabled(dev_info_t *dip)
2725 {
2726 	uint16_t devctl2;
2727 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2728 
2729 	PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);
2730 
2731 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2732 		return (PCIE_ARI_FORW_DISABLED);
2733 
2734 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2735 
2736 	PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
2737 	    dip, devctl2);
2738 
2739 	if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
2740 		PCIE_DBG("pcie_ari_is_enabled: "
2741 		    "dip=%p: ARI Forwarding is enabled\n", dip);
2742 		return (PCIE_ARI_FORW_ENABLED);
2743 	}
2744 
2745 	return (PCIE_ARI_FORW_DISABLED);
2746 }
2747 
2748 int
2749 pcie_ari_device(dev_info_t *dip)
2750 {
2751 	ddi_acc_handle_t handle;
2752 	uint16_t cap_ptr;
2753 
2754 	PCIE_DBG("pcie_ari_device: dip=%p\n", dip);
2755 
2756 	/*
2757 	 * XXX - This function may be called before the bus_p structure
2758 	 * has been populated.  This code can be changed to remove
2759 	 * pci_config_setup()/pci_config_teardown() when the RFE
2760 	 * to populate the bus_p structures early in boot is putback.
2761 	 */
2762 
2763 	/* First make sure it is a PCIe device */
2764 
2765 	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
2766 		return (PCIE_NOT_ARI_DEVICE);
2767 
2768 	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
2769 	    != DDI_SUCCESS) {
2770 		pci_config_teardown(&handle);
2771 		return (PCIE_NOT_ARI_DEVICE);
2772 	}
2773 
2774 	/* Locate the ARI Capability */
2775 
2776 	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
2777 	    &cap_ptr)) == DDI_FAILURE) {
2778 		pci_config_teardown(&handle);
2779 		return (PCIE_NOT_ARI_DEVICE);
2780 	}
2781 
2782 	/* ARI Capability was found so it must be an ARI device */
2783 	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);
2784 
2785 	pci_config_teardown(&handle);
2786 	return (PCIE_ARI_DEVICE);
2787 }
2788 
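/*
 * Read the Next Function Number field from a device's ARI capability,
 * used when enumerating the functions of an ARI device.
 */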
2789 int
2790 pcie_ari_get_next_function(dev_info_t *dip, int *func)
2791 {
2792 	uint32_t val;
2793 	uint16_t cap_ptr, next_function;
2794 	ddi_acc_handle_t handle;
2795 
2796 	/*
2797 	 * XXX - This function may be called before the bus_p structure
2798 	 * has been populated.  This code can be changed to remove
2799 	 * pci_config_setup()/pci_config_teardown() when the RFE
2800 	 * to populate the bus_p structures early in boot is putback.
2801 	 */
2802 
2803 	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
2804 		return (DDI_FAILURE);
2805 
2806 	if ((PCI_CAP_LOCATE(handle,
2807 	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
2808 		pci_config_teardown(&handle);
2809 		return (DDI_FAILURE);
2810 	}
2811 
2812 	val = PCI_CAP_GET32(handle, 0, cap_ptr, PCIE_ARI_CAP);
2813 
2814 	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
2815 	    PCIE_ARI_CAP_NEXT_FUNC_MASK;
2816 
2817 	pci_config_teardown(&handle);
2818 
2819 	*func = next_function;
2820 
2821 	return (DDI_SUCCESS);
2822 }
2823 
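/*
 * Return the child of dip whose (ARI) function number matches "function",
 * or NULL if there is no such child.
 */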
2824 dev_info_t *
2825 pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
2826 {
2827 	pcie_req_id_t child_bdf;
2828 	dev_info_t *cdip;
2829 
2830 	for (cdip = ddi_get_child(dip); cdip;
2831 	    cdip = ddi_get_next_sibling(cdip)) {
2832 
2833 		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
2834 			return (NULL);
2835 
2836 		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
2837 			return (cdip);
2838 	}
2839 	return (NULL);
2840 }
2841 
2842 #ifdef	DEBUG
2843 
2844 static void
2845 pcie_print_bus(pcie_bus_t *bus_p)
2846 {
2847 	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
2848 	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);
2849 
2850 	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
2851 	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
2852 	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
2853 	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
2854 	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
2855 	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
2856 	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
2857 	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
2858 	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
2859 	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
2860 }
2861 
2862 /*
2863  * For debugging purposes, set pcie_dbg_print != 0 to see printf messages
2864  * during interrupt handling.
2865  *
2866  * When a proper solution is in place this code will disappear.
2867  * Potential solutions are:
2868  * o circular buffers
2869  * o taskq to print at lower pil
2870  */
2871 int pcie_dbg_print = 0;
2872 void
2873 pcie_dbg(char *fmt, ...)
2874 {
2875 	va_list ap;
2876 
2877 	if (!pcie_debug_flags) {
2878 		return;
2879 	}
2880 	va_start(ap, fmt);
2881 	if (servicing_interrupt()) {
2882 		if (pcie_dbg_print) {
2883 			prom_vprintf(fmt, ap);
2884 		}
2885 	} else {
2886 		prom_vprintf(fmt, ap);
2887 	}
2888 	va_end(ap);
2889 }
2890 #endif	/* DEBUG */
2891 
2892 #if defined(__i386) || defined(__amd64)
2893 static void
2894 pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
2895     boolean_t *empty_mem_range)
2896 {
2897 	uint8_t	class, subclass;
2898 	uint_t	val;
2899 
2900 	class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
2901 	subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);
2902 
2903 	if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
2904 		val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
2905 		    PCI_BCNF_IO_MASK) << 8);
2906 		/*
2907 		 * Assume that a base of zero implies an invalid I/O
2908 		 * range.  Likewise for the memory range.
2909 		 */
2910 		if (val == 0)
2911 			*empty_io_range = B_TRUE;
2912 		val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
2913 		    PCI_BCNF_MEM_MASK) << 16);
2914 		if (val == 0)
2915 			*empty_mem_range = B_TRUE;
2916 	}
2917 }
2918 
2919 #endif /* defined(__i386) || defined(__amd64) */
2920 
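/*
 * Report whether a Root Port or Switch Downstream Port advertises the Link
 * Bandwidth Notification capability in its Link Capabilities register.
 */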
2921 boolean_t
2922 pcie_link_bw_supported(dev_info_t *dip)
2923 {
2924 	uint32_t linkcap;
2925 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2926 
2927 	if (!PCIE_IS_PCIE(bus_p)) {
2928 		return (B_FALSE);
2929 	}
2930 
2931 	if (!PCIE_IS_RP(bus_p) && !PCIE_IS_SWD(bus_p)) {
2932 		return (B_FALSE);
2933 	}
2934 
2935 	linkcap = PCIE_CAP_GET(32, bus_p, PCIE_LINKCAP);
2936 	return ((linkcap & PCIE_LINKCAP_LINK_BW_NOTIFY_CAP) != 0);
2937 }
2938 
2939 int
2940 pcie_link_bw_enable(dev_info_t *dip)
2941 {
2942 	uint16_t linkctl;
2943 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2944 
2945 	if (pcie_disable_lbw != 0) {
2946 		return (DDI_FAILURE);
2947 	}
2948 
2949 	if (!pcie_link_bw_supported(dip)) {
2950 		return (DDI_FAILURE);
2951 	}
2952 
2953 	mutex_init(&bus_p->bus_lbw_mutex, NULL, MUTEX_DRIVER, NULL);
2954 	cv_init(&bus_p->bus_lbw_cv, NULL, CV_DRIVER, NULL);
2955 	linkctl = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL);
2956 	linkctl |= PCIE_LINKCTL_LINK_BW_INTR_EN;
2957 	linkctl |= PCIE_LINKCTL_LINK_AUTO_BW_INTR_EN;
2958 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL, linkctl);
2959 
2960 	bus_p->bus_lbw_pbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
2961 	bus_p->bus_lbw_cbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
2962 	bus_p->bus_lbw_state |= PCIE_LBW_S_ENABLED;
2963 
2964 	return (DDI_SUCCESS);
2965 }
2966 
2967 int
2968 pcie_link_bw_disable(dev_info_t *dip)
2969 {
2970 	uint16_t linkctl;
2971 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2972 
2973 	if ((bus_p->bus_lbw_state & PCIE_LBW_S_ENABLED) == 0) {
2974 		return (DDI_FAILURE);
2975 	}
2976 
2977 	mutex_enter(&bus_p->bus_lbw_mutex);
2978 	while ((bus_p->bus_lbw_state &
2979 	    (PCIE_LBW_S_DISPATCHED | PCIE_LBW_S_RUNNING)) != 0) {
2980 		cv_wait(&bus_p->bus_lbw_cv, &bus_p->bus_lbw_mutex);
2981 	}
2982 	mutex_exit(&bus_p->bus_lbw_mutex);
2983 
2984 	linkctl = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL);
2985 	linkctl &= ~PCIE_LINKCTL_LINK_BW_INTR_EN;
2986 	linkctl &= ~PCIE_LINKCTL_LINK_AUTO_BW_INTR_EN;
2987 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL, linkctl);
2988 
2989 	bus_p->bus_lbw_state &= ~PCIE_LBW_S_ENABLED;
2990 	kmem_free(bus_p->bus_lbw_pbuf, MAXPATHLEN);
2991 	kmem_free(bus_p->bus_lbw_cbuf, MAXPATHLEN);
2992 	bus_p->bus_lbw_pbuf = NULL;
2993 	bus_p->bus_lbw_cbuf = NULL;
2994 
2995 	mutex_destroy(&bus_p->bus_lbw_mutex);
2996 	cv_destroy(&bus_p->bus_lbw_cv);
2997 
2998 	return (DDI_SUCCESS);
2999 }
3000 
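/*
 * Taskq callback for link bandwidth change interrupts: re-capture the link
 * speeds of this port and of function 0 of its child, invalidate the
 * cached devinfo snapshot, and post an ESC_PCIE_LINK_STATE sysevent. If
 * another event was dispatched while we were running, do it all again.
 */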
3001 void
3002 pcie_link_bw_taskq(void *arg)
3003 {
3004 	dev_info_t *dip = arg;
3005 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
3006 	dev_info_t *cdip;
3007 	boolean_t again;
3008 	sysevent_t *se;
3009 	sysevent_value_t se_val;
3010 	sysevent_id_t eid;
3011 	sysevent_attr_list_t *ev_attr_list;
3012 	int circular;
3013 
3014 top:
3015 	ndi_devi_enter(dip, &circular);
3016 	se = NULL;
3017 	ev_attr_list = NULL;
3018 	mutex_enter(&bus_p->bus_lbw_mutex);
3019 	bus_p->bus_lbw_state &= ~PCIE_LBW_S_DISPATCHED;
3020 	bus_p->bus_lbw_state |= PCIE_LBW_S_RUNNING;
3021 	mutex_exit(&bus_p->bus_lbw_mutex);
3022 
3023 	/*
3024 	 * Update our own speeds as we've likely changed something.
3025 	 */
3026 	pcie_capture_speeds(dip);
3027 
3028 	/*
3029 	 * Walk our children. We only care about updating this on function 0
3030 	 * because the PCIe specification requires the link settings to be
3031 	 * the same for all functions of a device.
3032 	 */
3033 	for (cdip = ddi_get_child(dip); cdip != NULL;
3034 	    cdip = ddi_get_next_sibling(cdip)) {
3035 		pcie_bus_t *cbus_p = PCIE_DIP2BUS(cdip);
3036 
3037 		if (cbus_p == NULL) {
3038 			continue;
3039 		}
3040 
3041 		if ((cbus_p->bus_bdf & PCIE_REQ_ID_FUNC_MASK) != 0) {
3042 			continue;
3043 		}
3044 
3045 		/*
3046 		 * It's possible that this can fire while a child is otherwise
3047 		 * only partially constructed. Therefore, if we don't have the
3048 		 * config handle, don't bother updating the child.
3049 		 */
3050 		if (cbus_p->bus_cfg_hdl == NULL) {
3051 			continue;
3052 		}
3053 
3054 		pcie_capture_speeds(cdip);
3055 		break;
3056 	}
3057 
3058 	se = sysevent_alloc(EC_PCIE, ESC_PCIE_LINK_STATE,
3059 	    ILLUMOS_KERN_PUB "pcie", SE_SLEEP);
3060 
3061 	(void) ddi_pathname(dip, bus_p->bus_lbw_pbuf);
3062 	se_val.value_type = SE_DATA_TYPE_STRING;
3063 	se_val.value.sv_string = bus_p->bus_lbw_pbuf;
3064 	if (sysevent_add_attr(&ev_attr_list, PCIE_EV_DETECTOR_PATH, &se_val,
3065 	    SE_SLEEP) != 0) {
3066 		ndi_devi_exit(dip, circular);
3067 		goto err;
3068 	}
3069 
3070 	if (cdip != NULL) {
3071 		(void) ddi_pathname(cdip, bus_p->bus_lbw_cbuf);
3072 
3073 		se_val.value_type = SE_DATA_TYPE_STRING;
3074 		se_val.value.sv_string = bus_p->bus_lbw_cbuf;
3075 
3076 		/*
3077 		 * If this fails, that's OK. We'd rather get the event off and
3078 		 * there's a chance that there may not be anything there for us.
3079 		 */
3080 		(void) sysevent_add_attr(&ev_attr_list, PCIE_EV_CHILD_PATH,
3081 		    &se_val, SE_SLEEP);
3082 	}
3083 
3084 	ndi_devi_exit(dip, circular);
3085 
3086 	/*
3087 	 * Before we generate and send down a sysevent, we need to tell the
3088 	 * system that parts of the devinfo cache need to be invalidated. While
3089 	 * the function below takes several args, it ignores them all. Because
3090 	 * this is a global invalidation, we don't bother trying to do much more
3091 	 * than requesting a global invalidation, lest we accidentally kick off
3092 	 * several in a row.
3093 	 */
3094 	ddi_prop_cache_invalidate(DDI_DEV_T_NONE, NULL, NULL, 0);
3095 
3096 	if (sysevent_attach_attributes(se, ev_attr_list) != 0) {
3097 		goto err;
3098 	}
3099 	ev_attr_list = NULL;
3100 
3101 	if (log_sysevent(se, SE_SLEEP, &eid) != 0) {
3102 		goto err;
3103 	}
3104 
3105 err:
3106 	sysevent_free_attr(ev_attr_list);
3107 	sysevent_free(se);
3108 
3109 	mutex_enter(&bus_p->bus_lbw_mutex);
3110 	bus_p->bus_lbw_state &= ~PCIE_LBW_S_RUNNING;
3111 	cv_broadcast(&bus_p->bus_lbw_cv);
3112 	again = (bus_p->bus_lbw_state & PCIE_LBW_S_DISPATCHED) != 0;
3113 	mutex_exit(&bus_p->bus_lbw_mutex);
3114 
3115 	if (again) {
3116 		goto top;
3117 	}
3118 }
3119 
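/*
 * Interrupt handler for Link Bandwidth Notification: coalesce events and
 * hand the real work off to pcie_link_bw_taskq() before acknowledging the
 * status bits.
 */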
3120 int
3121 pcie_link_bw_intr(dev_info_t *dip)
3122 {
3123 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
3124 	uint16_t linksts;
3125 	uint16_t flags = PCIE_LINKSTS_LINK_BW_MGMT | PCIE_LINKSTS_AUTO_BW;
3126 
3127 	if ((bus_p->bus_lbw_state & PCIE_LBW_S_ENABLED) == 0) {
3128 		return (DDI_INTR_UNCLAIMED);
3129 	}
3130 
3131 	linksts = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
3132 	if ((linksts & flags) == 0) {
3133 		return (DDI_INTR_UNCLAIMED);
3134 	}
3135 
3136 	/*
3137 	 * Check if we've already dispatched this event. If we have already
3138 	 * dispatched it, then there's nothing else to do, we coalesce multiple
3139 	 * events.
3140 	 */
3141 	mutex_enter(&bus_p->bus_lbw_mutex);
3142 	bus_p->bus_lbw_nevents++;
3143 	if ((bus_p->bus_lbw_state & PCIE_LBW_S_DISPATCHED) == 0) {
3144 		if ((bus_p->bus_lbw_state & PCIE_LBW_S_RUNNING) == 0) {
3145 			taskq_dispatch_ent(pcie_link_tq, pcie_link_bw_taskq,
3146 			    dip, 0, &bus_p->bus_lbw_ent);
3147 		}
3148 
3149 		bus_p->bus_lbw_state |= PCIE_LBW_S_DISPATCHED;
3150 	}
3151 	mutex_exit(&bus_p->bus_lbw_mutex);
3152 
3153 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKSTS, flags);
3154 	return (DDI_INTR_CLAIMED);
3155 }
3156 
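/*
 * Set the target link speed of a Root Port or Switch Downstream Port in
 * its Link Control 2 register; the new speed is used the next time the
 * link retrains (see pcie_link_retrain() below).
 */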
3157 int
3158 pcie_link_set_target(dev_info_t *dip, pcie_link_speed_t speed)
3159 {
3160 	uint16_t ctl2, rval;
3161 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
3162 
3163 	if (!PCIE_IS_PCIE(bus_p)) {
3164 		return (ENOTSUP);
3165 	}
3166 
3167 	if (!PCIE_IS_RP(bus_p) && !PCIE_IS_SWD(bus_p)) {
3168 		return (ENOTSUP);
3169 	}
3170 
3171 	switch (speed) {
3172 	case PCIE_LINK_SPEED_2_5:
3173 		rval = PCIE_LINKCTL2_TARGET_SPEED_2_5;
3174 		break;
3175 	case PCIE_LINK_SPEED_5:
3176 		rval = PCIE_LINKCTL2_TARGET_SPEED_5;
3177 		break;
3178 	case PCIE_LINK_SPEED_8:
3179 		rval = PCIE_LINKCTL2_TARGET_SPEED_8;
3180 		break;
3181 	case PCIE_LINK_SPEED_16:
3182 		rval = PCIE_LINKCTL2_TARGET_SPEED_16;
3183 		break;
3184 	default:
3185 		return (EINVAL);
3186 	}
3187 
3188 	mutex_enter(&bus_p->bus_speed_mutex);
3189 	bus_p->bus_target_speed = speed;
3190 	bus_p->bus_speed_flags |= PCIE_LINK_F_ADMIN_TARGET;
3191 
3192 	ctl2 = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL2);
3193 	ctl2 &= ~PCIE_LINKCTL2_TARGET_SPEED_MASK;
3194 	ctl2 |= rval;
3195 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL2, ctl2);
3196 	mutex_exit(&bus_p->bus_speed_mutex);
3197 
3198 	/*
3199 	 * Make sure our updates have been reflected in devinfo.
3200 	 */
3201 	pcie_capture_speeds(dip);
3202 
3203 	return (0);
3204 }
3205 
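/*
 * Ask a Root Port or Switch Downstream Port to retrain its link, waiting
 * for any in-progress training to settle both before issuing the request
 * and before returning.
 */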
3206 int
3207 pcie_link_retrain(dev_info_t *dip)
3208 {
3209 	uint16_t ctl;
3210 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
3211 
3212 	if (!PCIE_IS_PCIE(bus_p)) {
3213 		return (ENOTSUP);
3214 	}
3215 
3216 	if (!PCIE_IS_RP(bus_p) && !PCIE_IS_SWD(bus_p)) {
3217 		return (ENOTSUP);
3218 	}
3219 
3220 	/*
3221 	 * The PCIe specification suggests that we make sure that the link isn't
3222 	 * in training before issuing this command in case there was a state
3223 	 * machine transition prior to when we got here. We wait and then go
3224 	 * ahead and issue the command anyway.
3225 	 */
3226 	for (uint32_t i = 0; i < pcie_link_retrain_count; i++) {
3227 		uint16_t sts;
3228 
3229 		sts = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
3230 		if ((sts & PCIE_LINKSTS_LINK_TRAINING) == 0)
3231 			break;
3232 		delay(drv_usectohz(pcie_link_retrain_delay_ms * 1000));
3233 	}
3234 
3235 	ctl = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL);
3236 	ctl |= PCIE_LINKCTL_RETRAIN_LINK;
3237 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL, ctl);
3238 
3239 	/*
3240 	 * Wait again to see if it clears before returning to the user.
3241 	 */
3242 	for (uint32_t i = 0; i < pcie_link_retrain_count; i++) {
3243 		uint16_t sts;
3244 
3245 		sts = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
3246 		if ((sts & PCIE_LINKSTS_LINK_TRAINING) == 0)
3247 			break;
3248 		delay(drv_usectohz(pcie_link_retrain_delay_ms * 1000));
3249 	}
3250 
3251 	return (0);
3252 }
3253