xref: /illumos-gate/usr/src/uts/common/io/pciex/pcie.c (revision b666b5bebdde1067e2d0f2b91fd28a5243311a1a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2019 Joyent, Inc.
25  */
26 
27 #include <sys/sysmacros.h>
28 #include <sys/types.h>
29 #include <sys/kmem.h>
30 #include <sys/modctl.h>
31 #include <sys/ddi.h>
32 #include <sys/sunddi.h>
33 #include <sys/sunndi.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/fm/util.h>
36 #include <sys/promif.h>
37 #include <sys/disp.h>
38 #include <sys/stat.h>
39 #include <sys/file.h>
40 #include <sys/pci_cap.h>
41 #include <sys/pci_impl.h>
42 #include <sys/pcie_impl.h>
43 #include <sys/hotplug/pci/pcie_hp.h>
44 #include <sys/hotplug/pci/pciehpc.h>
45 #include <sys/hotplug/pci/pcishpc.h>
46 #include <sys/hotplug/pci/pcicfg.h>
47 #include <sys/pci_cfgacc.h>
48 #include <sys/sysevent.h>
49 #include <sys/sysevent/eventdefs.h>
50 #include <sys/sysevent/pcie.h>
51 
52 /* Local functions prototypes */
53 static void pcie_init_pfd(dev_info_t *);
54 static void pcie_fini_pfd(dev_info_t *);
55 
56 #if defined(__i386) || defined(__amd64)
57 static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *);
58 #endif /* defined(__i386) || defined(__amd64) */
59 
60 #ifdef DEBUG
61 uint_t pcie_debug_flags = 0;
62 static void pcie_print_bus(pcie_bus_t *bus_p);
63 void pcie_dbg(char *fmt, ...);
64 #endif /* DEBUG */
65 
/* Variable to control default PCI-Express config settings */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/* xxx_fw are bits that are controlled by FW and should not be modified */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800; /* Reserved Bits */

/* Bridge control bits owned by FW; preserved by pcie_initchild() */
ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000; /* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)

ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;

/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions.  For
 * x86 the closest thing to a PCIe root complex driver is NPE.	For SPARC the
 * closest PCIe root complex driver is PX.
 *
 * pcie_serr_disable_flag : disable SERR only (in RCR and command reg) x86
 * systems may want to disable SERR in general.  For root ports, enabling SERR
 * causes NMIs which are not handled and results in a watchdog timeout error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */

/* Default severities needed for eversholt.  Error handling doesn't care */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

/* Max payload size ceiling, in DEVCTL field encoding (value >> 5) */
int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
/* Tunable: set non-zero to disable ARI forwarding support */
int pcie_disable_ari = 0;

/*
 * On some platforms, such as the AMD B450 chipset, we've seen an odd
 * relationship between enabling link bandwidth notifications and AERs about
 * ECRC errors. This provides a mechanism to disable it.
 */
int pcie_disable_lbw = 0;

/*
 * Amount of time to wait for an in-progress retraining. The default is to try
 * 500 times in 10ms chunks, thus a total of 5s.
 */
uint32_t pcie_link_retrain_count = 500;
uint32_t pcie_link_retrain_delay_ms = 10;

/* Link-management taskq: created lazily in pcie_init(), destroyed in _fini() */
taskq_t *pcie_link_tq;
kmutex_t pcie_link_tq_mutex;
163 
164 static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
165 	int *max_supported);
166 static int pcie_get_max_supported(dev_info_t *dip, void *arg);
167 static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
168     caddr_t *addrp, ddi_acc_handle_t *handlep);
169 static void pcie_unmap_phys(ddi_acc_handle_t *handlep,	pci_regspec_t *ph);
170 static int pcie_link_bw_intr(dev_info_t *);
171 static void pcie_capture_speeds(dev_info_t *);
172 
173 dev_info_t *pcie_get_rc_dip(dev_info_t *dip);
174 
/*
 * modload support
 */

/* Misc module: exports framework routines for nexus drivers, no dev_ops. */
static struct modlmisc modlmisc	= {
	&mod_miscops,	/* Type	of module */
	"PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void	*)&modlmisc,
	NULL
};

/*
 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on the
 * type of PCI device it is being sent for.
 */
char		*pcie_nv_buf;	/* ERPT_DATA_SZ buffer backing pcie_nvap */
nv_alloc_t	*pcie_nvap;	/* fixed-buffer nvlist allocator */
nvlist_t	*pcie_nvl;	/* pre-allocated nvlist for ereport payloads */
198 
199 int
200 _init(void)
201 {
202 	int rval;
203 
204 	pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
205 	pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
206 	pcie_nvl = fm_nvlist_create(pcie_nvap);
207 	mutex_init(&pcie_link_tq_mutex, NULL, MUTEX_DRIVER, NULL);
208 
209 	if ((rval = mod_install(&modlinkage)) != 0) {
210 		mutex_destroy(&pcie_link_tq_mutex);
211 		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
212 		fm_nva_xdestroy(pcie_nvap);
213 		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
214 	}
215 	return (rval);
216 }
217 
218 int
219 _fini()
220 {
221 	int		rval;
222 
223 	if ((rval = mod_remove(&modlinkage)) == 0) {
224 		if (pcie_link_tq != NULL) {
225 			taskq_destroy(pcie_link_tq);
226 		}
227 		mutex_destroy(&pcie_link_tq_mutex);
228 		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
229 		fm_nva_xdestroy(pcie_nvap);
230 		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
231 	}
232 	return (rval);
233 }
234 
235 int
236 _info(struct modinfo *modinfop)
237 {
238 	return (mod_info(&modlinkage, modinfop));
239 }
240 
/*
 * Common nexus initialization: create the shared link-management taskq
 * (first caller wins), the "devctl" minor node for DEVCTL_* ioctls, and
 * the hotplug framework state for this port.
 */
/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
	int	ret = DDI_SUCCESS;

	/*
	 * Our _init function is too early to create a taskq. Create the pcie
	 * link management taskq here now instead.
	 */
	mutex_enter(&pcie_link_tq_mutex);
	if (pcie_link_tq == NULL) {
		pcie_link_tq = taskq_create("pcie_link", 1, minclsyspri, 0, 0,
		    0);
	}
	mutex_exit(&pcie_link_tq_mutex);


	/*
	 * Create a "devctl" minor node to support DEVCTL_DEVICE_*
	 * and DEVCTL_BUS_* ioctls to this bus.
	 */
	if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
	    DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to create devctl minor node for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
		/*
		 * On some x86 platforms, we observed unexpected hotplug
		 * initialization failures in recent years. The known cause
		 * is a hardware issue: while the problem PCI bridges have
		 * the Hotplug Capable registers set, the machine actually
		 * does not implement the expected ACPI object.
		 *
		 * We don't want to stop PCI driver attach and system boot
		 * just because of this hotplug initialization failure.
		 * Continue with a debug message printed.
		 */
		PCIE_DBG("%s%d: Failed setting hotplug framework\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		/* SPARC does not tolerate the failure: undo and bail. */
#if defined(__sparc)
		ddi_remove_minor_node(dip, "devctl");

		return (ret);
#endif /* defined(__sparc) */
	}

	return (DDI_SUCCESS);
}
296 
297 /* ARGSUSED */
298 int
299 pcie_uninit(dev_info_t *dip)
300 {
301 	int	ret = DDI_SUCCESS;
302 
303 	if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
304 		(void) pcie_ari_disable(dip);
305 
306 	if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
307 		PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
308 		    ddi_driver_name(dip), ddi_get_instance(dip));
309 
310 		return (ret);
311 	}
312 
313 	if (pcie_link_bw_supported(dip)) {
314 		(void) pcie_link_bw_disable(dip);
315 	}
316 
317 	ddi_remove_minor_node(dip, "devctl");
318 
319 	return (ret);
320 }
321 
322 /*
323  * PCIe module interface for enabling hotplug interrupt.
324  *
325  * It should be called after pcie_init() is done and bus driver's
326  * interrupt handlers have been attached.
327  */
328 int
329 pcie_hpintr_enable(dev_info_t *dip)
330 {
331 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
332 	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);
333 
334 	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
335 		(void) (ctrl_p->hc_ops.enable_hpc_intr)(ctrl_p);
336 	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
337 		(void) pcishpc_enable_irqs(ctrl_p);
338 	}
339 	return (DDI_SUCCESS);
340 }
341 
342 /*
343  * PCIe module interface for disabling hotplug interrupt.
344  *
345  * It should be called before pcie_uninit() is called and bus driver's
346  * interrupt handlers are detached.
347  */
348 int
349 pcie_hpintr_disable(dev_info_t *dip)
350 {
351 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
352 	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);
353 
354 	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
355 		(void) (ctrl_p->hc_ops.disable_hpc_intr)(ctrl_p);
356 	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
357 		(void) pcishpc_disable_irqs(ctrl_p);
358 	}
359 	return (DDI_SUCCESS);
360 }
361 
362 /* ARGSUSED */
363 int
364 pcie_intr(dev_info_t *dip)
365 {
366 	int hp, lbw;
367 
368 	hp = pcie_hp_intr(dip);
369 	lbw = pcie_link_bw_intr(dip);
370 
371 	if (hp == DDI_INTR_CLAIMED || lbw == DDI_INTR_CLAIMED) {
372 		return (DDI_INTR_CLAIMED);
373 	}
374 
375 	return (DDI_INTR_UNCLAIMED);
376 }
377 
378 /* ARGSUSED */
379 int
380 pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
381 {
382 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
383 
384 	/*
385 	 * Make sure the open is for the right file type.
386 	 */
387 	if (otyp != OTYP_CHR)
388 		return (EINVAL);
389 
390 	/*
391 	 * Handle the open by tracking the device state.
392 	 */
393 	if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
394 	    ((flags & FEXCL) &&
395 	    (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
396 		return (EBUSY);
397 	}
398 
399 	if (flags & FEXCL)
400 		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
401 	else
402 		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;
403 
404 	return (0);
405 }
406 
407 /* ARGSUSED */
408 int
409 pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
410 {
411 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
412 
413 	if (otyp != OTYP_CHR)
414 		return (EINVAL);
415 
416 	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
417 
418 	return (0);
419 }
420 
421 /* ARGSUSED */
422 int
423 pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
424     cred_t *credp, int *rvalp)
425 {
426 	struct devctl_iocdata	*dcp;
427 	uint_t			bus_state;
428 	int			rv = DDI_SUCCESS;
429 
430 	/*
431 	 * We can use the generic implementation for devctl ioctl
432 	 */
433 	switch (cmd) {
434 	case DEVCTL_DEVICE_GETSTATE:
435 	case DEVCTL_DEVICE_ONLINE:
436 	case DEVCTL_DEVICE_OFFLINE:
437 	case DEVCTL_BUS_GETSTATE:
438 		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
439 	default:
440 		break;
441 	}
442 
443 	/*
444 	 * read devctl ioctl data
445 	 */
446 	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
447 		return (EFAULT);
448 
449 	switch (cmd) {
450 	case DEVCTL_BUS_QUIESCE:
451 		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
452 			if (bus_state == BUS_QUIESCED)
453 				break;
454 		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
455 		break;
456 	case DEVCTL_BUS_UNQUIESCE:
457 		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
458 			if (bus_state == BUS_ACTIVE)
459 				break;
460 		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
461 		break;
462 	case DEVCTL_BUS_RESET:
463 	case DEVCTL_BUS_RESETALL:
464 	case DEVCTL_DEVICE_RESET:
465 		rv = ENOTSUP;
466 		break;
467 	default:
468 		rv = ENOTTY;
469 	}
470 
471 	ndi_dc_freehdl(dcp);
472 	return (rv);
473 }
474 
475 /* ARGSUSED */
476 int
477 pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
478     int flags, char *name, caddr_t valuep, int *lengthp)
479 {
480 	if (dev == DDI_DEV_T_ANY)
481 		goto skip;
482 
483 	if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
484 	    strcmp(name, "pci-occupant") == 0) {
485 		int	pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));
486 
487 		pcie_hp_create_occupant_props(dip, dev, pci_dev);
488 	}
489 
490 skip:
491 	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
492 }
493 
494 int
495 pcie_init_cfghdl(dev_info_t *cdip)
496 {
497 	pcie_bus_t		*bus_p;
498 	ddi_acc_handle_t	eh = NULL;
499 
500 	bus_p = PCIE_DIP2BUS(cdip);
501 	if (bus_p == NULL)
502 		return (DDI_FAILURE);
503 
504 	/* Create an config access special to error handling */
505 	if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
506 		cmn_err(CE_WARN, "Cannot setup config access"
507 		    " for BDF 0x%x\n", bus_p->bus_bdf);
508 		return (DDI_FAILURE);
509 	}
510 
511 	bus_p->bus_cfg_hdl = eh;
512 	return (DDI_SUCCESS);
513 }
514 
515 void
516 pcie_fini_cfghdl(dev_info_t *cdip)
517 {
518 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(cdip);
519 
520 	pci_config_teardown(&bus_p->bus_cfg_hdl);
521 }
522 
523 void
524 pcie_determine_serial(dev_info_t *dip)
525 {
526 	pcie_bus_t		*bus_p = PCIE_DIP2BUS(dip);
527 	ddi_acc_handle_t	h;
528 	uint16_t		cap;
529 	uchar_t			serial[8];
530 	uint32_t		low, high;
531 
532 	if (!PCIE_IS_PCIE(bus_p))
533 		return;
534 
535 	h = bus_p->bus_cfg_hdl;
536 
537 	if ((PCI_CAP_LOCATE(h, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_SER), &cap)) ==
538 	    DDI_FAILURE)
539 		return;
540 
541 	high = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_UPPER_DW);
542 	low = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_LOWER_DW);
543 
544 	/*
545 	 * Here, we're trying to figure out if we had an invalid PCIe read. From
546 	 * looking at the contents of the value, it can be hard to tell the
547 	 * difference between a value that has all 1s correctly versus if we had
548 	 * an error. In this case, we only assume it's invalid if both register
549 	 * reads are invalid. We also only use 32-bit reads as we're not sure if
550 	 * all devices will support these as 64-bit reads, while we know that
551 	 * they'll support these as 32-bit reads.
552 	 */
553 	if (high == PCI_EINVAL32 && low == PCI_EINVAL32)
554 		return;
555 
556 	serial[0] = low & 0xff;
557 	serial[1] = (low >> 8) & 0xff;
558 	serial[2] = (low >> 16) & 0xff;
559 	serial[3] = (low >> 24) & 0xff;
560 	serial[4] = high & 0xff;
561 	serial[5] = (high >> 8) & 0xff;
562 	serial[6] = (high >> 16) & 0xff;
563 	serial[7] = (high >> 24) & 0xff;
564 
565 	(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, "pcie-serial",
566 	    serial, sizeof (serial));
567 }
568 
569 static void
570 pcie_determine_aspm(dev_info_t *dip)
571 {
572 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
573 	uint32_t	linkcap;
574 	uint16_t	linkctl;
575 
576 	if (!PCIE_IS_PCIE(bus_p))
577 		return;
578 
579 	linkcap = PCIE_CAP_GET(32, bus_p, PCIE_LINKCAP);
580 	linkctl = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL);
581 
582 	switch (linkcap & PCIE_LINKCAP_ASPM_SUP_MASK) {
583 	case PCIE_LINKCAP_ASPM_SUP_L0S:
584 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
585 		    "pcie-aspm-support", "l0s");
586 		break;
587 	case PCIE_LINKCAP_ASPM_SUP_L1:
588 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
589 		    "pcie-aspm-support", "l1");
590 		break;
591 	case PCIE_LINKCAP_ASPM_SUP_L0S_L1:
592 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
593 		    "pcie-aspm-support", "l0s,l1");
594 		break;
595 	default:
596 		return;
597 	}
598 
599 	switch (linkctl & PCIE_LINKCTL_ASPM_CTL_MASK) {
600 	case PCIE_LINKCTL_ASPM_CTL_DIS:
601 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
602 		    "pcie-aspm-state", "disabled");
603 		break;
604 	case PCIE_LINKCTL_ASPM_CTL_L0S:
605 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
606 		    "pcie-aspm-state", "l0s");
607 		break;
608 	case PCIE_LINKCTL_ASPM_CTL_L1:
609 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
610 		    "pcie-aspm-state", "l1");
611 		break;
612 	case PCIE_LINKCTL_ASPM_CTL_L0S_L1:
613 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
614 		    "pcie-aspm-state", "l0s,l1");
615 		break;
616 	}
617 }
618 
619 /*
620  * PCI-Express child device initialization.
621  * This function enables generic pci-express interrupts and error
622  * handling.
623  *
624  * @param cdip		child's dip (device's dip)
626  * @return		DDI_SUCCESS or DDI_FAILURE
627  */
/* ARGSUSED */
int
pcie_initchild(dev_info_t *cdip)
{
	uint16_t		tmp16, reg16;
	pcie_bus_t		*bus_p;
	uint32_t		devid, venid;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));

		return (DDI_FAILURE);
	}

	/* Dedicated config access handle for the error handling code. */
	if (pcie_init_cfghdl(cdip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Update pcie_bus_t with real Vendor Id Device Id.
	 *
	 * For assigned devices in IOV environment, the OBP will return
	 * faked device id/vendor id on configuration read and for both
	 * properties in root domain. translate_devid() function will
	 * update the properties with real device-id/vendor-id on such
	 * platforms, so that we can utilize the properties here to get
	 * real device-id/vendor-id and overwrite the faked ids.
	 *
	 * For unassigned devices or devices in non-IOV environment, the
	 * operation below won't make a difference.
	 *
	 * The IOV implementation only supports assignment of PCIE
	 * endpoint devices. Devices under pci-pci bridges don't need
	 * operation like this.
	 */
	devid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "device-id", -1);
	venid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	bus_p->bus_dev_ven_id = (devid << 16) | (venid & 0xffff);

	/* Clear the device's status register by writing back the set bits */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
	PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);

	/* Setup the device's command register */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
	/* Keep the FW-owned bits as-is; apply our defaults to the rest. */
	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;

#if defined(__i386) || defined(__amd64)
	boolean_t empty_io_range = B_FALSE;
	boolean_t empty_mem_range = B_FALSE;
	/*
	 * Check for empty IO and Mem ranges on bridges. If so disable IO/Mem
	 * access as it can cause a hang if enabled.
	 */
	pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
	    &empty_mem_range);
	if ((empty_io_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_IO)) {
		tmp16 &= ~PCI_COMM_IO;
		PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
	if ((empty_mem_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_MAE)) {
		tmp16 &= ~PCI_COMM_MAE;
		PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
#endif /* defined(__i386) || defined(__amd64) */

	if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
		tmp16 &= ~PCI_COMM_SERR_ENABLE;

	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
	PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if (PCIE_IS_BDG(bus_p)) {
		/* Clear the device's secondary status register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);

		/* Setup the device's secondary command register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
		tmp16 = (reg16 & pcie_bdg_command_default_fw);

		tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		/*
		 * Workaround for this Nvidia bridge. Don't enable the SERR
		 * enable bit in the bridge control register as it could lead to
		 * bogus NMIs.
		 */
		if (bus_p->bus_dev_ven_id == 0x037010DE)
			tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;

		if (pcie_command_default & PCI_COMM_PARITY_DETECT)
			tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;

		/*
		 * Enable Master Abort Mode only if URs have not been masked.
		 * For PCI and PCIe-PCI bridges, enabling this bit causes a
		 * Master Aborts/UR to be forwarded as a UR/TA or SERR.  If this
		 * bit is masked, posted requests are dropped and non-posted
		 * requests are returned with -1.
		 */
		if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
			tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
		else
			tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
		PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
		    reg16);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		/* Setup PCIe device control register */
		reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		/* note: MPS/MRRS are initialized in pcie_initchild_mps() */
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK));
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);

		/* Enable PCIe errors */
		pcie_enable_errors(cdip);

		/* Publish serial number, ASPM and link-speed properties. */
		pcie_determine_serial(cdip);

		pcie_determine_aspm(cdip);

		pcie_capture_speeds(cdip);
	}

	/* ARI only applies when the parent forwards and the child speaks it */
	bus_p->bus_ari = B_FALSE;
	if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
	    == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
	    == PCIE_ARI_DEVICE)) {
		bus_p->bus_ari = B_TRUE;
	}

	if (pcie_initchild_mps(cdip) == DDI_FAILURE) {
		pcie_fini_cfghdl(cdip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
783 
/*
 * Allocate and initialize the per-device fault data (pf_data_t) used by
 * the error handling code to capture register state.  Which register
 * buffers get allocated depends on the device type (root, bridge, PCIe,
 * PCI-X); pcie_fini_pfd() must free exactly the set allocated here.
 */
static void
pcie_init_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_ZALLOC(pf_data_t);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	PCIE_DIP2PFD(dip) = pfd_p;

	pfd_p->pe_bus_p = bus_p;
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_severity_mask = 0;
	pfd_p->pe_orig_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* Allocate the root fault struct for both RC and RP */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
		PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
	}

	/* Every device gets the generic PCI error and affected-dev records */
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;

	if (PCIE_IS_BDG(bus_p))
		PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);

	if (PCIE_IS_PCIE(bus_p)) {
		PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);

		if (PCIE_IS_RP(bus_p))
			PCIE_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_rp_err_regs_t);

		/* AER register capture; BDFs start out invalid */
		PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		if (PCIE_IS_RP(bus_p)) {
			PCIE_ADV_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			PCIE_ADV_BDG_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		/* PCIe-to-PCI-X bridges also carry PCI-X error state */
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		}
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		} else {
			PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p))
				PCIX_ECC_REG(pfd_p) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
		}
	}
}
868 
/*
 * Free the fault data allocated by pcie_init_pfd().  The device-type
 * checks here must mirror the allocation conditions in pcie_init_pfd()
 * exactly, or memory is leaked / freed twice.
 */
static void
pcie_fini_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_DIP2PFD(dip);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		}

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
		else if (PCIE_IS_PCIE_BDG(bus_p))
			kmem_free(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));

		kmem_free(PCIE_ADV_REG(pfd_p),
		    sizeof (pf_pcie_adv_err_regs_t));

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));

		kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p))
				kmem_free(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));

			kmem_free(PCIX_ERR_REG(pfd_p),
			    sizeof (pf_pcix_err_regs_t));
		}
	}

	if (PCIE_IS_BDG(bus_p))
		kmem_free(PCI_BDG_ERR_REG(pfd_p),
		    sizeof (pf_pci_bdg_err_regs_t));

	/* These are allocated unconditionally in pcie_init_pfd() */
	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

	if (PCIE_IS_ROOT(bus_p)) {
		kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
		kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
	}

	kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

	PCIE_DIP2PFD(dip) = NULL;
}
940 
941 
942 /*
943  * Special functions to allocate pf_data_t's for PCIe root complexes.
944  * Note: Root Complex not Root Port
945  */
/*
 * Initialize a caller-supplied pf_data_t for a root complex.  Unlike
 * pcie_init_pfd(), the pf_data_t itself is provided by the caller, and
 * the full set of RP/AER register buffers is always allocated.
 * pcie_rc_fini_pfd() frees what is allocated here.
 */
void
pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
{
	pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_severity_mask = 0;
	pfd_p->pe_orig_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* All source BDFs start out invalid until an error is scanned */
	PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
	PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
	PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
	PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
	PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
	PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = PCIE_INVALID_BDF;
	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = PCIE_INVALID_BDF;

	/* Default UE severity; see pcie_aer_uce_severity above */
	PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
}
972 
/*
 * Free the register buffers allocated by pcie_rc_init_pfd().  The
 * pf_data_t itself belongs to the caller and is not freed here.
 */
void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
	kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
	kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
	kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
	kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
	kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
	kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
}
986 
987 /*
988  * init pcie_bus_t for root complex
989  *
990  * Only a few of the fields in bus_t is valid for root complex.
991  * The fields that are bracketed are initialized in this routine:
992  *
993  * dev_info_t *		<bus_dip>
994  * dev_info_t *		bus_rp_dip
995  * ddi_acc_handle_t	bus_cfg_hdl
996  * uint_t		<bus_fm_flags>
997  * pcie_req_id_t	bus_bdf
998  * pcie_req_id_t	bus_rp_bdf
999  * uint32_t		bus_dev_ven_id
1000  * uint8_t		bus_rev_id
1001  * uint8_t		<bus_hdr_type>
1002  * uint16_t		<bus_dev_type>
1003  * uint8_t		bus_bdg_secbus
1004  * uint16_t		bus_pcie_off
1005  * uint16_t		<bus_aer_off>
1006  * uint16_t		bus_pcix_off
1007  * uint16_t		bus_ecc_ver
1008  * pci_bus_range_t	bus_bus_range
1009  * ppb_ranges_t	*	bus_addr_ranges
1010  * int			bus_addr_entries
1011  * pci_regspec_t *	bus_assigned_addr
1012  * int			bus_assigned_entries
1013  * pf_data_t *		bus_pfd
1014  * pcie_domain_t *	<bus_dom>
1015  * int			bus_mps
1016  * uint64_t		bus_cfgacc_base
1017  * void	*		bus_plat_private
1018  */
1019 void
1020 pcie_rc_init_bus(dev_info_t *dip)
1021 {
1022 	pcie_bus_t *bus_p;
1023 
1024 	bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
1025 	bus_p->bus_dip = dip;
1026 	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
1027 	bus_p->bus_hdr_type = PCI_HEADER_ONE;
1028 
1029 	/* Fake that there are AER logs */
1030 	bus_p->bus_aer_off = (uint16_t)-1;
1031 
1032 	/* Needed only for handle lookup */
1033 	atomic_or_uint(&bus_p->bus_fm_flags, PF_FM_READY);
1034 
1035 	ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);
1036 
1037 	PCIE_BUS2DOM(bus_p) = PCIE_ZALLOC(pcie_domain_t);
1038 }
1039 
1040 void
1041 pcie_rc_fini_bus(dev_info_t *dip)
1042 {
1043 	pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);
1044 	ndi_set_bus_private(dip, B_FALSE, 0, NULL);
1045 	kmem_free(PCIE_BUS2DOM(bus_p), sizeof (pcie_domain_t));
1046 	kmem_free(bus_p, sizeof (pcie_bus_t));
1047 }
1048 
1049 static int
1050 pcie_width_to_int(pcie_link_width_t width)
1051 {
1052 	switch (width) {
1053 	case PCIE_LINK_WIDTH_X1:
1054 		return (1);
1055 	case PCIE_LINK_WIDTH_X2:
1056 		return (2);
1057 	case PCIE_LINK_WIDTH_X4:
1058 		return (4);
1059 	case PCIE_LINK_WIDTH_X8:
1060 		return (8);
1061 	case PCIE_LINK_WIDTH_X12:
1062 		return (12);
1063 	case PCIE_LINK_WIDTH_X16:
1064 		return (16);
1065 	case PCIE_LINK_WIDTH_X32:
1066 		return (32);
1067 	default:
1068 		return (0);
1069 	}
1070 }
1071 
1072 /*
1073  * Return the speed in Transfers / second. This is a signed quantity to match
1074  * the ndi/ddi property interfaces.
1075  */
1076 static int64_t
1077 pcie_speed_to_int(pcie_link_speed_t speed)
1078 {
1079 	switch (speed) {
1080 	case PCIE_LINK_SPEED_2_5:
1081 		return (2500000000LL);
1082 	case PCIE_LINK_SPEED_5:
1083 		return (5000000000LL);
1084 	case PCIE_LINK_SPEED_8:
1085 		return (8000000000LL);
1086 	case PCIE_LINK_SPEED_16:
1087 		return (16000000000LL);
1088 	default:
1089 		return (0);
1090 	}
1091 }
1092 
1093 /*
1094  * Translate the recorded speed information into devinfo properties.
1095  */
1096 static void
1097 pcie_speeds_to_devinfo(dev_info_t *dip, pcie_bus_t *bus_p)
1098 {
1099 	if (bus_p->bus_max_width != PCIE_LINK_WIDTH_UNKNOWN) {
1100 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
1101 		    "pcie-link-maximum-width",
1102 		    pcie_width_to_int(bus_p->bus_max_width));
1103 	}
1104 
1105 	if (bus_p->bus_cur_width != PCIE_LINK_WIDTH_UNKNOWN) {
1106 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
1107 		    "pcie-link-current-width",
1108 		    pcie_width_to_int(bus_p->bus_cur_width));
1109 	}
1110 
1111 	if (bus_p->bus_cur_speed != PCIE_LINK_SPEED_UNKNOWN) {
1112 		(void) ndi_prop_update_int64(DDI_DEV_T_NONE, dip,
1113 		    "pcie-link-current-speed",
1114 		    pcie_speed_to_int(bus_p->bus_cur_speed));
1115 	}
1116 
1117 	if (bus_p->bus_max_speed != PCIE_LINK_SPEED_UNKNOWN) {
1118 		(void) ndi_prop_update_int64(DDI_DEV_T_NONE, dip,
1119 		    "pcie-link-maximum-speed",
1120 		    pcie_speed_to_int(bus_p->bus_max_speed));
1121 	}
1122 
1123 	if (bus_p->bus_target_speed != PCIE_LINK_SPEED_UNKNOWN) {
1124 		(void) ndi_prop_update_int64(DDI_DEV_T_NONE, dip,
1125 		    "pcie-link-target-speed",
1126 		    pcie_speed_to_int(bus_p->bus_target_speed));
1127 	}
1128 
1129 	if ((bus_p->bus_speed_flags & PCIE_LINK_F_ADMIN_TARGET) != 0) {
1130 		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
1131 		    "pcie-link-admin-target-speed");
1132 	}
1133 
1134 	if (bus_p->bus_sup_speed != PCIE_LINK_SPEED_UNKNOWN) {
1135 		int64_t speeds[4];
1136 		uint_t nspeeds = 0;
1137 
1138 		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_2_5) {
1139 			speeds[nspeeds++] =
1140 			    pcie_speed_to_int(PCIE_LINK_SPEED_2_5);
1141 		}
1142 
1143 		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_5) {
1144 			speeds[nspeeds++] =
1145 			    pcie_speed_to_int(PCIE_LINK_SPEED_5);
1146 		}
1147 
1148 		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_8) {
1149 			speeds[nspeeds++] =
1150 			    pcie_speed_to_int(PCIE_LINK_SPEED_8);
1151 		}
1152 
1153 		if (bus_p->bus_sup_speed & PCIE_LINK_SPEED_16) {
1154 			speeds[nspeeds++] =
1155 			    pcie_speed_to_int(PCIE_LINK_SPEED_16);
1156 		}
1157 
1158 		(void) ndi_prop_update_int64_array(DDI_DEV_T_NONE, dip,
1159 		    "pcie-link-supported-speeds", speeds, nspeeds);
1160 	}
1161 }
1162 
1163 /*
1164  * We need to capture the supported, maximum, and current device speed and
1165  * width. The way that this has been done has changed over time.
1166  *
1167  * Prior to PCIe Gen 3, there were only current and supported speed fields.
1168  * These were found in the link status and link capabilities registers of the
1169  * PCI express capability. With the change to PCIe Gen 3, the information in the
1170  * link capabilities changed to the maximum value. The supported speeds vector
1171  * was moved to the link capabilities 2 register.
1172  *
1173  * Now, a device may not implement some of these registers. To determine whether
1174  * or not it's here, we have to do the following. First, we need to check the
1175  * revision of the PCI express capability. The link capabilities 2 register did
1176  * not exist prior to version 2 of this capability. If a modern device does not
1177  * implement it, it is supposed to return zero for the register.
1178  */
static void
pcie_capture_speeds(dev_info_t *dip)
{
	uint16_t	vers, status;
	uint32_t	cap, cap2, ctl2;
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Read the PCIe capability register to learn the capability's
	 * version; which registers exist depends on it (see the block
	 * comment above this function).
	 */
	vers = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);
	if (vers == PCI_EINVAL16)
		return;
	vers &= PCIE_PCIECAP_VER_MASK;

	/*
	 * Verify the capability's version.
	 */
	switch (vers) {
	case PCIE_PCIECAP_VER_1_0:
		cap2 = 0;
		ctl2 = 0;
		break;
	case PCIE_PCIECAP_VER_2_0:
		/* Treat an unreadable register the same as an absent one */
		cap2 = PCIE_CAP_GET(32, bus_p, PCIE_LINKCAP2);
		if (cap2 == PCI_EINVAL32)
			cap2 = 0;
		ctl2 = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL2);
		if (ctl2 == PCI_EINVAL16)
			ctl2 = 0;
		break;
	default:
		/* Don't try and handle an unknown version */
		return;
	}

	status = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
	cap = PCIE_CAP_GET(32, bus_p, PCIE_LINKCAP);
	if (status == PCI_EINVAL16 || cap == PCI_EINVAL32)
		return;

	/* bus_speed_mutex protects all of the bus_*_speed/width fields. */
	mutex_enter(&bus_p->bus_speed_mutex);

	/* Current (negotiated) link speed, from the Link Status register */
	switch (status & PCIE_LINKSTS_SPEED_MASK) {
	case PCIE_LINKSTS_SPEED_2_5:
		bus_p->bus_cur_speed = PCIE_LINK_SPEED_2_5;
		break;
	case PCIE_LINKSTS_SPEED_5:
		bus_p->bus_cur_speed = PCIE_LINK_SPEED_5;
		break;
	case PCIE_LINKSTS_SPEED_8:
		bus_p->bus_cur_speed = PCIE_LINK_SPEED_8;
		break;
	case PCIE_LINKSTS_SPEED_16:
		bus_p->bus_cur_speed = PCIE_LINK_SPEED_16;
		break;
	default:
		bus_p->bus_cur_speed = PCIE_LINK_SPEED_UNKNOWN;
		break;
	}

	/* Current (negotiated) link width, from the Link Status register */
	switch (status & PCIE_LINKSTS_NEG_WIDTH_MASK) {
	case PCIE_LINKSTS_NEG_WIDTH_X1:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X1;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X2:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X2;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X4:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X4;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X8:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X8;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X12:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X12;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X16:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X16;
		break;
	case PCIE_LINKSTS_NEG_WIDTH_X32:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_X32;
		break;
	default:
		bus_p->bus_cur_width = PCIE_LINK_WIDTH_UNKNOWN;
		break;
	}

	/* Maximum link width, from the Link Capabilities register */
	switch (cap & PCIE_LINKCAP_MAX_WIDTH_MASK) {
	case PCIE_LINKCAP_MAX_WIDTH_X1:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X1;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X2:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X2;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X4:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X4;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X8:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X8;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X12:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X12;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X16:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X16;
		break;
	case PCIE_LINKCAP_MAX_WIDTH_X32:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_X32;
		break;
	default:
		bus_p->bus_max_width = PCIE_LINK_WIDTH_UNKNOWN;
		break;
	}

	/*
	 * If we have the Link Capabilities 2, then we can get the supported
	 * speeds from it and treat the bits in Link Capabilities 1 as the
	 * maximum. If we don't, then we need to follow the Implementation Note
	 * in the standard under Link Capabilities 2. Effectively, this means
	 * that if the value of 10b is set in Link Capabilities register, that
	 * it supports both 2.5 and 5 GT/s speeds.
	 */
	if (cap2 != 0) {
		if (cap2 & PCIE_LINKCAP2_SPEED_2_5)
			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_2_5;
		if (cap2 & PCIE_LINKCAP2_SPEED_5)
			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_5;
		if (cap2 & PCIE_LINKCAP2_SPEED_8)
			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_8;
		if (cap2 & PCIE_LINKCAP2_SPEED_16)
			bus_p->bus_sup_speed |= PCIE_LINK_SPEED_16;

		switch (cap & PCIE_LINKCAP_MAX_SPEED_MASK) {
		case PCIE_LINKCAP_MAX_SPEED_2_5:
			bus_p->bus_max_speed = PCIE_LINK_SPEED_2_5;
			break;
		case PCIE_LINKCAP_MAX_SPEED_5:
			bus_p->bus_max_speed = PCIE_LINK_SPEED_5;
			break;
		case PCIE_LINKCAP_MAX_SPEED_8:
			bus_p->bus_max_speed = PCIE_LINK_SPEED_8;
			break;
		case PCIE_LINKCAP_MAX_SPEED_16:
			bus_p->bus_max_speed = PCIE_LINK_SPEED_16;
			break;
		default:
			bus_p->bus_max_speed = PCIE_LINK_SPEED_UNKNOWN;
			break;
		}
	} else {
		if (cap & PCIE_LINKCAP_MAX_SPEED_5) {
			bus_p->bus_max_speed = PCIE_LINK_SPEED_5;
			bus_p->bus_sup_speed = PCIE_LINK_SPEED_2_5 |
			    PCIE_LINK_SPEED_5;
		} else if (cap & PCIE_LINKCAP_MAX_SPEED_2_5) {
			bus_p->bus_max_speed = PCIE_LINK_SPEED_2_5;
			bus_p->bus_sup_speed = PCIE_LINK_SPEED_2_5;
		}
	}

	/* Target link speed, from the Link Control 2 register (if present) */
	switch (ctl2 & PCIE_LINKCTL2_TARGET_SPEED_MASK) {
	case PCIE_LINKCTL2_TARGET_SPEED_2_5:
		bus_p->bus_target_speed = PCIE_LINK_SPEED_2_5;
		break;
	case PCIE_LINKCTL2_TARGET_SPEED_5:
		bus_p->bus_target_speed = PCIE_LINK_SPEED_5;
		break;
	case PCIE_LINKCTL2_TARGET_SPEED_8:
		bus_p->bus_target_speed = PCIE_LINK_SPEED_8;
		break;
	case PCIE_LINKCTL2_TARGET_SPEED_16:
		bus_p->bus_target_speed = PCIE_LINK_SPEED_16;
		break;
	default:
		bus_p->bus_target_speed = PCIE_LINK_SPEED_UNKNOWN;
		break;
	}

	/* Publish everything we just recorded as devinfo properties. */
	pcie_speeds_to_devinfo(dip, bus_p);
	mutex_exit(&bus_p->bus_speed_mutex);
}
1361 
1362 /*
1363  * partially init pcie_bus_t for device (dip,bdf) for accessing pci
1364  * config space
1365  *
1366  * This routine is invoked during boot, either after creating a devinfo node
1367  * (x86 case) or during px driver attach (sparc case); it is also invoked
1368  * in hotplug context after a devinfo node is created.
1369  *
1370  * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
1371  * is set:
1372  *
1373  * dev_info_t *		<bus_dip>
1374  * dev_info_t *		<bus_rp_dip>
1375  * ddi_acc_handle_t	bus_cfg_hdl
1376  * uint_t		bus_fm_flags
1377  * pcie_req_id_t	<bus_bdf>
1378  * pcie_req_id_t	<bus_rp_bdf>
1379  * uint32_t		<bus_dev_ven_id>
1380  * uint8_t		<bus_rev_id>
1381  * uint8_t		<bus_hdr_type>
1382  * uint16_t		<bus_dev_type>
 * uint8_t		<bus_bdg_secbus>
1384  * uint16_t		<bus_pcie_off>
1385  * uint16_t		<bus_aer_off>
1386  * uint16_t		<bus_pcix_off>
1387  * uint16_t		<bus_ecc_ver>
1388  * pci_bus_range_t	bus_bus_range
1389  * ppb_ranges_t	*	bus_addr_ranges
1390  * int			bus_addr_entries
1391  * pci_regspec_t *	bus_assigned_addr
1392  * int			bus_assigned_entries
1393  * pf_data_t *		bus_pfd
1394  * pcie_domain_t *	bus_dom
1395  * int			bus_mps
1396  * uint64_t		bus_cfgacc_base
1397  * void	*		bus_plat_private
1398  *
1399  * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
1400  * is set:
1401  *
1402  * dev_info_t *		bus_dip
1403  * dev_info_t *		bus_rp_dip
1404  * ddi_acc_handle_t	bus_cfg_hdl
1405  * uint_t		bus_fm_flags
1406  * pcie_req_id_t	bus_bdf
1407  * pcie_req_id_t	bus_rp_bdf
1408  * uint32_t		bus_dev_ven_id
1409  * uint8_t		bus_rev_id
1410  * uint8_t		bus_hdr_type
1411  * uint16_t		bus_dev_type
1412  * uint8_t		<bus_bdg_secbus>
1413  * uint16_t		bus_pcie_off
1414  * uint16_t		bus_aer_off
1415  * uint16_t		bus_pcix_off
1416  * uint16_t		bus_ecc_ver
1417  * pci_bus_range_t	<bus_bus_range>
1418  * ppb_ranges_t	*	<bus_addr_ranges>
1419  * int			<bus_addr_entries>
1420  * pci_regspec_t *	<bus_assigned_addr>
1421  * int			<bus_assigned_entries>
1422  * pf_data_t *		<bus_pfd>
1423  * pcie_domain_t *	bus_dom
1424  * int			bus_mps
1425  * uint64_t		bus_cfgacc_base
1426  * void	*		<bus_plat_private>
1427  */
1428 
pcie_bus_t *
pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
{
	uint16_t	status, base, baseptr, num_cap;
	uint32_t	capid;
	int		range_size;
	pcie_bus_t	*bus_p = NULL;
	dev_info_t	*rcdip;
	dev_info_t	*pdip;
	const char	*errstr = NULL;

	if (!(flags & PCIE_BUS_INITIAL))
		goto initial_done;

	bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

	bus_p->bus_dip = dip;
	bus_p->bus_bdf = bdf;

	rcdip = pcie_get_rc_dip(dip);
	ASSERT(rcdip != NULL);

	/* Save the Vendor ID, Device ID and revision ID */
	bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
	bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
	/* Save the Header Type */
	bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
	bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

	/*
	 * Figure out the device type and all the relevant capability offsets
	 */
	/* set default value */
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;

	status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
	if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
		goto caps_done; /* capability not supported */

	/* Relevant conventional capabilities first */

	/* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
	num_cap = 2;

	/* The capability pointer's location depends on the header type. */
	switch (bus_p->bus_hdr_type) {
	case PCI_HEADER_ZERO:
		baseptr = PCI_CONF_CAP_PTR;
		break;
	case PCI_HEADER_PPB:
		baseptr = PCI_BCNF_CAP_PTR;
		break;
	case PCI_HEADER_CARDBUS:
		baseptr = PCI_CBUS_CAP_PTR;
		break;
	default:
		cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
		    __func__, bus_p->bus_hdr_type);
		goto caps_done;
	}

	base = baseptr;
	/* Walk the capability list; stop early once both caps are found. */
	for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
	    base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
		capid = pci_cfgacc_get8(rcdip, bdf, base);
		switch (capid) {
		case PCI_CAP_ID_PCI_E:
			bus_p->bus_pcie_off = base;
			bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
			    base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

			/* Check and save PCIe hotplug capability information */
			if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
			    (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
			    & PCIE_PCIECAP_SLOT_IMPL) &&
			    (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
			    & PCIE_SLOTCAP_HP_CAPABLE))
				bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;

			num_cap--;
			break;
		case PCI_CAP_ID_PCIX:
			bus_p->bus_pcix_off = base;
			/* ECC version lives in different regs for bridges */
			if (PCIE_IS_BDG(bus_p))
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
			else
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
			num_cap--;
			break;
		default:
			break;
		}
	}

	/* Check and save PCI hotplug (SHPC) capability information */
	if (PCIE_IS_BDG(bus_p)) {
		base = baseptr;
		for (base = pci_cfgacc_get8(rcdip, bdf, base);
		    base; base = pci_cfgacc_get8(rcdip, bdf,
		    base + PCI_CAP_NEXT_PTR)) {
			capid = pci_cfgacc_get8(rcdip, bdf, base);
			if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
				bus_p->bus_pci_hp_off = base;
				bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
				break;
			}
		}
	}

	/* Then, relevant extended capabilities */

	if (!PCIE_IS_PCIE(bus_p))
		goto caps_done;

	/* Extended caps: PCIE_EXT_CAP_ID_AER */
	for (base = PCIE_EXT_CAP; base; base = (capid >>
	    PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
		capid = pci_cfgacc_get32(rcdip, bdf, base);
		if (capid == PCI_CAP_EINVAL32)
			break;
		if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
		    == PCIE_EXT_CAP_ID_AER) {
			bus_p->bus_aer_off = base;
			break;
		}
	}

	/*
	 * Link speed/width information is recorded separately, by
	 * pcie_capture_speeds().
	 */

caps_done:
	/* save RP dip and RP bdf */
	if (PCIE_IS_RP(bus_p)) {
		bus_p->bus_rp_dip = dip;
		bus_p->bus_rp_bdf = bus_p->bus_bdf;
	} else {
		for (pdip = ddi_get_parent(dip); pdip;
		    pdip = ddi_get_parent(pdip)) {
			pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

			/*
			 * If RP dip and RP bdf in parent's bus_t have
			 * been initialized, simply use these instead of
			 * continuing up to the RC.
			 */
			if (parent_bus_p->bus_rp_dip != NULL) {
				bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
				break;
			}

			/*
			 * When debugging be aware that some NVIDIA x86
			 * architectures have 2 nodes for each RP, One at Bus
			 * 0x0 and one at Bus 0x80.  The requester is from Bus
			 * 0x80
			 */
			if (PCIE_IS_ROOT(parent_bus_p)) {
				bus_p->bus_rp_dip = pdip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
				break;
			}
		}
	}

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
	(void) atomic_swap_uint(&bus_p->bus_fm_flags, 0);
	bus_p->bus_mps = 0;

	/* Make the bus_t reachable from the devinfo node. */
	ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(dip))
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
		    "hotplug-capable");

initial_done:
	if (!(flags & PCIE_BUS_FINAL))
		goto final_done;

	/* already initialized? */
	bus_p = PCIE_DIP2BUS(dip);

	/* Save the Range information if device is a switch/bridge */
	if (PCIE_IS_BDG(bus_p)) {
		/* get "bus_range" property */
		range_size = sizeof (pci_bus_range_t);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
		    != DDI_PROP_SUCCESS) {
			errstr = "Cannot find \"bus-range\" property";
			cmn_err(CE_WARN,
			    "PCIE init err info failed BDF 0x%x:%s\n",
			    bus_p->bus_bdf, errstr);
		}

		/* get secondary bus number */
		rcdip = pcie_get_rc_dip(dip);
		ASSERT(rcdip != NULL);

		bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
		    bus_p->bus_bdf, PCI_BCNF_SECBUS);

		/* Get "ranges" property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&bus_p->bus_addr_ranges,
		    &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
			bus_p->bus_addr_entries = 0;
		bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
	}

	/* save "assigned-addresses" property array, ignore failures */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
	    &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
		bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
	else
		bus_p->bus_assigned_entries = 0;

	pcie_init_pfd(dip);

	pcie_init_plat(dip);

final_done:

	PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
	    ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
	    bus_p->bus_bdg_secbus);
#ifdef DEBUG
	if (bus_p != NULL) {
		pcie_print_bus(bus_p);
	}
#endif

	return (bus_p);
}
1668 
1669 /*
1670  * Invoked before destroying devinfo node, mostly during hotplug
1671  * operation to free pcie_bus_t data structure
1672  */
/* ARGSUSED */
void
pcie_fini_bus(dev_info_t *dip, uint8_t flags)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	ASSERT(bus_p);

	if (flags & PCIE_BUS_INITIAL) {
		pcie_fini_plat(dip);
		pcie_fini_pfd(dip);

		/*
		 * NOTE(review): when the "assigned-addresses"/"ranges"
		 * properties were absent at init time these pointers are
		 * NULL with zero entry counts, so this relies on
		 * kmem_free() accepting a (NULL, 0) pair -- confirm.
		 */
		kmem_free(bus_p->bus_assigned_addr,
		    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
		kmem_free(bus_p->bus_addr_ranges,
		    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
		/* zero out the fields that have been destroyed */
		bus_p->bus_assigned_addr = NULL;
		bus_p->bus_addr_ranges = NULL;
		bus_p->bus_assigned_entries = 0;
		bus_p->bus_addr_entries = 0;
	}

	if (flags & PCIE_BUS_FINAL) {
		if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
			    "hotplug-capable");
		}

		/* Detach the bus_t from the devinfo node, then free it. */
		ndi_set_bus_private(dip, B_TRUE, 0, NULL);
		kmem_free(bus_p, sizeof (pcie_bus_t));
	}
}
1705 
1706 int
1707 pcie_postattach_child(dev_info_t *cdip)
1708 {
1709 	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);
1710 
1711 	if (!bus_p)
1712 		return (DDI_FAILURE);
1713 
1714 	return (pcie_enable_ce(cdip));
1715 }
1716 
1717 /*
1718  * PCI-Express child device de-initialization.
1719  * This function disables generic pci-express interrupts and error
1720  * handling.
1721  */
void
pcie_uninitchild(dev_info_t *cdip)
{
	/* Quiesce baseline and AER error reporting first. */
	pcie_disable_errors(cdip);
	/* Release the child's config-space access handle. */
	pcie_fini_cfghdl(cdip);
	/* Tear down the child's domain state (bus_dom) -- see pcie_impl.h. */
	pcie_fini_dom(cdip);
}
1729 
1730 /*
1731  * find the root complex dip
1732  */
1733 dev_info_t *
1734 pcie_get_rc_dip(dev_info_t *dip)
1735 {
1736 	dev_info_t *rcdip;
1737 	pcie_bus_t *rc_bus_p;
1738 
1739 	for (rcdip = ddi_get_parent(dip); rcdip;
1740 	    rcdip = ddi_get_parent(rcdip)) {
1741 		rc_bus_p = PCIE_DIP2BUS(rcdip);
1742 		if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
1743 			break;
1744 	}
1745 
1746 	return (rcdip);
1747 }
1748 
1749 boolean_t
1750 pcie_is_pci_device(dev_info_t *dip)
1751 {
1752 	dev_info_t	*pdip;
1753 	char		*device_type;
1754 
1755 	pdip = ddi_get_parent(dip);
1756 	if (pdip == NULL)
1757 		return (B_FALSE);
1758 
1759 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
1760 	    "device_type", &device_type) != DDI_PROP_SUCCESS)
1761 		return (B_FALSE);
1762 
1763 	if (strcmp(device_type, "pciex") != 0 &&
1764 	    strcmp(device_type, "pci") != 0) {
1765 		ddi_prop_free(device_type);
1766 		return (B_FALSE);
1767 	}
1768 
1769 	ddi_prop_free(device_type);
1770 	return (B_TRUE);
1771 }
1772 
/* Walk-callback argument for pcie_fab_do_init_fini(). */
typedef struct {
	boolean_t	init;	/* B_TRUE: init bus_t; B_FALSE: fini it */
	uint8_t		flags;	/* PCIE_BUS_INITIAL and/or PCIE_BUS_FINAL */
} pcie_bus_arg_t;
1777 
1778 /*ARGSUSED*/
1779 static int
1780 pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
1781 {
1782 	pcie_req_id_t	bdf;
1783 	pcie_bus_arg_t	*bus_arg = (pcie_bus_arg_t *)arg;
1784 
1785 	if (!pcie_is_pci_device(dip))
1786 		goto out;
1787 
1788 	if (bus_arg->init) {
1789 		if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
1790 			goto out;
1791 
1792 		(void) pcie_init_bus(dip, bdf, bus_arg->flags);
1793 	} else {
1794 		(void) pcie_fini_bus(dip, bus_arg->flags);
1795 	}
1796 
1797 	return (DDI_WALK_CONTINUE);
1798 
1799 out:
1800 	return (DDI_WALK_PRUNECHILD);
1801 }
1802 
1803 void
1804 pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
1805 {
1806 	int		circular_count;
1807 	dev_info_t	*dip = ddi_get_child(rcdip);
1808 	pcie_bus_arg_t	arg;
1809 
1810 	arg.init = B_TRUE;
1811 	arg.flags = flags;
1812 
1813 	ndi_devi_enter(rcdip, &circular_count);
1814 	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1815 	ndi_devi_exit(rcdip, circular_count);
1816 }
1817 
1818 void
1819 pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
1820 {
1821 	int		circular_count;
1822 	dev_info_t	*dip = ddi_get_child(rcdip);
1823 	pcie_bus_arg_t	arg;
1824 
1825 	arg.init = B_FALSE;
1826 	arg.flags = flags;
1827 
1828 	ndi_devi_enter(rcdip, &circular_count);
1829 	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1830 	ndi_devi_exit(rcdip, circular_count);
1831 }
1832 
void
pcie_enable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	reg16, tmp16;
	uint32_t	reg32, tmp32;

	ASSERT(bus_p);

	/*
	 * Clear any pending errors
	 */
	pcie_clear_errors(dip);

	/* Everything below programs PCIe-only registers. */
	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Enable Baseline Error Handling but leave CE reporting off (poweron
	 * default).
	 */
	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
	    PCI_CAP_EINVAL16) {
		/*
		 * Preserve the device's max-read-request and max-payload
		 * fields, take everything else from pcie_devctl_default,
		 * and mask off the CE reporting enable.
		 */
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
	}

	/* Enable Root Port Baseline Error Receiving */
	if (PCIE_IS_ROOT(bus_p) &&
	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
	    PCI_CAP_EINVAL16) {

		/* pcie_serr_disable_flag suppresses system-error forwarding */
		tmp16 = pcie_serr_disable_flag ?
		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
		    pcie_root_ctrl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
		    reg16);
	}

	/*
	 * Enable PCI-Express Advanced Error Handling if Exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
		    reg32);
	}

	/* Enable Uncorrectable errors */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_mask;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
		    reg32);
	}

	/* Enable ECRC generation and checking */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = reg32 | pcie_ecrc_value;
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
	}

	/* Enable Secondary Uncorrectable errors if this is a bridge */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_suce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
		    reg32);
	}

	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
		    PCIE_AER_SUCE_MASK, reg32);
	}

root:
	/*
	 * Enable Root Control if this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
		    pcie_root_error_cmd_default);
		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
		    PCIE_AER_RE_CMD, reg16);
	}
}
1949 
1950 /*
1951  * This function is used for enabling CE reporting and setting the AER CE mask.
1952  * When called from outside the pcie module it should always be preceded by
1953  * a call to pcie_enable_errors.
1954  */
int
pcie_enable_ce(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_sts, device_ctl;
	uint32_t	tmp_pcie_aer_ce_mask;

	/* CE reporting only exists on PCIe devices. */
	if (!PCIE_IS_PCIE(bus_p))
		return (DDI_SUCCESS);

	/*
	 * The "pcie_ce_mask" property is used to control both the CE reporting
	 * enable field in the device control register and the AER CE mask. We
	 * leave CE reporting disabled if pcie_ce_mask is set to -1.
	 */

	tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);

	if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
		/*
		 * Nothing to do since CE reporting has already been disabled.
		 */
		return (DDI_SUCCESS);
	}

	if (PCIE_HAS_AER(bus_p)) {
		/* Enable AER CE */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
		    0);

		/* Clear any pending AER CE errors (write-1-to-clear) */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
	}

	/* clear any pending CE errors */
	if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
	    PCI_CAP_EINVAL16)
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
		    device_sts & (~PCIE_DEVSTS_CE_DETECTED));

	/* Enable CE reporting */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
	    (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
	PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);

	return (DDI_SUCCESS);
}
2005 
/*
 * Disable all error reporting on a PCIe device: the baseline reporting
 * enables in Device Control, the AER mask registers (if AER is present),
 * the secondary AER mask on PCIe-to-PCI bridges, and finally the Root
 * Control / Root Error Command settings on root devices.
 */
/* ARGSUSED */
void
pcie_disable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_ctl;
	uint32_t	aer_reg;

	/* Nothing to do for non-PCIe (conventional PCI) nodes. */
	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Disable PCI-Express Baseline Error Handling
	 */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);

	/*
	 * Disable PCI-Express Advanced Error Handling if Exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		goto root;

	/* Disable Uncorrectable errors (mask all known UCE bits) */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);

	/* Disable Correctable errors (mask all known CE bits) */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);

	/* Disable ECRC generation and checking */
	if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
		    PCIE_AER_CTL_ECRC_CHECK_ENA);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
	}
	/*
	 * Disable Secondary Uncorrectable errors if this is a bridge
	 */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);

root:
	/*
	 * Disable Root Control if this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	/* Only clear SERR forwarding when the SERR-disable tunable is off. */
	if (!pcie_serr_disable_flag) {
		device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
		device_ctl &= ~PCIE_ROOT_SYS_ERR;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
	}

	if (!PCIE_HAS_AER(bus_p))
		return;

	/* device_ctl is reused here for the Root Error Command register. */
	if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		device_ctl &= ~pcie_root_error_cmd_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
	}
}
2074 
2075 /*
2076  * Extract bdf from "reg" property.
2077  */
2078 int
2079 pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
2080 {
2081 	pci_regspec_t	*regspec;
2082 	int		reglen;
2083 
2084 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2085 	    "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
2086 		return (DDI_FAILURE);
2087 
2088 	if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
2089 		ddi_prop_free(regspec);
2090 		return (DDI_FAILURE);
2091 	}
2092 
2093 	/* Get phys_hi from first element.  All have same bdf. */
2094 	*bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;
2095 
2096 	ddi_prop_free(regspec);
2097 	return (DDI_SUCCESS);
2098 }
2099 
2100 dev_info_t *
2101 pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
2102 {
2103 	dev_info_t *cdip = rdip;
2104 
2105 	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
2106 		;
2107 
2108 	return (cdip);
2109 }
2110 
2111 uint32_t
2112 pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
2113 {
2114 	dev_info_t *cdip;
2115 
2116 	/*
2117 	 * As part of the probing, the PCI fcode interpreter may setup a DMA
2118 	 * request if a given card has a fcode on it using dip and rdip of the
2119 	 * hotplug connector i.e, dip and rdip of px/pcieb driver. In this
2120 	 * case, return a invalid value for the bdf since we cannot get to the
2121 	 * bdf value of the actual device which will be initiating this DMA.
2122 	 */
2123 	if (rdip == dip)
2124 		return (PCIE_INVALID_BDF);
2125 
2126 	cdip = pcie_get_my_childs_dip(dip, rdip);
2127 
2128 	/*
2129 	 * For a given rdip, return the bdf value of dip's (px or pcieb)
2130 	 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
2131 	 *
2132 	 * XXX - For now, return a invalid bdf value for all PCI and PCI-X
2133 	 * devices since this needs more work.
2134 	 */
2135 	return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
2136 	    PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
2137 }
2138 
/* Return the system-default AER Uncorrectable Error mask. */
uint32_t
pcie_get_aer_uce_mask()
{
	return (pcie_aer_uce_mask);
}
/* Return the system-default AER Correctable Error mask. */
uint32_t
pcie_get_aer_ce_mask()
{
	return (pcie_aer_ce_mask);
}
/* Return the system-default AER Secondary Uncorrectable Error mask. */
uint32_t
pcie_get_aer_suce_mask()
{
	return (pcie_aer_suce_mask);
}
/* Despite the name, this returns the SERR-disable flag, not a mask. */
uint32_t
pcie_get_serr_mask()
{
	return (pcie_serr_disable_flag);
}
2159 
/*
 * Set the system-default AER Uncorrectable Error mask, keeping related
 * tunables consistent: masking Unsupported Request errors also turns off
 * UR reporting in the baseline Device Control default, and masking ECRC
 * errors disables the default ECRC generation/checking setting.
 */
void
pcie_set_aer_uce_mask(uint32_t mask)
{
	pcie_aer_uce_mask = mask;
	if (mask & PCIE_AER_UCE_UR)
		pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
	else
		pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;

	if (mask & PCIE_AER_UCE_ECRC)
		pcie_ecrc_value = 0;
}
2172 
/* Set the system-default AER Correctable Error mask. */
void
pcie_set_aer_ce_mask(uint32_t mask)
{
	pcie_aer_ce_mask = mask;
}
/* Set the system-default AER Secondary Uncorrectable Error mask. */
void
pcie_set_aer_suce_mask(uint32_t mask)
{
	pcie_aer_suce_mask = mask;
}
/* Set the SERR-disable flag (nonzero leaves SERR forwarding untouched). */
void
pcie_set_serr_mask(uint32_t mask)
{
	pcie_serr_disable_flag = mask;
}
2188 
2189 /*
 * Is rdip a child of dip?  Used to keep certain CTLOPS from bubbling
 * up erroneously, e.g. ISA ctlops to a PCI-PCI Bridge.
2192  */
2193 boolean_t
2194 pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
2195 {
2196 	dev_info_t	*cdip = ddi_get_child(dip);
2197 	for (; cdip; cdip = ddi_get_next_sibling(cdip))
2198 		if (cdip == rdip)
2199 			break;
2200 	return (cdip != NULL);
2201 }
2202 
2203 boolean_t
2204 pcie_is_link_disabled(dev_info_t *dip)
2205 {
2206 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2207 
2208 	if (PCIE_IS_PCIE(bus_p)) {
2209 		if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
2210 		    PCIE_LINKCTL_LINK_DISABLE)
2211 			return (B_TRUE);
2212 	}
2213 	return (B_FALSE);
2214 }
2215 
2216 /*
2217  * Initialize the MPS for a root port.
2218  *
2219  * dip - dip of root port device.
2220  */
void
pcie_init_root_port_mps(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	int rp_cap, max_supported = pcie_max_mps;

	/*
	 * Scan the fabric below this root port to find the highest MPS
	 * every device can support, starting from the pcie_max_mps cap.
	 */
	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
	    ddi_get_child(dip), &max_supported);

	/* Read the root port's own MPS capability from Device Capabilities. */
	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, 0,
	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	/* Clamp the fabric result to what the root port itself supports. */
	if (rp_cap < max_supported)
		max_supported = rp_cap;

	bus_p->bus_mps = max_supported;
	(void) pcie_initchild_mps(dip);
}
2240 
2241 /*
2242  * Initialize the Maximum Payload Size of a device.
2243  *
2244  * cdip - dip of device.
2245  *
2246  * returns - DDI_SUCCESS or DDI_FAILURE
2247  */
int
pcie_initchild_mps(dev_info_t *cdip)
{
	pcie_bus_t	*bus_p;
	dev_info_t	*pdip = ddi_get_parent(cdip);
	uint8_t		dev_type;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));
		return (DDI_FAILURE);
	}

	dev_type = bus_p->bus_dev_type;

	/*
	 * For ARI Devices, only function zero's MPS needs to be set.
	 */
	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
		pcie_req_id_t child_bdf;

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (DDI_FAILURE);
		/* Non-zero ARI function: nothing to do, report success. */
		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
			return (DDI_SUCCESS);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		int suggested_mrrs, fabric_mps;
		uint16_t device_mps, device_mps_cap, device_mrrs, dev_ctrl;

		dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		/*
		 * A root port uses its own bus_mps; any other device uses
		 * its parent's.  A negative value means no valid fabric MPS
		 * was computed, so fall back to the system-default MPS and
		 * MRRS fields from pcie_devctl_default.
		 */
		if ((fabric_mps = (PCIE_IS_RP(bus_p) ? bus_p :
		    PCIE_DIP2BUS(pdip))->bus_mps) < 0) {
			dev_ctrl = (dev_ctrl & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
			    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
			    (pcie_devctl_default &
			    (PCIE_DEVCTL_MAX_READ_REQ_MASK |
			    PCIE_DEVCTL_MAX_PAYLOAD_MASK));

			PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
			return (DDI_SUCCESS);
		}

		/* Device's own MPS capability and currently-programmed MRRS. */
		device_mps_cap = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

		device_mrrs = (dev_ctrl & PCIE_DEVCTL_MAX_READ_REQ_MASK) >>
		    PCIE_DEVCTL_MAX_READ_REQ_SHIFT;

		/* Use the smaller of the device capability and fabric MPS. */
		if (device_mps_cap < fabric_mps)
			device_mrrs = device_mps = device_mps_cap;
		else
			device_mps = (uint16_t)fabric_mps;

		/*
		 * The "suggested-mrrs" property lets a driver request a
		 * smaller Max Read Request Size than would be derived here.
		 */
		suggested_mrrs = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
		    cdip, DDI_PROP_DONTPASS, "suggested-mrrs", device_mrrs);

		if ((device_mps == fabric_mps) ||
		    (suggested_mrrs < device_mrrs))
			device_mrrs = (uint16_t)suggested_mrrs;

		/*
		 * Replace MPS and MRRS settings.
		 */
		dev_ctrl &= ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK);

		dev_ctrl |= ((device_mrrs << PCIE_DEVCTL_MAX_READ_REQ_SHIFT) |
		    device_mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

		/* Record the programmed MPS for this device's children. */
		bus_p->bus_mps = device_mps;
	}

	return (DDI_SUCCESS);
}
2328 
2329 /*
2330  * Scans a device tree/branch for a maximum payload size capabilities.
2331  *
2332  * rc_dip - dip of Root Complex.
2333  * dip - dip of device where scan will begin.
2334  * max_supported (IN) - maximum allowable MPS.
2335  * max_supported (OUT) - maximum payload size capability of fabric.
2336  */
2337 void
2338 pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
2339 {
2340 	if (dip == NULL)
2341 		return;
2342 
2343 	/*
2344 	 * Perform a fabric scan to obtain Maximum Payload Capabilities
2345 	 */
2346 	(void) pcie_scan_mps(rc_dip, dip, max_supported);
2347 
2348 	PCIE_DBG("MPS: Highest Common MPS= %x\n", max_supported);
2349 }
2350 
2351 /*
2352  * Scans fabric and determines Maximum Payload Size based on
 * highest common denominator algorithm
2354  */
2355 static void
2356 pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
2357 {
2358 	int circular_count;
2359 	pcie_max_supported_t max_pay_load_supported;
2360 
2361 	max_pay_load_supported.dip = rc_dip;
2362 	max_pay_load_supported.highest_common_mps = *max_supported;
2363 
2364 	ndi_devi_enter(ddi_get_parent(dip), &circular_count);
2365 	ddi_walk_devs(dip, pcie_get_max_supported,
2366 	    (void *)&max_pay_load_supported);
2367 	ndi_devi_exit(ddi_get_parent(dip), circular_count);
2368 
2369 	*max_supported = max_pay_load_supported.highest_common_mps;
2370 }
2371 
2372 /*
2373  * Called as part of the Maximum Payload Size scan.
2374  */
/*
 * ddi_walk_devs() callback: read one device's MPS capability from its
 * Device Capabilities register and lower the running highest-common-MPS
 * in the pcie_max_supported_t passed via arg if this device supports less.
 * Devices that are skipped (non-PCIe, no "reg", "suggested-mrrs" present)
 * simply do not constrain the result.
 */
static int
pcie_get_max_supported(dev_info_t *dip, void *arg)
{
	uint32_t max_supported;
	uint16_t cap_ptr;
	pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
	pci_regspec_t *reg;
	int rlen;
	caddr_t virt;
	ddi_acc_handle_t config_handle;

	if (ddi_get_child(current->dip) == NULL) {
		goto fail1;
	}

	if (pcie_dev(dip) == DDI_FAILURE) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s:  "
		    "Not a PCIe dev\n", ddi_driver_name(dip));
		goto fail1;
	}

	/*
	 * If the suggested-mrrs property exists, then don't include this
	 * device in the MPS capabilities scan.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "suggested-mrrs") != 0)
		goto fail1;

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s:  "
		    "Can not read reg\n", ddi_driver_name(dip));
		goto fail1;
	}

	/* Map the device's config space so its capability can be read. */
	if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
	    &config_handle) != DDI_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s:  pcie_map_phys "
		    "failed\n", ddi_driver_name(dip));
		goto fail2;
	}

	if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
	    DDI_FAILURE) {
		goto fail3;
	}

	max_supported = PCI_CAP_GET16(config_handle, 0, cap_ptr,
	    PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
	    max_supported);

	/* Narrow the highest common MPS to this device's capability. */
	if (max_supported < current->highest_common_mps)
		current->highest_common_mps = max_supported;

/* Unwind in reverse order of acquisition; always continue the walk. */
fail3:
	pcie_unmap_phys(&config_handle, reg);
fail2:
	kmem_free(reg, rlen);
fail1:
	return (DDI_WALK_CONTINUE);
}
2439 
2440 /*
2441  * Determines if there are any root ports attached to a root complex.
2442  *
2443  * dip - dip of root complex
2444  *
2445  * Returns - DDI_SUCCESS if there is at least one root port otherwise
2446  *	     DDI_FAILURE.
2447  */
2448 int
2449 pcie_root_port(dev_info_t *dip)
2450 {
2451 	int port_type;
2452 	uint16_t cap_ptr;
2453 	ddi_acc_handle_t config_handle;
2454 	dev_info_t *cdip = ddi_get_child(dip);
2455 
2456 	/*
2457 	 * Determine if any of the children of the passed in dip
2458 	 * are root ports.
2459 	 */
2460 	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {
2461 
2462 		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS)
2463 			continue;
2464 
2465 		if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E,
2466 		    &cap_ptr)) == DDI_FAILURE) {
2467 			pci_config_teardown(&config_handle);
2468 			continue;
2469 		}
2470 
2471 		port_type = PCI_CAP_GET16(config_handle, 0, cap_ptr,
2472 		    PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;
2473 
2474 		pci_config_teardown(&config_handle);
2475 
2476 		if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
2477 			return (DDI_SUCCESS);
2478 	}
2479 
2480 	/* No root ports were found */
2481 
2482 	return (DDI_FAILURE);
2483 }
2484 
2485 /*
 * Function that determines if a device is a PCIe device.
2487  *
2488  * dip - dip of device.
2489  *
2490  * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
2491  */
2492 int
2493 pcie_dev(dev_info_t *dip)
2494 {
2495 	/* get parent device's device_type property */
2496 	char *device_type;
2497 	int rc = DDI_FAILURE;
2498 	dev_info_t *pdip = ddi_get_parent(dip);
2499 
2500 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
2501 	    DDI_PROP_DONTPASS, "device_type", &device_type)
2502 	    != DDI_PROP_SUCCESS) {
2503 		return (DDI_FAILURE);
2504 	}
2505 
2506 	if (strcmp(device_type, "pciex") == 0)
2507 		rc = DDI_SUCCESS;
2508 	else
2509 		rc = DDI_FAILURE;
2510 
2511 	ddi_prop_free(device_type);
2512 	return (rc);
2513 }
2514 
2515 /*
2516  * Function to map in a device's memory space.
2517  */
/*
 * Map a device's config space described by phys_spec into the kernel,
 * returning the virtual address in *addrp and an access handle in
 * *handlep.  On failure the handle is freed and *handlep set to NULL.
 * Returns the ddi_map() result.
 */
static int
pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	ddi_device_acc_attr_t attr;

	/* Little-endian, strictly-ordered, cautious (fault-safe) access. */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;

	/* Allocate and initialize the access handle by hand. */
	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handlep);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = 0;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = attr;

	/* Build the mapping request for the caller-supplied regspec. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)phys_spec;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	result = ddi_map(dip, &mr, 0, 0, addrp);

	if (result != DDI_SUCCESS) {
		/* Mapping failed: release the handle so nothing leaks. */
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}
2560 
2561 /*
2562  * Map out memory that was mapped in with pcie_map_phys();
2563  */
static void
pcie_unmap_phys(ddi_acc_handle_t *handlep,  pci_regspec_t *ph)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* Build an unmap request mirroring the one used in pcie_map_phys(). */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)ph;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);

	/* Release the handle and clear the caller's reference. */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
2587 
/*
 * Record on this device's fault data whether RBER errors should be
 * treated as fatal; read back by pcie_get_rber_fatal().
 */
void
pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	bus_p->bus_pfd->pe_rber_fatal = val;
}
2594 
2595 /*
2596  * Return parent Root Port's pe_rber_fatal value.
2597  */
boolean_t
pcie_get_rber_fatal(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	/* The flag lives on the root port's fault data, not this device's. */
	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);
	return (rp_bus_p->bus_pfd->pe_rber_fatal);
}
2605 
/*
 * Determine whether this port can do ARI forwarding.  Only downstream
 * switch ports and root ports qualify, the port must implement PCIe
 * capability version 2.0 or later, the pcie_disable_ari tunable must be
 * clear, and Device Capabilities 2 must advertise ARI Forwarding.
 */
int
pcie_ari_supported(dev_info_t *dip)
{
	uint32_t devcap2;
	uint16_t pciecap;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint8_t dev_type;

	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);

	if (bus_p == NULL)
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	dev_type = bus_p->bus_dev_type;

	/* Only downstream switch ports and root ports can forward ARI. */
	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	if (pcie_disable_ari) {
		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);

	/* Device Capabilities 2 only exists from capability version 2.0. */
	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);

	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
	    dip, devcap2);

	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
		PCIE_DBG("pcie_ari_supported: "
		    "dip=%p: ARI Forwarding is supported\n", dip);
		return (PCIE_ARI_FORW_SUPPORTED);
	}
	return (PCIE_ARI_FORW_NOT_SUPPORTED);
}
2649 
2650 int
2651 pcie_ari_enable(dev_info_t *dip)
2652 {
2653 	uint16_t devctl2;
2654 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2655 
2656 	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);
2657 
2658 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2659 		return (DDI_FAILURE);
2660 
2661 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2662 	devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
2663 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2664 
2665 	PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
2666 	    dip, devctl2);
2667 
2668 	return (DDI_SUCCESS);
2669 }
2670 
2671 int
2672 pcie_ari_disable(dev_info_t *dip)
2673 {
2674 	uint16_t devctl2;
2675 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2676 
2677 	PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);
2678 
2679 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2680 		return (DDI_FAILURE);
2681 
2682 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2683 	devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
2684 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2685 
2686 	PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
2687 	    dip, devctl2);
2688 
2689 	return (DDI_SUCCESS);
2690 }
2691 
2692 int
2693 pcie_ari_is_enabled(dev_info_t *dip)
2694 {
2695 	uint16_t devctl2;
2696 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2697 
2698 	PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);
2699 
2700 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2701 		return (PCIE_ARI_FORW_DISABLED);
2702 
2703 	devctl2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCTL2);
2704 
2705 	PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
2706 	    dip, devctl2);
2707 
2708 	if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
2709 		PCIE_DBG("pcie_ari_is_enabled: "
2710 		    "dip=%p: ARI Forwarding is enabled\n", dip);
2711 		return (PCIE_ARI_FORW_ENABLED);
2712 	}
2713 
2714 	return (PCIE_ARI_FORW_DISABLED);
2715 }
2716 
/*
 * Determine whether a device implements the ARI extended capability.
 * Returns PCIE_ARI_DEVICE or PCIE_NOT_ARI_DEVICE.
 */
int
pcie_ari_device(dev_info_t *dip)
{
	ddi_acc_handle_t handle;
	uint16_t cap_ptr;

	PCIE_DBG("pcie_ari_device: dip=%p\n", dip);

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated.  This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	/* First make sure it is a PCIe device */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (PCIE_NOT_ARI_DEVICE);

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
	    != DDI_SUCCESS) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* Locate the ARI Capability */

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
	    &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* ARI Capability was found so it must be a ARI device */
	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);

	pci_config_teardown(&handle);
	return (PCIE_ARI_DEVICE);
}
2757 
/*
 * Read the Next Function Number field from a device's ARI capability
 * into *func.  Returns DDI_FAILURE if config space cannot be mapped or
 * the device has no ARI capability.
 */
int
pcie_ari_get_next_function(dev_info_t *dip, int *func)
{
	uint32_t val;
	uint16_t cap_ptr, next_function;
	ddi_acc_handle_t handle;

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated.  This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if ((PCI_CAP_LOCATE(handle,
	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	val = PCI_CAP_GET32(handle, 0, cap_ptr, PCIE_ARI_CAP);

	/* Extract the Next Function Number field from the ARI capability. */
	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
	    PCIE_ARI_CAP_NEXT_FUNC_MASK;

	pci_config_teardown(&handle);

	*func = next_function;

	return (DDI_SUCCESS);
}
2792 
2793 dev_info_t *
2794 pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
2795 {
2796 	pcie_req_id_t child_bdf;
2797 	dev_info_t *cdip;
2798 
2799 	for (cdip = ddi_get_child(dip); cdip;
2800 	    cdip = ddi_get_next_sibling(cdip)) {
2801 
2802 		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
2803 			return (NULL);
2804 
2805 		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
2806 			return (cdip);
2807 	}
2808 	return (NULL);
2809 }
2810 
2811 #ifdef	DEBUG
2812 
/* Dump the interesting fields of a pcie_bus_t for debugging. */
static void
pcie_print_bus(pcie_bus_t *bus_p)
{
	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);

	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
}
2830 
2831 /*
2832  * For debugging purposes set pcie_dbg_print != 0 to see printf messages
2833  * during interrupt.
2834  *
2835  * When a proper solution is in place this code will disappear.
2836  * Potential solutions are:
2837  * o circular buffers
2838  * o taskq to print at lower pil
2839  */
2840 int pcie_dbg_print = 0;
2841 void
2842 pcie_dbg(char *fmt, ...)
2843 {
2844 	va_list ap;
2845 
2846 	if (!pcie_debug_flags) {
2847 		return;
2848 	}
2849 	va_start(ap, fmt);
2850 	if (servicing_interrupt()) {
2851 		if (pcie_dbg_print) {
2852 			prom_vprintf(fmt, ap);
2853 		}
2854 	} else {
2855 		prom_vprintf(fmt, ap);
2856 	}
2857 	va_end(ap);
2858 }
2859 #endif	/* DEBUG */
2860 
2861 #if defined(__i386) || defined(__amd64)
/*
 * For a PCI-PCI bridge, decode the I/O and memory base registers from
 * config space and flag whichever range decodes to a zero base as empty.
 * *empty_io_range / *empty_mem_range are only ever set to B_TRUE, never
 * cleared, so callers should initialize them.  Non-bridge devices are
 * left untouched.
 */
static void
pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
    boolean_t *empty_mem_range)
{
	uint8_t	class, subclass;
	uint_t	val;

	class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
	subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);

	if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
		/* Reconstruct the I/O base address from its low register. */
		val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
		    PCI_BCNF_IO_MASK) << 8);
		/*
		 * Assuming that a zero based io_range[0] implies an
		 * invalid I/O range.  Likewise for mem_range[0].
		 */
		if (val == 0)
			*empty_io_range = B_TRUE;
		val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
		    PCI_BCNF_MEM_MASK) << 16);
		if (val == 0)
			*empty_mem_range = B_TRUE;
	}
}
2887 
2888 #endif /* defined(__i386) || defined(__amd64) */
2889 
2890 boolean_t
2891 pcie_link_bw_supported(dev_info_t *dip)
2892 {
2893 	uint32_t linkcap;
2894 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2895 
2896 	if (!PCIE_IS_PCIE(bus_p)) {
2897 		return (B_FALSE);
2898 	}
2899 
2900 	if (!PCIE_IS_RP(bus_p) && !PCIE_IS_SWD(bus_p)) {
2901 		return (B_FALSE);
2902 	}
2903 
2904 	linkcap = PCIE_CAP_GET(32, bus_p, PCIE_LINKCAP);
2905 	return ((linkcap & PCIE_LINKCAP_LINK_BW_NOTIFY_CAP) != 0);
2906 }
2907 
/*
 * Enable Link Bandwidth Notification interrupts on a port, setting up the
 * state (mutex, cv, path buffers) used by pcie_link_bw_intr() and the
 * taskq handler.  Fails if disabled via the pcie_disable_lbw tunable or
 * if the port does not advertise the capability.
 */
int
pcie_link_bw_enable(dev_info_t *dip)
{
	uint16_t linkctl;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	if (pcie_disable_lbw != 0) {
		return (DDI_FAILURE);
	}

	if (!pcie_link_bw_supported(dip)) {
		return (DDI_FAILURE);
	}

	/* Initialize synchronization state before arming the interrupts. */
	mutex_init(&bus_p->bus_lbw_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&bus_p->bus_lbw_cv, NULL, CV_DRIVER, NULL);
	/* Arm both bandwidth-management and autonomous-bandwidth interrupts. */
	linkctl = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL);
	linkctl |= PCIE_LINKCTL_LINK_BW_INTR_EN;
	linkctl |= PCIE_LINKCTL_LINK_AUTO_BW_INTR_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL, linkctl);

	/* Path buffers used by the taskq when generating sysevents. */
	bus_p->bus_lbw_pbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	bus_p->bus_lbw_cbuf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	bus_p->bus_lbw_state |= PCIE_LBW_S_ENABLED;

	return (DDI_SUCCESS);
}
2935 
/*
 * Undo pcie_link_bw_enable(): wait for any dispatched or running taskq
 * work to drain, disarm the interrupts, and free the associated state.
 */
int
pcie_link_bw_disable(dev_info_t *dip)
{
	uint16_t linkctl;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	if ((bus_p->bus_lbw_state & PCIE_LBW_S_ENABLED) == 0) {
		return (DDI_FAILURE);
	}

	/*
	 * Wait until no taskq work is pending or running before tearing
	 * anything down, since the taskq uses the buffers freed below.
	 */
	mutex_enter(&bus_p->bus_lbw_mutex);
	while ((bus_p->bus_lbw_state &
	    (PCIE_LBW_S_DISPATCHED | PCIE_LBW_S_RUNNING)) != 0) {
		cv_wait(&bus_p->bus_lbw_cv, &bus_p->bus_lbw_mutex);
	}
	mutex_exit(&bus_p->bus_lbw_mutex);

	/* Disarm both bandwidth notification interrupt sources. */
	linkctl = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL);
	linkctl &= ~PCIE_LINKCTL_LINK_BW_INTR_EN;
	linkctl &= ~PCIE_LINKCTL_LINK_AUTO_BW_INTR_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL, linkctl);

	bus_p->bus_lbw_state &= ~PCIE_LBW_S_ENABLED;
	kmem_free(bus_p->bus_lbw_pbuf, MAXPATHLEN);
	kmem_free(bus_p->bus_lbw_cbuf, MAXPATHLEN);
	bus_p->bus_lbw_pbuf = NULL;
	bus_p->bus_lbw_cbuf = NULL;

	mutex_destroy(&bus_p->bus_lbw_mutex);
	cv_destroy(&bus_p->bus_lbw_cv);

	return (DDI_SUCCESS);
}
2969 
/*
 * Taskq handler for link bandwidth change events dispatched by
 * pcie_link_bw_intr().  Re-captures the port's (and function 0 child's)
 * link speeds, invalidates the devinfo property cache, and posts an
 * ESC_PCIE_LINK_STATE sysevent.  If another event was dispatched while
 * this one was running, loop and process it too.
 */
void
pcie_link_bw_taskq(void *arg)
{
	dev_info_t *dip = arg;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	dev_info_t *cdip;
	boolean_t again;
	sysevent_t *se;
	sysevent_value_t se_val;
	sysevent_id_t eid;
	sysevent_attr_list_t *ev_attr_list;
	int circular;

top:
	ndi_devi_enter(dip, &circular);
	se = NULL;
	ev_attr_list = NULL;
	/* Transition from "dispatched" to "running" under the lock. */
	mutex_enter(&bus_p->bus_lbw_mutex);
	bus_p->bus_lbw_state &= ~PCIE_LBW_S_DISPATCHED;
	bus_p->bus_lbw_state |= PCIE_LBW_S_RUNNING;
	mutex_exit(&bus_p->bus_lbw_mutex);

	/*
	 * Update our own speeds as we've likely changed something.
	 */
	pcie_capture_speeds(dip);

	/*
	 * Walk our children. We only care about updating this on function 0
	 * because the PCIe specification requires that these all be the same
	 * otherwise.
	 */
	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {
		pcie_bus_t *cbus_p = PCIE_DIP2BUS(cdip);

		if (cbus_p == NULL) {
			continue;
		}

		if ((cbus_p->bus_bdf & PCIE_REQ_ID_FUNC_MASK) != 0) {
			continue;
		}

		/*
		 * It's possible that this can fire while a child is otherwise
		 * only partially constructed. Therefore, if we don't have the
		 * config handle, don't bother updating the child.
		 */
		if (cbus_p->bus_cfg_hdl == NULL) {
			continue;
		}

		pcie_capture_speeds(cdip);
		break;
	}

	se = sysevent_alloc(EC_PCIE, ESC_PCIE_LINK_STATE,
	    ILLUMOS_KERN_PUB "pcie", SE_SLEEP);

	/* Attach this port's device path as the detector. */
	(void) ddi_pathname(dip, bus_p->bus_lbw_pbuf);
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = bus_p->bus_lbw_pbuf;
	if (sysevent_add_attr(&ev_attr_list, PCIE_EV_DETECTOR_PATH, &se_val,
	    SE_SLEEP) != 0) {
		ndi_devi_exit(dip, circular);
		goto err;
	}

	/* cdip is non-NULL only if a usable function 0 child was found. */
	if (cdip != NULL) {
		(void) ddi_pathname(cdip, bus_p->bus_lbw_cbuf);

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = bus_p->bus_lbw_cbuf;

		/*
		 * If this fails, that's OK. We'd rather get the event off and
		 * there's a chance that there may not be anything there for us.
		 */
		(void) sysevent_add_attr(&ev_attr_list, PCIE_EV_CHILD_PATH,
		    &se_val, SE_SLEEP);
	}

	ndi_devi_exit(dip, circular);

	/*
	 * Before we generate and send down a sysevent, we need to tell the
	 * system that parts of the devinfo cache need to be invalidated. While
	 * the function below takes several args, it ignores them all. Because
	 * this is a global invalidation, we don't bother trying to do much more
	 * than requesting a global invalidation, lest we accidentally kick off
	 * several in a row.
	 */
	ddi_prop_cache_invalidate(DDI_DEV_T_NONE, NULL, NULL, 0);

	if (sysevent_attach_attributes(se, ev_attr_list) != 0) {
		goto err;
	}
	/* The attribute list is now owned by the sysevent. */
	ev_attr_list = NULL;

	if (log_sysevent(se, SE_SLEEP, &eid) != 0) {
		goto err;
	}

err:
	sysevent_free_attr(ev_attr_list);
	sysevent_free(se);

	/*
	 * Mark ourselves no longer running, wake any waiters (e.g.
	 * pcie_link_bw_disable()), and re-run if another event arrived
	 * while we were busy.
	 */
	mutex_enter(&bus_p->bus_lbw_mutex);
	bus_p->bus_lbw_state &= ~PCIE_LBW_S_RUNNING;
	cv_broadcast(&bus_p->bus_lbw_cv);
	again = (bus_p->bus_lbw_state & PCIE_LBW_S_DISPATCHED) != 0;
	mutex_exit(&bus_p->bus_lbw_mutex);

	if (again) {
		goto top;
	}
}
3088 
/*
 * Interrupt handler for Link Bandwidth Notification.  Claims the
 * interrupt if either bandwidth status bit is set, dispatches (or
 * coalesces into) the taskq that processes the change, and acknowledges
 * the status bits.
 */
int
pcie_link_bw_intr(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint16_t linksts;
	uint16_t flags = PCIE_LINKSTS_LINK_BW_MGMT | PCIE_LINKSTS_AUTO_BW;

	if ((bus_p->bus_lbw_state & PCIE_LBW_S_ENABLED) == 0) {
		return (DDI_INTR_UNCLAIMED);
	}

	linksts = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
	if ((linksts & flags) == 0) {
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Check if we've already dispatched this event. If we have already
	 * dispatched it, then there's nothing else to do, we coalesce multiple
	 * events.
	 */
	mutex_enter(&bus_p->bus_lbw_mutex);
	bus_p->bus_lbw_nevents++;
	if ((bus_p->bus_lbw_state & PCIE_LBW_S_DISPATCHED) == 0) {
		/* The taskq re-runs itself if DISPATCHED is set while RUNNING. */
		if ((bus_p->bus_lbw_state & PCIE_LBW_S_RUNNING) == 0) {
			taskq_dispatch_ent(pcie_link_tq, pcie_link_bw_taskq,
			    dip, 0, &bus_p->bus_lbw_ent);
		}

		bus_p->bus_lbw_state |= PCIE_LBW_S_DISPATCHED;
	}
	mutex_exit(&bus_p->bus_lbw_mutex);

	/* Acknowledge the bandwidth status bits (write-1-to-clear). */
	PCIE_CAP_PUT(16, bus_p, PCIE_LINKSTS, flags);
	return (DDI_INTR_CLAIMED);
}
3125 
3126 int
3127 pcie_link_set_target(dev_info_t *dip, pcie_link_speed_t speed)
3128 {
3129 	uint16_t ctl2, rval;
3130 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
3131 
3132 	if (!PCIE_IS_PCIE(bus_p)) {
3133 		return (ENOTSUP);
3134 	}
3135 
3136 	if (!PCIE_IS_RP(bus_p) && !PCIE_IS_SWD(bus_p)) {
3137 		return (ENOTSUP);
3138 	}
3139 
3140 	switch (speed) {
3141 	case PCIE_LINK_SPEED_2_5:
3142 		rval = PCIE_LINKCTL2_TARGET_SPEED_2_5;
3143 		break;
3144 	case PCIE_LINK_SPEED_5:
3145 		rval = PCIE_LINKCTL2_TARGET_SPEED_5;
3146 		break;
3147 	case PCIE_LINK_SPEED_8:
3148 		rval = PCIE_LINKCTL2_TARGET_SPEED_8;
3149 		break;
3150 	case PCIE_LINK_SPEED_16:
3151 		rval = PCIE_LINKCTL2_TARGET_SPEED_16;
3152 		break;
3153 	default:
3154 		return (EINVAL);
3155 	}
3156 
3157 	mutex_enter(&bus_p->bus_speed_mutex);
3158 	bus_p->bus_target_speed = speed;
3159 	bus_p->bus_speed_flags |= PCIE_LINK_F_ADMIN_TARGET;
3160 
3161 	ctl2 = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL2);
3162 	ctl2 &= ~PCIE_LINKCTL2_TARGET_SPEED_MASK;
3163 	ctl2 |= rval;
3164 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL2, ctl2);
3165 	mutex_exit(&bus_p->bus_speed_mutex);
3166 
3167 	/*
3168 	 * Make sure our updates have been reflected in devinfo.
3169 	 */
3170 	pcie_capture_speeds(dip);
3171 
3172 	return (0);
3173 }
3174 
3175 int
3176 pcie_link_retrain(dev_info_t *dip)
3177 {
3178 	uint16_t ctl;
3179 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
3180 
3181 	if (!PCIE_IS_PCIE(bus_p)) {
3182 		return (ENOTSUP);
3183 	}
3184 
3185 	if (!PCIE_IS_RP(bus_p) && !PCIE_IS_SWD(bus_p)) {
3186 		return (ENOTSUP);
3187 	}
3188 
3189 	/*
3190 	 * The PCIe specification suggests that we make sure that the link isn't
3191 	 * in training before issuing this command in case there was a state
3192 	 * machine transition prior to when we got here. We wait and then go
3193 	 * ahead and issue the command anyways.
3194 	 */
3195 	for (uint32_t i = 0; i < pcie_link_retrain_count; i++) {
3196 		uint16_t sts;
3197 
3198 		sts = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
3199 		if ((sts & PCIE_LINKSTS_LINK_TRAINING) == 0)
3200 			break;
3201 		delay(drv_usectohz(pcie_link_retrain_delay_ms * 1000));
3202 	}
3203 
3204 	ctl = PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL);
3205 	ctl |= PCIE_LINKCTL_RETRAIN_LINK;
3206 	PCIE_CAP_PUT(16, bus_p, PCIE_LINKCTL, ctl);
3207 
3208 	/*
3209 	 * Wait again to see if it clears before returning to the user.
3210 	 */
3211 	for (uint32_t i = 0; i < pcie_link_retrain_count; i++) {
3212 		uint16_t sts;
3213 
3214 		sts = PCIE_CAP_GET(16, bus_p, PCIE_LINKSTS);
3215 		if ((sts & PCIE_LINKSTS_LINK_TRAINING) == 0)
3216 			break;
3217 		delay(drv_usectohz(pcie_link_retrain_delay_ms * 1000));
3218 	}
3219 
3220 	return (0);
3221 }
3222