/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/promif.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/pci_cap.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/hotplug/pci/pcie_hp.h>
#include <sys/hotplug/pci/pciehpc.h>
#include <sys/hotplug/pci/pcishpc.h>
#include <sys/hotplug/pci/pcicfg.h>
#include <sys/pci_cfgacc.h>

/* Local function prototypes */
static void pcie_init_pfd(dev_info_t *);
static void pcie_fini_pfd(dev_info_t *);

#if defined(__i386) || defined(__amd64)
static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *);
#endif /* defined(__i386) || defined(__amd64) */

#ifdef DEBUG
uint_t pcie_debug_flags = 0;
static void pcie_print_bus(pcie_bus_t *bus_p);
void pcie_dbg(char *fmt, ...);
#endif /* DEBUG */

/* Variable to control default PCI-Express config settings */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/* xxx_fw are bits that are controlled by FW and should not be modified */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800;		/* Reserved Bits */

ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000;		/* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)

ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;

/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors, such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions.
 * On x86 the closest thing to a PCIe root complex driver is NPE; on SPARC
 * it is PX.
 *
 * pcie_serr_disable_flag : disable SERR only (in the RCR and command
 * register).  x86 systems may want to disable SERR in general; for root
 * ports, enabling SERR causes NMIs that are not handled, which results in
 * a watchdog timeout error.
 */
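/*
 * For example (illustrative only), a platform root complex driver that
 * wants UR errors ignored fabric-wide could mask them before its children
 * are initialized:
 *
 *	pcie_set_aer_uce_mask(pcie_get_aer_uce_mask() | PCIE_AER_UCE_UR);
 *
 * pcie_set_aer_uce_mask() below also clears the UR reporting enable in
 * pcie_base_err_default, so subsequently initialized devices will not
 * report URs.
 */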
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */

/* Default severities needed for eversholt.  Error handling doesn't care */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

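/*
 * The Device Control MPS field encodes the payload size as a power of two:
 * payload bytes = 128 << encoded_value.  Shifting the
 * PCIE_DEVCTL_MAX_PAYLOAD_4096 mask value right by 5 (the field's bit
 * offset within the register) yields the encoded value 5, since
 * 128 << 5 == 4096.
 */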
int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
int pcie_disable_ari = 0;

static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
    int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph);

dev_info_t *pcie_get_rc_dip(dev_info_t *dip);

/*
 * modload support
 */

static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

/*
 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on the
 * type of PCI device it is being sent for.
 */
char		*pcie_nv_buf;
nv_alloc_t	*pcie_nvap;
nvlist_t	*pcie_nvl;

int
_init(void)
{
	int rval;

	pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
	pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
	pcie_nvl = fm_nvlist_create(pcie_nvap);

	if ((rval = mod_install(&modlinkage)) != 0) {
		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
		fm_nva_xdestroy(pcie_nvap);
		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
	}
	return (rval);
}

int
_fini()
{
	int rval;

	if ((rval = mod_remove(&modlinkage)) == 0) {
		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
		fm_nva_xdestroy(pcie_nvap);
		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
	}
	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
	int ret = DDI_SUCCESS;

	/*
	 * Create a "devctl" minor node to support DEVCTL_DEVICE_*
	 * and DEVCTL_BUS_* ioctls to this bus.
	 */
	if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
	    DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to create devctl minor node for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
		/*
		 * On some x86 platforms, we observed unexpected hotplug
		 * initialization failures in recent years. The known cause
		 * is a hardware issue: while the problem PCI bridges have
		 * the Hotplug Capable registers set, the machine actually
		 * does not implement the expected ACPI object.
		 *
		 * We don't want to stop PCI driver attach and system boot
		 * just because of this hotplug initialization failure.
		 * Continue with a debug message printed.
		 */
		PCIE_DBG("%s%d: Failed setting hotplug framework\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

#if defined(__sparc)
		ddi_remove_minor_node(dip, "devctl");

		return (ret);
#endif /* defined(__sparc) */
	}

	return (DDI_SUCCESS);
}

/* ARGSUSED */
int
pcie_uninit(dev_info_t *dip)
{
	int ret = DDI_SUCCESS;

	if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
		(void) pcie_ari_disable(dip);

	if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	ddi_remove_minor_node(dip, "devctl");

	return (ret);
}

/*
 * PCIe module interface for enabling hotplug interrupt.
 *
 * It should be called after pcie_init() is done and the bus driver's
 * interrupt handlers have been attached.
 */
int
pcie_hpintr_enable(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);

	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
		(void) (ctrl_p->hc_ops.enable_hpc_intr)(ctrl_p);
	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
		(void) pcishpc_enable_irqs(ctrl_p);
	}
	return (DDI_SUCCESS);
}

/*
 * PCIe module interface for disabling hotplug interrupt.
 *
 * It should be called before pcie_uninit() is called and the bus driver's
 * interrupt handlers are detached.
 */
int
pcie_hpintr_disable(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);

	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
		(void) (ctrl_p->hc_ops.disable_hpc_intr)(ctrl_p);
	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
		(void) pcishpc_disable_irqs(ctrl_p);
	}
	return (DDI_SUCCESS);
}

/* ARGSUSED */
int
pcie_intr(dev_info_t *dip)
{
	return (pcie_hp_intr(dip));
}

/* ARGSUSED */
int
pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	/*
	 * Make sure the open is for the right file type.
	 */
	if (otyp != OTYP_CHR)
		return (EINVAL);

	/*
	 * Handle the open by tracking the device state.
	 */
	if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
	    ((flags & FEXCL) &&
	    (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
		return (EBUSY);
	}

	if (flags & FEXCL)
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
	else
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;

	return (0);
}

/* ARGSUSED */
int
pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (otyp != OTYP_CHR)
		return (EINVAL);

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;

	return (0);
}

/* ARGSUSED */
int
pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	struct devctl_iocdata	*dcp;
	uint_t			bus_state;
	int			rv = DDI_SUCCESS;

	/*
	 * We can use the generic implementation for devctl ioctl
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
	default:
		break;
	}

	/*
	 * read devctl ioctl data
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {
	case DEVCTL_BUS_QUIESCE:
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_QUIESCED)
				break;
		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
		break;
	case DEVCTL_BUS_UNQUIESCE:
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_ACTIVE)
				break;
		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
		break;
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
	case DEVCTL_DEVICE_RESET:
		rv = ENOTSUP;
		break;
	default:
		rv = ENOTTY;
	}

	ndi_dc_freehdl(dcp);
	return (rv);
}

/* ARGSUSED */
int
pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
	if (dev == DDI_DEV_T_ANY)
		goto skip;

	if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
	    strcmp(name, "pci-occupant") == 0) {
		int	pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));

		pcie_hp_create_occupant_props(dip, dev, pci_dev);
	}

skip:
	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
}

int
pcie_init_cfghdl(dev_info_t *cdip)
{
	pcie_bus_t		*bus_p;
	ddi_acc_handle_t	eh = NULL;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL)
		return (DDI_FAILURE);

	/* Create a config access handle dedicated to error handling */
	if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Cannot setup config access"
		    " for BDF 0x%x\n", bus_p->bus_bdf);
		return (DDI_FAILURE);
	}

	bus_p->bus_cfg_hdl = eh;
	return (DDI_SUCCESS);
}

void
pcie_fini_cfghdl(dev_info_t *cdip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(cdip);

	pci_config_teardown(&bus_p->bus_cfg_hdl);
}

/*
 * PCI-Express child device initialization.
 * This function enables generic pci-express interrupts and error
 * handling.
 *
 * @param cdip		child's dip (device's dip)
 * @return		DDI_SUCCESS or DDI_FAILURE
 */
/* ARGSUSED */
int
pcie_initchild(dev_info_t *cdip)
{
	uint16_t	tmp16, reg16;
	pcie_bus_t	*bus_p;
	uint32_t	devid, venid;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));

		return (DDI_FAILURE);
	}

	if (pcie_init_cfghdl(cdip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Update pcie_bus_t with the real Vendor ID and Device ID.
	 *
	 * For assigned devices in an IOV environment, OBP returns faked
	 * device-id/vendor-id values on configuration reads, and for both
	 * properties in the root domain.  The translate_devid() function
	 * updates the properties with the real device-id/vendor-id on such
	 * platforms, so the properties can be used here to fetch the real
	 * IDs and overwrite the faked ones.
	 *
	 * For unassigned devices, or devices in a non-IOV environment, the
	 * operation below makes no difference.
	 *
	 * The IOV implementation only supports assignment of PCIe endpoint
	 * devices; devices under PCI-PCI bridges don't need this treatment.
	 */
	devid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "device-id", -1);
	venid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	bus_p->bus_dev_ven_id = (devid << 16) | (venid & 0xffff);

	/* Clear the device's status register */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
	PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);

	/* Setup the device's command register */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;

#if defined(__i386) || defined(__amd64)
	boolean_t empty_io_range = B_FALSE;
	boolean_t empty_mem_range = B_FALSE;
	/*
	 * Check for empty IO and Mem ranges on bridges.  If a range is
	 * empty, disable the corresponding IO/Mem access, since leaving it
	 * enabled can cause a hang.
	 */
	pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
	    &empty_mem_range);
	if ((empty_io_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_IO)) {
		tmp16 &= ~PCI_COMM_IO;
		PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
	if ((empty_mem_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_MAE)) {
		tmp16 &= ~PCI_COMM_MAE;
		PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
#endif /* defined(__i386) || defined(__amd64) */

	if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
		tmp16 &= ~PCI_COMM_SERR_ENABLE;

	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
	PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if (PCIE_IS_BDG(bus_p)) {
		/* Clear the device's secondary status register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);

		/* Setup the device's secondary command register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
		tmp16 = (reg16 & pcie_bdg_command_default_fw);

		tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		/*
		 * Workaround for this Nvidia bridge. Don't enable the SERR
		 * enable bit in the bridge control register as it could
		 * lead to bogus NMIs.
		 */
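		/* 0x037010DE == device-id 0x0370, vendor-id 0x10de (NVIDIA) */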
		if (bus_p->bus_dev_ven_id == 0x037010DE)
			tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;

		if (pcie_command_default & PCI_COMM_PARITY_DETECT)
			tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;

		/*
		 * Enable Master Abort Mode only if URs have not been masked.
		 * For PCI and PCIe-PCI bridges, enabling this bit causes
		 * Master Aborts/URs to be forwarded as a UR/TA or SERR.  If
		 * this bit is masked, posted requests are dropped and
		 * non-posted requests are returned with -1.
		 */
		if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
			tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
		else
			tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
		PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
		    reg16);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		/* Setup PCIe device control register */
		reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		/* note: MPS/MRRS are initialized in pcie_initchild_mps() */
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK));
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);

		/* Enable PCIe errors */
		pcie_enable_errors(cdip);
	}

	bus_p->bus_ari = B_FALSE;
	if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
	    == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
	    == PCIE_ARI_DEVICE)) {
		bus_p->bus_ari = B_TRUE;
	}

	if (pcie_initchild_mps(cdip) == DDI_FAILURE) {
		pcie_fini_cfghdl(cdip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
pcie_init_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_ZALLOC(pf_data_t);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	PCIE_DIP2PFD(dip) = pfd_p;

	pfd_p->pe_bus_p = bus_p;
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_orig_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* Allocate the root fault struct for both RC and RP */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
		PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
	}

	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;

	if (PCIE_IS_BDG(bus_p))
		PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);

	if (PCIE_IS_PCIE(bus_p)) {
		PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);

		if (PCIE_IS_RP(bus_p))
			PCIE_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_rp_err_regs_t);

		PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		if (PCIE_IS_RP(bus_p)) {
			PCIE_ADV_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			PCIE_ADV_BDG_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		}
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		} else {
			PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p))
				PCIX_ECC_REG(pfd_p) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
		}
	}
}

static void
pcie_fini_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_DIP2PFD(dip);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		}

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
		else if (PCIE_IS_PCIE_BDG(bus_p))
			kmem_free(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));

		kmem_free(PCIE_ADV_REG(pfd_p),
		    sizeof (pf_pcie_adv_err_regs_t));

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));

		kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p))
				kmem_free(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));

			kmem_free(PCIX_ERR_REG(pfd_p),
			    sizeof (pf_pcix_err_regs_t));
		}
	}

	if (PCIE_IS_BDG(bus_p))
		kmem_free(PCI_BDG_ERR_REG(pfd_p),
		    sizeof (pf_pci_bdg_err_regs_t));

	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

	if (PCIE_IS_ROOT(bus_p)) {
		kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
		kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
	}

	kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

	PCIE_DIP2PFD(dip) = NULL;
}


/*
 * Special functions to allocate pf_data_t's for PCIe root complexes.
 * Note: Root Complex not Root Port
 */
void
pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
{
	pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_orig_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
	PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
	PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
	PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
	PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
	PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = PCIE_INVALID_BDF;
	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = PCIE_INVALID_BDF;

	PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
}

void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
	kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
	kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
	kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
	kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
	kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
	kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
}

/*
 * init pcie_bus_t for root complex
 *
 * Only a few of the fields in bus_t are valid for a root complex.
 * The fields that are bracketed are initialized in this routine:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		<bus_fm_flags>
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		bus_bdg_secbus
 * uint16_t		bus_pcie_off
 * uint16_t		<bus_aer_off>
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t	*	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * pcie_domain_t *	<bus_dom>
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		bus_plat_private
 */
void
pcie_rc_init_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p;

	bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
	bus_p->bus_dip = dip;
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
	bus_p->bus_hdr_type = PCI_HEADER_ONE;

	/* Fake that there are AER logs */
	bus_p->bus_aer_off = (uint16_t)-1;

	/* Needed only for handle lookup */
	bus_p->bus_fm_flags |= PF_FM_READY;

	ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);

	PCIE_BUS2DOM(bus_p) = PCIE_ZALLOC(pcie_domain_t);
}

void
pcie_rc_fini_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);
	ndi_set_bus_private(dip, B_FALSE, NULL, NULL);
	kmem_free(PCIE_BUS2DOM(bus_p), sizeof (pcie_domain_t));
	kmem_free(bus_p, sizeof (pcie_bus_t));
}

/*
 * partially init pcie_bus_t for device (dip,bdf) for accessing pci
 * config space
 *
 * This routine is invoked during boot, either after creating a devinfo node
 * (x86 case) or during px driver attach (sparc case); it is also invoked
 * in hotplug context after a devinfo node is created.
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
 * is set:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		<bus_rp_dip>
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	<bus_bdf>
 * pcie_req_id_t	<bus_rp_bdf>
 * uint32_t		<bus_dev_ven_id>
 * uint8_t		<bus_rev_id>
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		<bus_bdg_secbus>
 * uint16_t		<bus_pcie_off>
 * uint16_t		<bus_aer_off>
 * uint16_t		<bus_pcix_off>
 * uint16_t		<bus_ecc_ver>
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t	*	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * pcie_domain_t *	bus_dom
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		bus_plat_private
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
 * is set:
 *
 * dev_info_t *		bus_dip
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		bus_hdr_type
 * uint16_t		bus_dev_type
 * uint8_t		<bus_bdg_secbus>
 * uint16_t		bus_pcie_off
 * uint16_t		bus_aer_off
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	<bus_bus_range>
 * ppb_ranges_t	*	<bus_addr_ranges>
 * int			<bus_addr_entries>
 * pci_regspec_t *	<bus_assigned_addr>
 * int			<bus_assigned_entries>
 * pf_data_t *		<bus_pfd>
 * pcie_domain_t *	bus_dom
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		<bus_plat_private>
 */

pcie_bus_t *
pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
{
	uint16_t	status, base, baseptr, num_cap;
	uint32_t	capid;
	int		range_size;
	pcie_bus_t	*bus_p;
	dev_info_t	*rcdip;
	dev_info_t	*pdip;
	const char	*errstr = NULL;

	if (!(flags & PCIE_BUS_INITIAL))
		goto initial_done;

	bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

	bus_p->bus_dip = dip;
	bus_p->bus_bdf = bdf;

	rcdip = pcie_get_rc_dip(dip);
	ASSERT(rcdip != NULL);

	/* Save the Vendor ID, Device ID and revision ID */
	bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
	bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
	/* Save the Header Type */
	bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
	bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

	/*
	 * Figure out the device type and all the relevant capability offsets
	 */
	/* set default value */
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;

	status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
	if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
		goto caps_done; /* capability not supported */

	/* Relevant conventional capabilities first */

	/* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
	num_cap = 2;

	switch (bus_p->bus_hdr_type) {
	case PCI_HEADER_ZERO:
		baseptr = PCI_CONF_CAP_PTR;
		break;
	case PCI_HEADER_PPB:
		baseptr = PCI_BCNF_CAP_PTR;
		break;
	case PCI_HEADER_CARDBUS:
		baseptr = PCI_CBUS_CAP_PTR;
		break;
	default:
		cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
		    __func__, bus_p->bus_hdr_type);
		goto caps_done;
	}

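	/*
	 * Walk the conventional capability list, stopping early once both
	 * capabilities of interest have been found (num_cap reaches zero).
	 */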
	base = baseptr;
	for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
	    base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
		capid = pci_cfgacc_get8(rcdip, bdf, base);
		switch (capid) {
		case PCI_CAP_ID_PCI_E:
			bus_p->bus_pcie_off = base;
			bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
			    base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

			/* Check and save PCIe hotplug capability information */
			if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
			    (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
			    & PCIE_PCIECAP_SLOT_IMPL) &&
			    (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
			    & PCIE_SLOTCAP_HP_CAPABLE))
				bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;

			num_cap--;
			break;
		case PCI_CAP_ID_PCIX:
			bus_p->bus_pcix_off = base;
			if (PCIE_IS_BDG(bus_p))
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
			else
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
			num_cap--;
			break;
		default:
			break;
		}
	}

	/* Check and save PCI hotplug (SHPC) capability information */
	if (PCIE_IS_BDG(bus_p)) {
		base = baseptr;
		for (base = pci_cfgacc_get8(rcdip, bdf, base);
		    base; base = pci_cfgacc_get8(rcdip, bdf,
		    base + PCI_CAP_NEXT_PTR)) {
			capid = pci_cfgacc_get8(rcdip, bdf, base);
			if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
				bus_p->bus_pci_hp_off = base;
				bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
				break;
			}
		}
	}

	/* Then, relevant extended capabilities */

	if (!PCIE_IS_PCIE(bus_p))
		goto caps_done;

	/* Extended caps: PCIE_EXT_CAP_ID_AER */
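	/*
	 * Each extended capability header holds the capability ID in its
	 * low 16 bits and the next-capability pointer in its upper bits;
	 * the first header sits at fixed config offset PCIE_EXT_CAP.
	 */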
	for (base = PCIE_EXT_CAP; base; base = (capid >>
	    PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
		capid = pci_cfgacc_get32(rcdip, bdf, base);
		if (capid == PCI_CAP_EINVAL32)
			break;
		if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
		    == PCIE_EXT_CAP_ID_AER) {
			bus_p->bus_aer_off = base;
			break;
		}
	}

caps_done:
	/* save RP dip and RP bdf */
	if (PCIE_IS_RP(bus_p)) {
		bus_p->bus_rp_dip = dip;
		bus_p->bus_rp_bdf = bus_p->bus_bdf;
	} else {
		for (pdip = ddi_get_parent(dip); pdip;
		    pdip = ddi_get_parent(pdip)) {
			pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

			/*
			 * If RP dip and RP bdf in parent's bus_t have
			 * been initialized, simply use these instead of
			 * continuing up to the RC.
			 */
			if (parent_bus_p->bus_rp_dip != NULL) {
				bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
				break;
			}

			/*
			 * When debugging, be aware that some NVIDIA x86
			 * architectures have 2 nodes for each RP, one at
			 * Bus 0x0 and one at Bus 0x80.  The requester is
			 * from Bus 0x80.
			 */
			if (PCIE_IS_ROOT(parent_bus_p)) {
				bus_p->bus_rp_dip = pdip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
				break;
			}
		}
	}

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
	bus_p->bus_fm_flags = 0;
	bus_p->bus_mps = 0;

	ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(dip))
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
		    "hotplug-capable");

initial_done:
	if (!(flags & PCIE_BUS_FINAL))
		goto final_done;

	/* already initialized? */
	bus_p = PCIE_DIP2BUS(dip);

	/* Save the Range information if device is a switch/bridge */
	if (PCIE_IS_BDG(bus_p)) {
		/* get "bus_range" property */
		range_size = sizeof (pci_bus_range_t);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
		    != DDI_PROP_SUCCESS) {
			errstr = "Cannot find \"bus-range\" property";
			cmn_err(CE_WARN,
			    "PCIE init err info failed BDF 0x%x:%s\n",
			    bus_p->bus_bdf, errstr);
		}

		/* get secondary bus number */
		rcdip = pcie_get_rc_dip(dip);
		ASSERT(rcdip != NULL);

		bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
		    bus_p->bus_bdf, PCI_BCNF_SECBUS);

		/* Get "ranges" property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&bus_p->bus_addr_ranges,
		    &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
			bus_p->bus_addr_entries = 0;
		bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
	}

	/* save "assigned-addresses" property array, ignore failures */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
	    &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
		bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
	else
		bus_p->bus_assigned_entries = 0;

	pcie_init_pfd(dip);

	pcie_init_plat(dip);

final_done:

	PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
	    ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
	    bus_p->bus_bdg_secbus);
#ifdef DEBUG
	pcie_print_bus(bus_p);
#endif

	return (bus_p);
}

/*
 * Invoked before destroying devinfo node, mostly during hotplug
 * operation to free pcie_bus_t data structure
 */
/* ARGSUSED */
void
pcie_fini_bus(dev_info_t *dip, uint8_t flags)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	ASSERT(bus_p);

	if (flags & PCIE_BUS_INITIAL) {
		pcie_fini_plat(dip);
		pcie_fini_pfd(dip);

		kmem_free(bus_p->bus_assigned_addr,
		    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
		kmem_free(bus_p->bus_addr_ranges,
		    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
		/* zero out the fields that have been destroyed */
		bus_p->bus_assigned_addr = NULL;
		bus_p->bus_addr_ranges = NULL;
		bus_p->bus_assigned_entries = 0;
		bus_p->bus_addr_entries = 0;
	}

	if (flags & PCIE_BUS_FINAL) {
		if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
			    "hotplug-capable");
		}

		ndi_set_bus_private(dip, B_TRUE, NULL, NULL);
		kmem_free(bus_p, sizeof (pcie_bus_t));
	}
}

int
pcie_postattach_child(dev_info_t *cdip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

	if (!bus_p)
		return (DDI_FAILURE);

	return (pcie_enable_ce(cdip));
}

/*
 * PCI-Express child device de-initialization.
 * This function disables generic pci-express interrupts and error
 * handling.
 */
void
pcie_uninitchild(dev_info_t *cdip)
{
	pcie_disable_errors(cdip);
	pcie_fini_cfghdl(cdip);
	pcie_fini_dom(cdip);
}

/*
 * find the root complex dip
 */
dev_info_t *
pcie_get_rc_dip(dev_info_t *dip)
{
	dev_info_t *rcdip;
	pcie_bus_t *rc_bus_p;

	for (rcdip = ddi_get_parent(dip); rcdip;
	    rcdip = ddi_get_parent(rcdip)) {
		rc_bus_p = PCIE_DIP2BUS(rcdip);
		if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
			break;
	}

	return (rcdip);
}

static boolean_t
pcie_is_pci_device(dev_info_t *dip)
{
	dev_info_t	*pdip;
	char		*device_type;

	pdip = ddi_get_parent(dip);
	ASSERT(pdip);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
	    "device_type", &device_type) != DDI_PROP_SUCCESS)
		return (B_FALSE);

	if (strcmp(device_type, "pciex") != 0 &&
	    strcmp(device_type, "pci") != 0) {
		ddi_prop_free(device_type);
		return (B_FALSE);
	}

	ddi_prop_free(device_type);
	return (B_TRUE);
}

typedef struct {
	boolean_t	init;
	uint8_t		flags;
} pcie_bus_arg_t;

/*ARGSUSED*/
static int
pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
{
	pcie_req_id_t	bdf;
	pcie_bus_arg_t	*bus_arg = (pcie_bus_arg_t *)arg;

	if (!pcie_is_pci_device(dip))
		goto out;

	if (bus_arg->init) {
		if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
			goto out;

		(void) pcie_init_bus(dip, bdf, bus_arg->flags);
	} else {
		(void) pcie_fini_bus(dip, bus_arg->flags);
	}

	return (DDI_WALK_CONTINUE);

out:
	return (DDI_WALK_PRUNECHILD);
}

void
pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
{
	int		circular_count;
	dev_info_t	*dip = ddi_get_child(rcdip);
	pcie_bus_arg_t	arg;

	arg.init = B_TRUE;
	arg.flags = flags;

	ndi_devi_enter(rcdip, &circular_count);
	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
	ndi_devi_exit(rcdip, circular_count);
}

void
pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
{
	int		circular_count;
	dev_info_t	*dip = ddi_get_child(rcdip);
	pcie_bus_arg_t	arg;

	arg.init = B_FALSE;
	arg.flags = flags;

	ndi_devi_enter(rcdip, &circular_count);
	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
	ndi_devi_exit(rcdip, circular_count);
}

void
pcie_enable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	reg16, tmp16;
	uint32_t	reg32, tmp32;

	ASSERT(bus_p);

	/*
	 * Clear any pending errors
	 */
	pcie_clear_errors(dip);

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Enable Baseline Error Handling but leave CE reporting off (poweron
	 * default).
	 */
	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
	    PCI_CAP_EINVAL16) {
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
	}

	/* Enable Root Port Baseline Error Receiving */
	if (PCIE_IS_ROOT(bus_p) &&
	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
	    PCI_CAP_EINVAL16) {

		tmp16 = pcie_serr_disable_flag ?
		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
		    pcie_root_ctrl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
		    reg16);
	}

	/*
	 * Enable PCI-Express Advanced Error Handling if it exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
		    reg32);
	}

	/* Enable Uncorrectable errors */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_mask;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
		    reg32);
	}

	/* Enable ECRC generation and checking */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = reg32 | pcie_ecrc_value;
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
	}

	/* Enable Secondary Uncorrectable errors if this is a bridge */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_suce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
		    reg32);
	}

	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
		    PCIE_AER_SUCE_MASK, reg32);
	}

root:
	/*
	 * Enable Root Control if this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
		    pcie_root_error_cmd_default);
		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
		    PCIE_AER_RE_CMD, reg16);
	}
}

/*
 * This function is used for enabling CE reporting and setting the AER CE mask.
 * When called from outside the pcie module it should always be preceded by
 * a call to pcie_enable_errors.
 */
int
pcie_enable_ce(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_sts, device_ctl;
	uint32_t	tmp_pcie_aer_ce_mask;

	if (!PCIE_IS_PCIE(bus_p))
		return (DDI_SUCCESS);

	/*
	 * The "pcie_ce_mask" property is used to control both the CE reporting
	 * enable field in the device control register and the AER CE mask. We
	 * leave CE reporting disabled if pcie_ce_mask is set to -1.
	 */
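	/*
	 * For example (illustrative only), a driver.conf entry of
	 * pcie_ce_mask=0xffffffff; (i.e. -1) would leave CE reporting
	 * disabled for the devices it applies to.
	 */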

	tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);

	if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
		/*
		 * Nothing to do since CE reporting has already been disabled.
		 */
		return (DDI_SUCCESS);
	}

	if (PCIE_HAS_AER(bus_p)) {
		/* Enable AER CE */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
		    0);

		/* Clear any pending AER CE errors */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
	}

	/* clear any pending CE errors */
	if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
	    PCI_CAP_EINVAL16)
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
		    device_sts & (~PCIE_DEVSTS_CE_DETECTED));

	/* Enable CE reporting */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
	    (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
	PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);

	return (DDI_SUCCESS);
}

/* ARGSUSED */
void
pcie_disable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_ctl;
	uint32_t	aer_reg;

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Disable PCI-Express Baseline Error Handling
	 */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);

	/*
	 * Disable PCI-Express Advanced Error Handling if it exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		goto root;

	/* Disable Uncorrectable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);

	/* Disable Correctable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);

	/* Disable ECRC generation and checking */
	if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
		    PCIE_AER_CTL_ECRC_CHECK_ENA);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
	}
	/*
	 * Disable Secondary Uncorrectable errors if this is a bridge
	 */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);

root:
	/*
	 * Disable Root Control if this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if (!pcie_serr_disable_flag) {
		device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
		device_ctl &= ~PCIE_ROOT_SYS_ERR;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
	}

	if (!PCIE_HAS_AER(bus_p))
		return;

	if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		device_ctl &= ~pcie_root_error_cmd_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
	}
}

/*
 * Extract bdf from "reg" property.
 */
int
pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
{
	pci_regspec_t	*regspec;
	int		reglen;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
		ddi_prop_free(regspec);
		return (DDI_FAILURE);
	}

	/* Get phys_hi from first element.  All have same bdf. */
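	/*
	 * phys_hi holds bus/device/function/register in its low 24 bits;
	 * masking off the register bits and shifting right by 8 leaves the
	 * bdf laid out as bus[15:8], device[7:3], function[2:0].
	 */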
	*bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;

	ddi_prop_free(regspec);
	return (DDI_SUCCESS);
}

dev_info_t *
pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

uint32_t
pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip;

	/*
	 * As part of the probing, the PCI fcode interpreter may set up a DMA
	 * request if a given card has fcode on it, using the dip and rdip of
	 * the hotplug connector, i.e., the dip and rdip of the px/pcieb
	 * driver. In this case, return an invalid value for the bdf since we
	 * cannot get to the bdf value of the actual device which will be
	 * initiating this DMA.
	 */
	if (rdip == dip)
		return (PCIE_INVALID_BDF);

	cdip = pcie_get_my_childs_dip(dip, rdip);

	/*
	 * For a given rdip, return the bdf value of dip's (px or pcieb)
	 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
	 *
	 * XXX - For now, return an invalid bdf value for all PCI and PCI-X
	 * devices since this needs more work.
	 */
	return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
	    PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
}

uint32_t
pcie_get_aer_uce_mask()
{
	return (pcie_aer_uce_mask);
}
uint32_t
pcie_get_aer_ce_mask()
{
	return (pcie_aer_ce_mask);
}
uint32_t
pcie_get_aer_suce_mask()
{
	return (pcie_aer_suce_mask);
}
uint32_t
pcie_get_serr_mask()
{
	return (pcie_serr_disable_flag);
}

void
pcie_set_aer_uce_mask(uint32_t mask)
{
	pcie_aer_uce_mask = mask;
	if (mask & PCIE_AER_UCE_UR)
		pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
	else
		pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;

	if (mask & PCIE_AER_UCE_ECRC)
		pcie_ecrc_value = 0;
}

void
pcie_set_aer_ce_mask(uint32_t mask)
{
	pcie_aer_ce_mask = mask;
}
void
pcie_set_aer_suce_mask(uint32_t mask)
{
	pcie_aer_suce_mask = mask;
}
void
pcie_set_serr_mask(uint32_t mask)
{
	pcie_serr_disable_flag = mask;
}

/*
 * Is the rdip a child of dip?  Used for checking certain CTLOPS from
 * bubbling up erroneously, e.g. ISA ctlops to a PCI-PCI Bridge.
 */
boolean_t
pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t	*cdip = ddi_get_child(dip);
	for (; cdip; cdip = ddi_get_next_sibling(cdip))
		if (cdip == rdip)
			break;
	return (cdip != NULL);
}

boolean_t
pcie_is_link_disabled(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
		    PCIE_LINKCTL_LINK_DISABLE)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Initialize the MPS for a root port.
 *
 * dip - dip of root port device.
 */
void
pcie_init_root_port_mps(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	int rp_cap, max_supported = pcie_max_mps;

	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
	    ddi_get_child(dip), &max_supported);

	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL,
	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	if (rp_cap < max_supported)
		max_supported = rp_cap;

	bus_p->bus_mps = max_supported;
	(void) pcie_initchild_mps(dip);
}

/*
 * Initialize the Maximum Payload Size of a device.
 *
 * cdip - dip of device.
 *
 * returns - DDI_SUCCESS or DDI_FAILURE
 */
int
pcie_initchild_mps(dev_info_t *cdip)
{
	pcie_bus_t	*bus_p;
	dev_info_t	*pdip = ddi_get_parent(cdip);
	uint8_t		dev_type;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));
		return (DDI_FAILURE);
	}

	dev_type = bus_p->bus_dev_type;

	/*
	 * For ARI Devices, only function zero's MPS needs to be set.
	 */
	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
		pcie_req_id_t child_bdf;

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (DDI_FAILURE);
		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
			return (DDI_SUCCESS);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		int suggested_mrrs, fabric_mps;
		uint16_t device_mps, device_mps_cap, device_mrrs, dev_ctrl;

		dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		if ((fabric_mps = (PCIE_IS_RP(bus_p) ? bus_p :
		    PCIE_DIP2BUS(pdip))->bus_mps) < 0) {
			dev_ctrl = (dev_ctrl & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
			    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
			    (pcie_devctl_default &
			    (PCIE_DEVCTL_MAX_READ_REQ_MASK |
			    PCIE_DEVCTL_MAX_PAYLOAD_MASK));

			PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
			return (DDI_SUCCESS);
		}

		device_mps_cap = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

		device_mrrs = (dev_ctrl & PCIE_DEVCTL_MAX_READ_REQ_MASK) >>
		    PCIE_DEVCTL_MAX_READ_REQ_SHIFT;

		if (device_mps_cap < fabric_mps)
			device_mrrs = device_mps = device_mps_cap;
		else
			device_mps = (uint16_t)fabric_mps;

		suggested_mrrs = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
		    cdip, DDI_PROP_DONTPASS, "suggested-mrrs", device_mrrs);

		if ((device_mps == fabric_mps) ||
		    (suggested_mrrs < device_mrrs))
			device_mrrs = (uint16_t)suggested_mrrs;

		/*
		 * Replace MPS and MRRS settings.
		 */
		dev_ctrl &= ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK);

		dev_ctrl |= ((device_mrrs << PCIE_DEVCTL_MAX_READ_REQ_SHIFT) |
		    device_mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

		bus_p->bus_mps = device_mps;
	}

	return (DDI_SUCCESS);
}

/*
 * Scans a device tree/branch for maximum payload size capabilities.
 *
 * rc_dip - dip of Root Complex.
 * dip - dip of device where scan will begin.
 * max_supported (IN) - maximum allowable MPS.
 * max_supported (OUT) - maximum payload size capability of fabric.
 */
void
pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
	if (dip == NULL)
		return;

	/*
	 * Perform a fabric scan to obtain Maximum Payload Capabilities
	 */
	(void) pcie_scan_mps(rc_dip, dip, max_supported);

	PCIE_DBG("MPS: Highest Common MPS= %x\n", *max_supported);
}

1865 /*
1866 * Scans fabric and determines Maximum Payload Size based on
1867 * highest common denominator alogorithm
1868 */
static void
pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
	int circular_count;
	pcie_max_supported_t max_pay_load_supported;

	max_pay_load_supported.dip = rc_dip;
	max_pay_load_supported.highest_common_mps = *max_supported;

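	/*
	 * Hold the parent busy (ndi_devi_enter()) so the device tree
	 * cannot change while ddi_walk_devs() visits every node at or
	 * below dip; pcie_get_max_supported() lowers highest_common_mps
	 * as the walk proceeds.
	 */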
	ndi_devi_enter(ddi_get_parent(dip), &circular_count);
	ddi_walk_devs(dip, pcie_get_max_supported,
	    (void *)&max_pay_load_supported);
	ndi_devi_exit(ddi_get_parent(dip), circular_count);

	*max_supported = max_pay_load_supported.highest_common_mps;
}

/*
 * Called as part of the Maximum Payload Size scan.
 */
static int
pcie_get_max_supported(dev_info_t *dip, void *arg)
{
	uint32_t max_supported;
	uint16_t cap_ptr;
	pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
	pci_regspec_t *reg;
	int rlen;
	caddr_t virt;
	ddi_acc_handle_t config_handle;

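	/*
	 * Config space for each device visited is mapped through the
	 * root complex's child (see the pcie_map_phys() call below), so
	 * bail out early if the root complex has no children.
	 */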
	if (ddi_get_child(current->dip) == NULL) {
		goto fail1;
	}

	if (pcie_dev(dip) == DDI_FAILURE) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Not a PCIe dev\n", ddi_driver_name(dip));
		goto fail1;
	}

	/*
	 * If the suggested-mrrs property exists, then don't include this
	 * device in the MPS capabilities scan.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "suggested-mrrs") != 0)
		goto fail1;

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Cannot read reg\n", ddi_driver_name(dip));
		goto fail1;
	}

	if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
	    &config_handle) != DDI_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys "
		    "failed\n", ddi_driver_name(dip));
		goto fail2;
	}

	if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
	    DDI_FAILURE) {
		goto fail3;
	}

	max_supported = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
	    PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
	    max_supported);

	if (max_supported < current->highest_common_mps)
		current->highest_common_mps = max_supported;

fail3:
	pcie_unmap_phys(&config_handle, reg);
fail2:
	kmem_free(reg, rlen);
fail1:
	return (DDI_WALK_CONTINUE);
}

/*
 * Determines if there are any root ports attached to a root complex.
 *
 * dip - dip of root complex
 *
 * Returns - DDI_SUCCESS if there is at least one root port, otherwise
 *	     DDI_FAILURE.
 */
int
pcie_root_port(dev_info_t *dip)
{
	int port_type;
	uint16_t cap_ptr;
	ddi_acc_handle_t config_handle;
	dev_info_t *cdip = ddi_get_child(dip);

	/*
	 * Determine if any of the children of the passed in dip
	 * are root ports.
	 */
	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {

		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS)
			continue;

		if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E,
		    &cap_ptr)) == DDI_FAILURE) {
			pci_config_teardown(&config_handle);
			continue;
		}

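		/*
		 * Extract the Device/Port Type field from the PCIe
		 * Capabilities register; a root port reports
		 * PCIE_PCIECAP_DEV_TYPE_ROOT.
		 */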
		port_type = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
		    PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

		pci_config_teardown(&config_handle);

		if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
			return (DDI_SUCCESS);
	}

	/* No root ports were found */

	return (DDI_FAILURE);
}

/*
 * Function that determines if a device is a PCIe device.
 *
 * dip - dip of device.
 *
 * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
 */
int
pcie_dev(dev_info_t *dip)
{
	/* get parent device's device_type property */
	char *device_type;
	int rc = DDI_FAILURE;
	dev_info_t *pdip = ddi_get_parent(dip);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_DONTPASS, "device_type", &device_type)
	    != DDI_PROP_SUCCESS) {
		return (DDI_FAILURE);
	}

	if (strcmp(device_type, "pciex") == 0)
		rc = DDI_SUCCESS;
	else
		rc = DDI_FAILURE;

	ddi_prop_free(device_type);
	return (rc);
}

/*
 * Function to map in a device's memory space.
 */
static int
pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	ddi_device_acc_attr_t attr;

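	/*
	 * The fabric scan may touch devices in an unknown state, so the
	 * mapping is marked DDI_CAUTIOUS_ACC: accesses are performed
	 * carefully and faults are contained rather than panicking the
	 * system.
	 */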
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;

	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handlep);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = 0;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = attr;

	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)phys_spec;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	result = ddi_map(dip, &mr, 0, 0, addrp);

	if (result != DDI_SUCCESS) {
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}

/*
 * Unmap memory that was mapped in with pcie_map_phys().
 */
static void
pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)ph;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);

	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}

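/*
 * Record whether errors from devices that advertise Role-Based Error
 * Reporting (RBER) should be treated as fatal.  The value lives in the
 * Root Port's fault data and is read back via pcie_get_rber_fatal().
 */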
void
pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	bus_p->bus_pfd->pe_rber_fatal = val;
}

/*
 * Return parent Root Port's pe_rber_fatal value.
 */
boolean_t
pcie_get_rber_fatal(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);
	return (rp_bus_p->bus_pfd->pe_rber_fatal);
}

int
pcie_ari_supported(dev_info_t *dip)
{
	uint32_t devcap2;
	uint16_t pciecap;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint8_t dev_type;

	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);

	if (bus_p == NULL)
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	dev_type = bus_p->bus_dev_type;

	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	if (pcie_disable_ari) {
		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

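	/*
	 * ARI Forwarding first appears with version 2 of the PCI Express
	 * Capability structure, so the Device Capabilities 2 register
	 * checked below only exists on v2-or-later devices.
	 */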
	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);

	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);

	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
	    dip, devcap2);

	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
		PCIE_DBG("pcie_ari_supported: "
		    "dip=%p: ARI Forwarding is supported\n", dip);
		return (PCIE_ARI_FORW_SUPPORTED);
	}
	return (PCIE_ARI_FORW_NOT_SUPPORTED);
}

int
pcie_ari_enable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
	devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);

	PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
	    dip, devctl2);

	return (DDI_SUCCESS);
}

int
pcie_ari_disable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
	devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);

	PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
	    dip, devctl2);

	return (DDI_SUCCESS);
}

int
pcie_ari_is_enabled(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (PCIE_ARI_FORW_DISABLED);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);

	PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
	    dip, devctl2);

	if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
		PCIE_DBG("pcie_ari_is_enabled: "
		    "dip=%p: ARI Forwarding is enabled\n", dip);
		return (PCIE_ARI_FORW_ENABLED);
	}

	return (PCIE_ARI_FORW_DISABLED);
}

int
pcie_ari_device(dev_info_t *dip)
{
	ddi_acc_handle_t handle;
	uint16_t cap_ptr;

	PCIE_DBG("pcie_ari_device: dip=%p\n", dip);

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated.  This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	/* First make sure it is a PCIe device */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (PCIE_NOT_ARI_DEVICE);

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
	    != DDI_SUCCESS) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* Locate the ARI Capability */

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
	    &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* The ARI Capability was found, so it must be an ARI device */
	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);

	pci_config_teardown(&handle);
	return (PCIE_ARI_DEVICE);
}

int
pcie_ari_get_next_function(dev_info_t *dip, int *func)
{
	uint32_t val;
	uint16_t cap_ptr, next_function;
	ddi_acc_handle_t handle;

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated.  This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if ((PCI_CAP_LOCATE(handle,
	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

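	/*
	 * The Next Function Number field (bits 15:8 of the ARI Capability
	 * register) links each function to the next implemented one; the
	 * list begins at function 0 and ends at a function whose
	 * next-function field is 0.
	 */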
	val = PCI_CAP_GET32(handle, NULL, cap_ptr, PCIE_ARI_CAP);

	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
	    PCIE_ARI_CAP_NEXT_FUNC_MASK;

	pci_config_teardown(&handle);

	*func = next_function;

	return (DDI_SUCCESS);
}

dev_info_t *
pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
{
	pcie_req_id_t child_bdf;
	dev_info_t *cdip;

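	/*
	 * Walk the children of dip, comparing each child's ARI function
	 * number (its BDF masked with PCIE_REQ_ID_ARI_FUNC_MASK) against
	 * the requested function.
	 */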
	for (cdip = ddi_get_child(dip); cdip;
	    cdip = ddi_get_next_sibling(cdip)) {

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (NULL);

		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
			return (cdip);
	}
	return (NULL);
}

#ifdef DEBUG

static void
pcie_print_bus(pcie_bus_t *bus_p)
{
	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);

	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
}

/*
 * For debugging purposes set pcie_dbg_print != 0 to see printf messages
 * during interrupt.
 *
 * When a proper solution is in place this code will disappear.
 * Potential solutions are:
 * o circular buffers
 * o taskq to print at lower pil
 */
int pcie_dbg_print = 0;
void
pcie_dbg(char *fmt, ...)
{
	va_list ap;

	if (!pcie_debug_flags) {
		return;
	}
	va_start(ap, fmt);
	if (servicing_interrupt()) {
		if (pcie_dbg_print) {
			prom_vprintf(fmt, ap);
		}
	} else {
		prom_vprintf(fmt, ap);
	}
	va_end(ap);
}
#endif	/* DEBUG */

#if defined(__i386) || defined(__amd64)
static void
pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
    boolean_t *empty_mem_range)
{
	uint8_t class, subclass;
	uint_t val;

	class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
	subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);

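	/*
	 * For PCI-PCI bridges, the I/O Base Low register carries bits
	 * 15:12 of the I/O range base and the Memory Base register
	 * carries bits 31:20 of the memory range base.  A base that
	 * decodes to zero is taken to mean the range was never
	 * programmed, i.e. it is empty.
	 */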
	if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
		val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
		    PCI_BCNF_IO_MASK) << 8);
		/*
		 * Assuming that a zero I/O base implies an invalid
		 * I/O range.  Likewise for the memory range.
		 */
		if (val == 0)
			*empty_io_range = B_TRUE;
		val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
		    PCI_BCNF_MEM_MASK) << 16);
		if (val == 0)
			*empty_mem_range = B_TRUE;
	}
}

#endif /* defined(__i386) || defined(__amd64) */