/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright 2019 Joyent, Inc.
 */

/* x86 specific code used by the pcieb driver */

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/pcie.h>
#include <sys/pci_cap.h>
#include <sys/pcie_impl.h>
#include <sys/pcie_acpi.h>
#include <sys/hotplug/hpctrl.h>
#include <io/pciex/pcieb.h>
#include <io/pciex/pcie_nb5000.h>

/* Flag to turn off the Intel error handling workarounds */
int pcieb_intel_workaround_disable = 0;
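/*
 * Since this is a plain driver global, it can presumably be tuned the usual
 * way via /etc/system (illustrative, not part of the original source):
 *	set pcieb:pcieb_intel_workaround_disable = 1
 */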

/*
 * Error callback for protected peek/poke accesses: walk the fabric below
 * this bridge and scan for errors.
 */
void
pcieb_peekpoke_cb(dev_info_t *dip, ddi_fm_error_t *derr)
{
	pf_eh_enter(PCIE_DIP2BUS(dip));
	(void) pf_scan_fabric(dip, derr, NULL);
	pf_eh_exit(PCIE_DIP2BUS(dip));
}

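/*
 * Point a DDI access handle's error-protection fields at this bridge's
 * mutexes and fabric-scan callback so protected accesses through the
 * handle are serialized and scanned on error.
 */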
void
pcieb_set_prot_scan(dev_info_t *dip, ddi_acc_impl_t *hdlp)
{
	pcieb_devstate_t *pcieb = ddi_get_soft_state(pcieb_state,
	    ddi_get_instance(dip));

	hdlp->ahi_err_mutexp = &pcieb->pcieb_err_mutex;
	hdlp->ahi_peekpoke_mutexp = &pcieb->pcieb_peek_poke_mutex;
	hdlp->ahi_scan_dip = dip;
	hdlp->ahi_scan = pcieb_peekpoke_cb;
}

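/*
 * Peek/poke ctlops: for Root Ports the access is routed through
 * pci_peekpoke_check() so it is serialized against other protected accesses
 * and any resulting error triggers a fabric scan via pcieb_peekpoke_cb();
 * other bridges fall through to the default ddi_ctlops() path.
 */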
int
pcieb_plat_peekpoke(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	pcieb_devstate_t *pcieb = ddi_get_soft_state(pcieb_state,
	    ddi_get_instance(dip));

	if (!PCIE_IS_RP(PCIE_DIP2BUS(dip)))
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));

	return (pci_peekpoke_check(dip, rdip, ctlop, arg, result,
	    ddi_ctlops, &pcieb->pcieb_err_mutex,
	    &pcieb->pcieb_peek_poke_mutex,
	    pcieb_peekpoke_cb));
}

/* x86 specific workarounds needed at the end of pcieb attach */
void
pcieb_plat_attach_workaround(dev_info_t *dip)
{
	/* Must apply workarounds only after all initialization is done */
	pcieb_intel_error_workaround(dip);
	pcieb_intel_mps_workaround(dip);
}

/* Workarounds to enable error handling on certain Intel chipsets */
void
pcieb_intel_error_workaround(dev_info_t *dip)
{
	pcieb_devstate_t *pcieb = ddi_get_soft_state(pcieb_state,
	    ddi_get_instance(dip));

	pcieb_intel_serr_workaround(dip, pcieb->pcieb_no_aer_msi);
	pcieb_intel_rber_workaround(dip);
	pcieb_intel_sw_workaround(dip);
}

int
pcieb_plat_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	return (i_ddi_intr_ops(dip, rdip, intr_op, hdlp, result));
}

/* shpc is not supported on x86 */
/*ARGSUSED*/
int
pcieb_plat_pcishpc_probe(dev_info_t *dip, ddi_acc_handle_t config_handle)
{
	return (DDI_FAILURE);
}

/*
 * Dummy functions to get around the fact that there's no shpc module on x86
 * today.
 */
/*ARGSUSED*/
int
pcishpc_init(dev_info_t *dip)
{
	return (DDI_FAILURE);
}

/*ARGSUSED*/
int
pcishpc_uninit(dev_info_t *dip)
{
	return (DDI_FAILURE);
}

/*ARGSUSED*/
int
pcishpc_intr(dev_info_t *dip)
{
	return (DDI_INTR_UNCLAIMED);
}

/*ARGSUSED*/
boolean_t
pcieb_plat_pwr_disable(dev_info_t *dip)
{
	/* Always disable on x86 */
	return (B_TRUE);
}

boolean_t
pcieb_plat_msi_supported(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	uint16_t vendor_id, device_id;

	/* bus_dev_ven_id: vendor ID in the low 16 bits, device ID above */
	vendor_id = bus_p->bus_dev_ven_id & 0xFFFF;
	device_id = bus_p->bus_dev_ven_id >> 16;

	/*
	 * Intel ESB2 switches have an erratum which prevents using MSIs
	 * for hotplug.
	 */
	return (((vendor_id == INTEL_VENDOR_ID) &&
	    INTEL_ESB2_SW_PCIE_DEV_ID(device_id)) ? B_FALSE : B_TRUE);
}

void
pcieb_plat_intr_attach(pcieb_devstate_t *pcieb)
{
	/*
	 * _OSC initialization needs to be done before interrupts are
	 * initialized.
	 */
	pcieb_init_osc(pcieb->pcieb_dip);
}

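/*
 * Children with an "interrupts" property get parent private data carrying a
 * single intrspec. The ddi_parent_private_data struct and its intrspec are
 * one allocation, sized and freed as a pair in pcieb_plat_uninitchild().
 */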
void
pcieb_plat_initchild(dev_info_t *child)
{
	struct ddi_parent_private_data *pdptr;

	if (ddi_getprop(DDI_DEV_T_NONE, child, DDI_PROP_DONTPASS, "interrupts",
	    -1) != -1) {
		pdptr = kmem_zalloc((sizeof (struct ddi_parent_private_data) +
		    sizeof (struct intrspec)), KM_SLEEP);
		pdptr->par_intr = (struct intrspec *)(pdptr + 1);
		pdptr->par_nintr = 1;
		ddi_set_parent_data(child, pdptr);
	} else
		ddi_set_parent_data(child, NULL);
}

void
pcieb_plat_uninitchild(dev_info_t *child)
{
	struct ddi_parent_private_data *pdptr;

	if ((pdptr = ddi_get_parent_data(child)) != NULL)
		kmem_free(pdptr, (sizeof (*pdptr) + sizeof (struct intrspec)));

	ddi_set_parent_data(child, NULL);
}

/* _OSC related */
void
pcieb_init_osc(dev_info_t *devi)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(devi);
	uint32_t osc_flags = OSC_CONTROL_PCIE_ADV_ERR;

	/*
	 * Call the _OSC method for two reasons:
	 * 1. Hotplug: To determine if it is native or ACPI mode.
	 *
	 * 2. Error handling: Inform firmware that the OS can support AER
	 *    error handling. Currently we don't care what the BIOS response
	 *    is and instead set up interrupts for error handling as if it
	 *    were supported.
	 *
	 * For hotpluggable slots the _OSC method has already been called as
	 * part of the hotplug initialization.
	 * For non-hotpluggable slots we need to call the _OSC method only for
	 * Root Ports (for AER support).
	 */
	if (!pcie_is_osc(devi) && PCIE_IS_RP(bus_p) && PCIE_HAS_AER(bus_p))
		(void) pcie_acpi_osc(devi, &osc_flags);
}

/*
 * Intel chip-specific workarounds. Right now they're limited to the 5000,
 * 5400, and 7300 series chipsets.
 */
typedef struct x86_error_reg {
	uint32_t	offset;		/* Config space offset */
	uint_t		size;		/* Register size in bits: 8/16/32 */
	uint32_t	mask;		/* Bits of the old value to keep */
	uint32_t	value1;		/* Value for MSI case */
	uint32_t	value2;		/* Value for machinecheck case */
} x86_error_reg_t;

typedef struct x86_error_tbl {
	uint16_t	vendor_id;
	uint16_t	device_id_low;
	uint16_t	device_id_high;
	uint8_t		rev_id_low;
	uint8_t		rev_id_high;
	x86_error_reg_t	*error_regs;
	int		error_regs_len;
} x86_error_tbl_t;

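/*
 * Each x86_error_reg_t is applied by pcieb_intel_serr_workaround() as a
 * read-modify-write of the matching device's config space:
 *
 *	new = (cur & mask) | (mcheck ? value2 : value1)
 *
 * i.e. value1 is programmed when AER MSIs are usable and value2 when errors
 * must instead escalate to a Machine Check through SERR.
 */
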
/*
 * Chipset- and device-specific settings that are required for error handling
 * (reporting, forwarding, and response at the RC) beyond the standard
 * registers in the PCIE and AER caps.
 *
 * The Northbridge Root Port settings also apply to the ESI port. The ESI
 * port is a special leaf device but functions like a root port connected
 * to the Southbridge and receives all the onboard Southbridge errors
 * including those from Southbridge Root Ports. However, this does not
 * include the Southbridge Switch Ports, which act like normal switch ports
 * and are connected to the Northbridge through a separate link.
 *
 * PCIE errors from the ESB2 Southbridge RPs are simply forwarded to the ESI
 * port on the Northbridge.
 *
 * If MSIs don't work, we want UEs (Fatal and Non-Fatal) to panic the system,
 * except for URs. We do this by having the Root Ports respond with a System
 * Error and having that trigger a Machine Check (MCE).
 */

/*
 * 7300 Northbridge Root Ports
 */
static x86_error_reg_t intel_7300_rp_regs[] = {
	/* Command Register - Enable SERR */
	{0x4, 16, 0xFFFF, 0x0, PCI_COMM_SERR_ENABLE},

	/* Root Control Register - SERR on NFE/FE */
	{0x88, 16, 0x0, 0x0, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN},

	/* AER UE Mask - Mask UR */
	{0x108, 32, 0x0, PCIE_AER_UCE_UR, PCIE_AER_UCE_UR},

	/* PEXCTRL[21] check for certain malformed TLP types and MSI enable */
	{0x48, 32, 0xFFFFFFFF, 0xC0200000, 0x200000},
	/* PEXCTRL3[7]. MSI RAS error enable */
	{0x4D, 32, 0xFFFFFFFF, 0x1, 0x0},

	/* PEX_ERR_DOCMD[7:0] */
	{0x144, 8, 0x0, 0x0, 0xF0},

	/* EMASK_UNCOR_PEX[21:0] UE mask */
	{0x148, 32, 0x0, PCIE_AER_UCE_UR, PCIE_AER_UCE_UR},

	/* EMASK_RP_PEX[2:0] FE, UE, CE message detect mask */
	{0x150, 8, 0x0, 0x0, 0x1},
};
#define	INTEL_7300_RP_REGS_LEN \
	(sizeof (intel_7300_rp_regs) / sizeof (x86_error_reg_t))

/*
 * 5000 Northbridge Root Ports
 */
static x86_error_reg_t intel_5000_rp_regs[] = {
	/* Command Register - Enable SERR */
	{0x4, 16, 0xFFFF, PCI_COMM_SERR_ENABLE, PCI_COMM_SERR_ENABLE},

	/* Root Control Register - SERR on NFE/FE/CE */
	{0x88, 16, 0x0, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN,
	    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN},

	/* AER UE Mask - Mask UR */
	{0x108, 32, 0x0, PCIE_AER_UCE_UR, PCIE_AER_UCE_UR},

	/* PEXCTRL[21] check for certain malformed TLP type */
	{0x48, 32, 0xFFFFFFFF, 0xC0200000, 0x200000},
	/* PEXCTRL3[7]. MSI RAS error enable. */
	{0x4D, 32, 0xFFFFFFFF, 0x1, 0x0},

	/* PEX_ERR_DOCMD[7:0] */
	{0x144, 8, 0x0, 0x0, 0xF0},

	/* EMASK_UNCOR_PEX[21:0] UE mask */
	{0x148, 32, 0x0, PCIE_AER_UCE_UR, PCIE_AER_UCE_UR},

	/* EMASK_RP_PEX[2:0] FE, UE, CE message detect mask */
	{0x150, 8, 0x0, 0x0, 0x1},
};
#define	INTEL_5000_RP_REGS_LEN \
	(sizeof (intel_5000_rp_regs) / sizeof (x86_error_reg_t))

/*
 * 5400 Northbridge Root Ports.
 */
static x86_error_reg_t intel_5400_rp_regs[] = {
	/* Command Register - Enable SERR */
	{0x4, 16, 0xFFFF, PCI_COMM_SERR_ENABLE, PCI_COMM_SERR_ENABLE},

	/* Root Control Register - SERR on NFE/FE/CE */
	{0x88, 16, 0x0, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN,
	    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN},

	/* AER UE Mask - Mask UR */
	{0x108, 32, 0x0, PCIE_AER_UCE_UR, PCIE_AER_UCE_UR},

	/* PEXCTRL[21] check for certain malformed TLP types */
	{0x48, 32, 0xFFFFFFFF, 0xC0200000, 0x200000},
	/* PEXCTRL3. MSI RAS error enable. */
	{0x4E, 8, 0x0, 0x1, 0x0},

	/* PEX_ERR_DOCMD[11:0] */
	{0x144, 16, 0x0, 0x0, 0xFF0},

	/* PEX_ERR_PIN_MASK[4:0] do not mask ERR[2:0] pins used by DOCMD */
	{0x146, 16, 0x0, 0x10, 0x10},

	/* EMASK_UNCOR_PEX[21:0] UE mask */
	{0x148, 32, 0x0, PCIE_AER_UCE_UR, PCIE_AER_UCE_UR},

	/* EMASK_RP_PEX[2:0] FE, UE, CE message detect mask */
	{0x150, 8, 0x0, 0x0, 0x1},
};
#define	INTEL_5400_RP_REGS_LEN \
	(sizeof (intel_5400_rp_regs) / sizeof (x86_error_reg_t))


/*
 * ESB2 Southbridge Root Ports
 */
static x86_error_reg_t intel_esb2_rp_regs[] = {
	/* Command Register - Enable SERR */
	{0x4, 16, 0xFFFF, PCI_COMM_SERR_ENABLE, PCI_COMM_SERR_ENABLE},

	/* Root Control Register - SERR on NFE/FE/CE */
	{0x5c, 16, 0x0, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN,
	    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN},

	/* UEM[20:0] UE mask (write-once) */
	{0x148, 32, 0x0, PCIE_AER_UCE_UR, PCIE_AER_UCE_UR},
};
#define	INTEL_ESB2_RP_REGS_LEN \
	(sizeof (intel_esb2_rp_regs) / sizeof (x86_error_reg_t))


/*
 * ESB2 Southbridge Switch Ports
 */
static x86_error_reg_t intel_esb2_sw_regs[] = {
	/* Command Register - Enable SERR */
	{0x4, 16, 0xFFFF, PCI_COMM_SERR_ENABLE, PCI_COMM_SERR_ENABLE},

	/* AER UE Mask - Mask UR */
	{0x108, 32, 0x0, PCIE_AER_UCE_UR, PCIE_AER_UCE_UR},
};
#define	INTEL_ESB2_SW_REGS_LEN \
	(sizeof (intel_esb2_sw_regs) / sizeof (x86_error_reg_t))


x86_error_tbl_t x86_error_init_tbl[] = {
	/* Intel 7300: 3600 = ESI, 3604-360A = NB root ports */
	{0x8086, 0x3600, 0x3600, 0x0, 0xFF,
	    intel_7300_rp_regs, INTEL_7300_RP_REGS_LEN},
	{0x8086, 0x3604, 0x360A, 0x0, 0xFF,
	    intel_7300_rp_regs, INTEL_7300_RP_REGS_LEN},

	/* Intel 5000: 25C0, 25D0, 25D4, 25D8 = ESI */
	{0x8086, 0x25C0, 0x25C0, 0x0, 0xFF,
	    intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},
	{0x8086, 0x25D0, 0x25D0, 0x0, 0xFF,
	    intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},
	{0x8086, 0x25D4, 0x25D4, 0x0, 0xFF,
	    intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},
	{0x8086, 0x25D8, 0x25D8, 0x0, 0xFF,
	    intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},

	/* Intel 5000: 25E2-25E7 and 25F7-25FA = NB root ports */
	{0x8086, 0x25E2, 0x25E7, 0x0, 0xFF,
	    intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},
	{0x8086, 0x25F7, 0x25FA, 0x0, 0xFF,
	    intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},

	/* Intel 5400: 4000-4001, 4003 = ESI and 4021-4029 = NB root ports */
	{0x8086, 0x4000, 0x4001, 0x0, 0xFF,
	    intel_5400_rp_regs, INTEL_5400_RP_REGS_LEN},
	{0x8086, 0x4003, 0x4003, 0x0, 0xFF,
	    intel_5400_rp_regs, INTEL_5400_RP_REGS_LEN},
	{0x8086, 0x4021, 0x4029, 0x0, 0xFF,
	    intel_5400_rp_regs, INTEL_5400_RP_REGS_LEN},

	/* Intel 631xESB/632xESB aka ESB2: 2690-2697 = SB root ports */
	{0x8086, 0x2690, 0x2697, 0x0, 0xFF,
	    intel_esb2_rp_regs, INTEL_ESB2_RP_REGS_LEN},

	/* Intel Switches on esb2: 3500-3503, 3510-351B */
	{0x8086, 0x3500, 0x3503, 0x0, 0xFF,
	    intel_esb2_sw_regs, INTEL_ESB2_SW_REGS_LEN},
	{0x8086, 0x3510, 0x351B, 0x0, 0xFF,
	    intel_esb2_sw_regs, INTEL_ESB2_SW_REGS_LEN},

	/* XXX Intel PCIe-PCIx on esb2: 350C */
};
static int x86_error_init_tbl_len =
    sizeof (x86_error_init_tbl) / sizeof (x86_error_tbl_t);

/*
 * The main goal of this workaround is to apply chipset-specific settings if
 * MSIs happen to be enabled on this device. Otherwise, make the system
 * Machine Check/Panic if a UE is detected in the fabric.
 */
void
pcieb_intel_serr_workaround(dev_info_t *dip, boolean_t mcheck)
{
	uint16_t vid, did;
	uint8_t rid;
	int i, j;
	x86_error_tbl_t *tbl;
	x86_error_reg_t *reg;
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	ddi_acc_handle_t cfg_hdl = bus_p->bus_cfg_hdl;
	uint16_t bdf = bus_p->bus_bdf;

	if (pcieb_intel_workaround_disable)
		return;

	vid = bus_p->bus_dev_ven_id & 0xFFFF;
	did = bus_p->bus_dev_ven_id >> 16;
	rid = bus_p->bus_rev_id;

	PCIEB_DEBUG(DBG_ATTACH, dip, "VID:0x%x DID:0x%x RID:0x%x bdf=0x%x\n",
	    vid, did, rid, bdf);

	tbl = x86_error_init_tbl;
	for (i = 0; i < x86_error_init_tbl_len; i++, tbl++) {
		if (!((vid == tbl->vendor_id) &&
		    (did >= tbl->device_id_low) &&
		    (did <= tbl->device_id_high) &&
		    (rid >= tbl->rev_id_low) &&
		    (rid <= tbl->rev_id_high)))
			continue;

		if (mcheck && PCIE_IS_RP(bus_p))
			pcie_set_rber_fatal(dip, B_TRUE);

		reg = tbl->error_regs;
		for (j = 0; j < tbl->error_regs_len; j++, reg++) {
			/* 0xDEADBEEF is a sentinel for the debug output */
			uint32_t data = 0xDEADBEEF;
			uint32_t value = 0xDEADBEEF;

			switch (reg->size) {
			case 32:
				data = (uint32_t)pci_config_get32(cfg_hdl,
				    reg->offset);
				value = (mcheck ?
				    ((data & reg->mask) | reg->value2) :
				    ((data & reg->mask) | reg->value1));
				pci_config_put32(cfg_hdl, reg->offset, value);
				value = (uint32_t)pci_config_get32(cfg_hdl,
				    reg->offset);
				break;
			case 16:
				data = (uint32_t)pci_config_get16(cfg_hdl,
				    reg->offset);
				value = (mcheck ?
				    ((data & reg->mask) | reg->value2) :
				    ((data & reg->mask) | reg->value1));
				pci_config_put16(cfg_hdl, reg->offset,
				    (uint16_t)value);
				value = (uint32_t)pci_config_get16(cfg_hdl,
				    reg->offset);
				break;
			case 8:
				data = (uint32_t)pci_config_get8(cfg_hdl,
				    reg->offset);
				value = (mcheck ?
				    ((data & reg->mask) | reg->value2) :
				    ((data & reg->mask) | reg->value1));
				pci_config_put8(cfg_hdl, reg->offset,
				    (uint8_t)value);
				value = (uint32_t)pci_config_get8(cfg_hdl,
				    reg->offset);
				break;
			}

			PCIEB_DEBUG(DBG_ATTACH, dip, "bdf:%x mcheck:%d "
			    "size:%d off:0x%x mask:0x%x value:0x%x + "
			    "orig:0x%x -> 0x%x\n", bdf, mcheck, reg->size,
			    reg->offset, reg->mask,
			    (mcheck ? reg->value2 : reg->value1),
			    data, value);
		}
	}
}

/*
 * For devices that support Role-Based Error Reporting, make several UEs have
 * a FATAL severity. That way a Fatal Message will be sent instead of a
 * Correctable Message. Without full FMA support, CEs will be ignored.
 */
uint32_t pcieb_rber_sev = (PCIE_AER_UCE_TRAINING | PCIE_AER_UCE_DLP |
    PCIE_AER_UCE_SD | PCIE_AER_UCE_PTLP | PCIE_AER_UCE_FCP | PCIE_AER_UCE_TO |
    PCIE_AER_UCE_CA | PCIE_AER_UCE_RO | PCIE_AER_UCE_MTLP | PCIE_AER_UCE_ECRC);
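/*
 * Note that PCIE_AER_UCE_UR is deliberately absent from this set: URs are
 * masked in the tables above and must not escalate to fatal.
 */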

void
pcieb_intel_rber_workaround(dev_info_t *dip)
{
	uint32_t rber;
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);

	if (pcieb_intel_workaround_disable)
		return;

	/*
	 * Check the Root Port's machinecheck setting to determine if this
	 * workaround is needed or not.
	 */
	if (!pcie_get_rber_fatal(dip))
		return;

	if (!PCIE_IS_PCIE(bus_p) || !PCIE_HAS_AER(bus_p))
		return;

	rber = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
	    PCIE_DEVCAP_ROLE_BASED_ERR_REP;
	if (!rber)
		return;

	(void) PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, pcieb_rber_sev);
}

/*
 * The Intel 5000 Chipset has an erratum that requires read completion
 * coalescing to be disabled if the Max Payload Size is set to 256 bytes.
 */
void
pcieb_intel_mps_workaround(dev_info_t *dip)
{
	uint16_t vid, did;
	uint32_t pexctrl;
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);

	vid = bus_p->bus_dev_ven_id & 0xFFFF;
	did = bus_p->bus_dev_ven_id >> 16;

	if ((vid == INTEL_VENDOR_ID) && (INTEL_NB5000_PCIE_DEV_ID(did) ||
	    INTEL_NB5100_PCIE_DEV_ID(did))) {

		pexctrl = pci_config_get32(bus_p->bus_cfg_hdl,
		    INTEL_NB5000_PEXCTRL_OFFSET);
		/*
		 * Turn off coalescing (bit 10)
		 */
		pexctrl &= ~INTEL_NB5000_PEXCTRL_COALESCE_EN;

		pci_config_put32(bus_p->bus_cfg_hdl,
		    INTEL_NB5000_PEXCTRL_OFFSET, pexctrl);
	}
}

/*
 * Workaround for certain switches regardless of platform
 */
void
pcieb_intel_sw_workaround(dev_info_t *dip)
{
	uint16_t vid, regw;
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	ddi_acc_handle_t cfg_hdl = bus_p->bus_cfg_hdl;

	if (pcieb_intel_workaround_disable)
		return;

	if (!PCIE_IS_SW(PCIE_DIP2BUS(dip)))
		return;

	vid = bus_p->bus_dev_ven_id & 0xFFFF;

	/*
	 * Intel and PLX switches require SERR in the CMD reg to forward error
	 * messages, though this is not PCIE spec-compliant behavior. To
	 * prevent the switches themselves from reporting errors on URs when
	 * the CMD reg has SERR enabled (which is expected according to the
	 * PCIE spec), we rely on masking URs in the AER cap.
	 */
	if (vid == 0x8086 || vid == 0x10B5) {
		regw = pci_config_get16(cfg_hdl, PCI_CONF_COMM);
		pci_config_put16(cfg_hdl, PCI_CONF_COMM,
		    regw | PCI_COMM_SERR_ENABLE);
	}
}

int
pcieb_plat_ctlops(dev_info_t *rdip, ddi_ctl_enum_t ctlop, void *arg)
{
	struct detachspec *ds;
	struct attachspec *as;

	switch (ctlop) {
	case DDI_CTLOPS_DETACH:
		ds = (struct detachspec *)arg;
		switch (ds->when) {
		case DDI_POST:
			if (ds->cmd == DDI_SUSPEND) {
				if (pci_post_suspend(rdip) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
			break;
		default:
			break;
		}
		break;
	case DDI_CTLOPS_ATTACH:
		as = (struct attachspec *)arg;
		switch (as->when) {
		case DDI_PRE:
			if (as->cmd == DDI_RESUME) {
				if (pci_pre_resume(rdip) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
			break;
		case DDI_POST:
			/*
			 * For leaf devices supporting RBER and AER, we need
			 * to apply this workaround on them after attach so
			 * that we are notified of UEs that would otherwise
			 * currently be ignored as CEs on Intel chipsets.
			 */
			pcieb_intel_rber_workaround(rdip);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return (DDI_SUCCESS);
}