/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* x86 specific code used by the pcieb driver */

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/pcie.h>
#include <sys/pci_cap.h>
#include <sys/pcie_impl.h>
#include <sys/pcie_acpi.h>
#include <sys/hotplug/hpctrl.h>
#include <io/pciex/pcieb.h>
#include <io/pciex/pcie_nb5000.h>

/* Flag to turn off Intel error handling workarounds */
int pcieb_intel_workaround_disable = 0;

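/* FMA peek/poke error callback: scan the PCIe fabric below this bridge. */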
void
pcieb_peekpoke_cb(dev_info_t *dip, ddi_fm_error_t *derr)
{
	(void) pf_scan_fabric(dip, derr, NULL);
}

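/*
 * Hook the bridge's error handling state into a DDI access handle so that
 * failed peek/poke accesses trigger a fabric scan via pcieb_peekpoke_cb().
 */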
void
pcieb_set_prot_scan(dev_info_t *dip, ddi_acc_impl_t *hdlp)
{
	pcieb_devstate_t *pcieb = ddi_get_soft_state(pcieb_state,
	    ddi_get_instance(dip));

	hdlp->ahi_err_mutexp = &pcieb->pcieb_err_mutex;
	hdlp->ahi_peekpoke_mutexp = &pcieb->pcieb_peek_poke_mutex;
	hdlp->ahi_scan_dip = dip;
	hdlp->ahi_scan = pcieb_peekpoke_cb;
}

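/*
 * Peek/poke ctlops: Root Ports serialize accesses through
 * pci_peekpoke_check() so that access errors can be caught and the fabric
 * scanned; all other bridges pass the request straight to ddi_ctlops().
 */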
int
pcieb_plat_peekpoke(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	pcieb_devstate_t *pcieb = ddi_get_soft_state(pcieb_state,
	    ddi_get_instance(dip));

	if (!PCIE_IS_RP(PCIE_DIP2BUS(dip)))
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));

	return (pci_peekpoke_check(dip, rdip, ctlop, arg, result,
	    ddi_ctlops, &pcieb->pcieb_err_mutex,
	    &pcieb->pcieb_peek_poke_mutex,
	    pcieb_peekpoke_cb));
}

/* x86 specific workarounds needed at the end of pcieb attach */
void
pcieb_plat_attach_workaround(dev_info_t *dip)
{
	/* Must apply workarounds only after all initialization is done */
	pcieb_intel_error_workaround(dip);
	pcieb_intel_mps_workaround(dip);
}

/* Workarounds to enable error handling on certain Intel chipsets */
void
pcieb_intel_error_workaround(dev_info_t *dip)
{
	pcieb_devstate_t *pcieb = ddi_get_soft_state(pcieb_state,
	    ddi_get_instance(dip));

	pcieb_intel_serr_workaround(dip, pcieb->pcieb_no_aer_msi);
	pcieb_intel_rber_workaround(dip);
	pcieb_intel_sw_workaround(dip);
}

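/* Interrupt ops are passed through to the common DDI implementation. */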
int
pcieb_plat_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	return (i_ddi_intr_ops(dip, rdip, intr_op, hdlp, result));
}

/* shpc is not supported on x86 */
/*ARGSUSED*/
int
pcieb_plat_pcishpc_probe(dev_info_t *dip, ddi_acc_handle_t config_handle)
{
	return (DDI_FAILURE);
}

/*
 * Dummy functions to get around the fact that there is no shpc module
 * on x86 today.
 */
/*ARGSUSED*/
int
pcishpc_init(dev_info_t *dip)
{
	return (DDI_FAILURE);
}

/*ARGSUSED*/
int
pcishpc_uninit(dev_info_t *dip)
{
	return (DDI_FAILURE);
}

/*ARGSUSED*/
int
pcishpc_intr(dev_info_t *dip)
{
	return (DDI_INTR_UNCLAIMED);
}

/*ARGSUSED*/
boolean_t
pcieb_plat_pwr_disable(dev_info_t *dip)
{
	/* Always disable on x86 */
	return (B_TRUE);
}

boolean_t
pcieb_plat_msi_supported(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	uint16_t vendor_id, device_id;
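
	/*
	 * bus_dev_ven_id holds the config space device/vendor ID dword:
	 * the device ID is in the upper 16 bits, the vendor ID in the
	 * lower 16 bits.
	 */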
	vendor_id = bus_p->bus_dev_ven_id & 0xFFFF;
	device_id = bus_p->bus_dev_ven_id >> 16;

	/*
	 * Intel ESB2 switches have an erratum that prevents using MSIs
	 * for hotplug.
	 */
	return (((vendor_id == INTEL_VENDOR_ID) &&
	    INTEL_ESB2_SW_PCIE_DEV_ID(device_id)) ? B_FALSE : B_TRUE);
}

void
pcieb_plat_intr_attach(pcieb_devstate_t *pcieb)
{
	/*
	 * _OSC initialization needs to be done before interrupts are
	 * initialized.
	 */
	pcieb_init_osc(pcieb->pcieb_dip);
}

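/*
 * Children that declare an "interrupts" property get parent-private data
 * with room for a single intrspec; other children get none.
 */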
void
pcieb_plat_initchild(dev_info_t *child)
{
	struct ddi_parent_private_data *pdptr;

	if (ddi_getprop(DDI_DEV_T_NONE, child, DDI_PROP_DONTPASS, "interrupts",
	    -1) != -1) {
		pdptr = kmem_zalloc((sizeof (struct ddi_parent_private_data) +
		    sizeof (struct intrspec)), KM_SLEEP);
		pdptr->par_intr = (struct intrspec *)(pdptr + 1);
		pdptr->par_nintr = 1;
		ddi_set_parent_data(child, pdptr);
	} else {
		ddi_set_parent_data(child, NULL);
	}
}

void
pcieb_plat_uninitchild(dev_info_t *child)
{
	struct ddi_parent_private_data	*pdptr;

	if ((pdptr = ddi_get_parent_data(child)) != NULL)
		kmem_free(pdptr, (sizeof (*pdptr) + sizeof (struct intrspec)));

	ddi_set_parent_data(child, NULL);
}

/* _OSC related */
void
pcieb_init_osc(dev_info_t *devi)
{
	pcie_bus_t	*bus_p = PCIE_DIP2UPBUS(devi);
	uint32_t	osc_flags = OSC_CONTROL_PCIE_ADV_ERR;

	/*
	 * Call the _OSC method for two reasons:
	 * 1. Hotplug: To determine if it is native or ACPI mode.
	 *
	 * 2. Error handling: Inform firmware that the OS can support AER
	 * error handling. Currently we do not act on the BIOS response and
	 * instead set up interrupts for error handling as if it were
	 * supported.
	 *
	 * For hotpluggable slots the _OSC method has already been called as
	 * part of the hotplug initialization.
	 * For non-hotpluggable slots we need to call the _OSC method only
	 * for Root Ports (for AER support).
	 */
	if (!pcie_is_osc(devi) && PCIE_IS_RP(bus_p) && PCIE_HAS_AER(bus_p))
		(void) pcie_acpi_osc(devi, &osc_flags);
}


/*
 * Intel chip specific workarounds. Right now they're limited to the 5000,
 * 5400 and 7300 series chipsets.
 */
typedef struct x86_error_reg {
	uint32_t	offset;
	uint_t		size;
	uint32_t	mask;
	uint32_t	value1;	/* Value for MSI case */
	uint32_t	value2; /* Value for machine check case */
} x86_error_reg_t;

typedef struct x86_error_tbl {
	uint16_t	vendor_id;
	uint16_t	device_id_low;
	uint16_t	device_id_high;
	uint8_t		rev_id_low;
	uint8_t		rev_id_high;
	x86_error_reg_t	*error_regs;
	int		error_regs_len;
} x86_error_tbl_t;

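/*
 * A device matches a table entry when its vendor ID equals vendor_id,
 * its device ID is within [device_id_low, device_id_high], and its
 * revision ID is within [rev_id_low, rev_id_high]; the entry's error_regs
 * are then applied in order by pcieb_intel_serr_workaround().
 */
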
/*
 * Chipset and device specific settings that are required for error handling
 * (reporting, forwarding, and response at the RC) beyond the standard
 * registers in the PCIE and AER caps.
 *
 * The Northbridge Root Port settings also apply to the ESI port.  The ESI
 * port is a special leaf device but functions like a root port connected
 * to the Southbridge and receives all the onboard Southbridge errors
 * including those from Southbridge Root Ports.  However, this does not
 * include the Southbridge Switch Ports, which act like normal switch ports
 * and are connected to the Northbridge through a separate link.
 *
 * PCIE errors from the ESB2 Southbridge RPs are simply forwarded to the ESI
 * port on the Northbridge.
 *
 * If MSIs don't work we want UEs (Fatal and Non-Fatal) to panic the system,
 * except for URs.  We do this by having the Root Ports respond with a System
 * Error and having that trigger a Machine Check (MCE).
 */

/*
 * 7300 Northbridge Root Ports
 */
static x86_error_reg_t intel_7300_rp_regs[] = {
	/* Command Register - Enable SERR */
	{0x4,   16, 0xFFFF,	0x0,	PCI_COMM_SERR_ENABLE},

	/* Root Control Register - SERR on NFE/FE */
	{0x88,  16, 0x0,	0x0,	PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
					PCIE_ROOTCTL_SYS_ERR_ON_FE_EN},

	/* AER UE Mask - Mask UR */
	{0x108, 32, 0x0, PCIE_AER_UCE_UR, PCIE_AER_UCE_UR},

	/* PEXCTRL[21] check for certain malformed TLP types and MSI enable */
	{0x48,	32, 0xFFFFFFFF, 0xC0200000, 0x200000},
	/* PEXCTRL3[7]. MSI RAS error enable */
	{0x4D,	32, 0xFFFFFFFF, 0x1, 0x0},

	/* PEX_ERR_DOCMD[7:0] */
	{0x144,	8,  0x0,	0x0,	0xF0},

	/* EMASK_UNCOR_PEX[21:0] UE mask */
	{0x148,	32, 0x0, PCIE_AER_UCE_UR, PCIE_AER_UCE_UR},

	/* EMASK_RP_PEX[2:0] FE, UE, CE message detect mask */
	{0x150,	8,  0x0,	0x0,	0x1},
};
#define	INTEL_7300_RP_REGS_LEN \
	(sizeof (intel_7300_rp_regs) / sizeof (x86_error_reg_t))

/*
 * 5000 Northbridge Root Ports
 */
static x86_error_reg_t intel_5000_rp_regs[] = {
	/* Command Register - Enable SERR */
	{0x4,   16, 0xFFFF,	PCI_COMM_SERR_ENABLE,	PCI_COMM_SERR_ENABLE},

	/* Root Control Register - SERR on NFE/FE/CE */
	{0x88,  16, 0x0,	PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN |
				PCIE_ROOTCTL_SYS_ERR_ON_CE_EN,
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN},

	/* AER UE Mask - Mask UR */
	{0x108, 32, 0x0,	PCIE_AER_UCE_UR,	PCIE_AER_UCE_UR},

	/* PEXCTRL[21] check for certain malformed TLP type */
	{0x48,	32, 0xFFFFFFFF, 0xC0200000, 0x200000},
	/* PEXCTRL3[7]. MSI RAS error enable. */
	{0x4D,	32, 0xFFFFFFFF,	0x1,	0x0},

	/* PEX_ERR_DOCMD[7:0] */
	{0x144,	8,  0x0,	0x0,	0xF0},

	/* EMASK_UNCOR_PEX[21:0] UE mask */
	{0x148,	32, 0x0,	PCIE_AER_UCE_UR,	PCIE_AER_UCE_UR},

	/* EMASK_RP_PEX[2:0] FE, UE, CE message detect mask */
	{0x150,	8,  0x0,	0x0,	0x1},
};
#define	INTEL_5000_RP_REGS_LEN \
	(sizeof (intel_5000_rp_regs) / sizeof (x86_error_reg_t))

/*
 * 5400 Northbridge Root Ports.
 */
static x86_error_reg_t intel_5400_rp_regs[] = {
	/* Command Register - Enable SERR */
	{0x4,   16, 0xFFFF,	PCI_COMM_SERR_ENABLE, PCI_COMM_SERR_ENABLE},

	/* Root Control Register - SERR on NFE/FE */
	{0x88,  16, 0x0, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
			    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN |
			    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN,
			    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
			    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN},

	/* AER UE Mask - Mask UR */
	{0x108, 32, 0x0,	PCIE_AER_UCE_UR,	PCIE_AER_UCE_UR},

	/* PEXCTRL[21] check for certain malformed TLP types */
	{0x48,	32, 0xFFFFFFFF,	0xC0200000, 0x200000},
	/* PEXCTRL3. MSI RAS error enable. */
	{0x4E,	8, 0x0,	0x1,	0x0},

	/* PEX_ERR_DOCMD[11:0] */
	{0x144,	16,  0x0,	0x0,	0xFF0},

	/* PEX_ERR_PIN_MASK[4:0] do not mask ERR[2:0] pins used by DOCMD */
	{0x146,	16,  0x0,	0x10,	0x10},

	/* EMASK_UNCOR_PEX[21:0] UE mask */
	{0x148,	32, 0x0,	PCIE_AER_UCE_UR,	PCIE_AER_UCE_UR},

	/* EMASK_RP_PEX[2:0] FE, UE, CE message detect mask */
	{0x150,	8,  0x0,	0x0,	0x1},
};
#define	INTEL_5400_RP_REGS_LEN \
	(sizeof (intel_5400_rp_regs) / sizeof (x86_error_reg_t))


/*
 * ESB2 Southbridge Root Ports
 */
static x86_error_reg_t intel_esb2_rp_regs[] = {
	/* Command Register - Enable SERR */
	{0x4,   16, 0xFFFF,	PCI_COMM_SERR_ENABLE,	PCI_COMM_SERR_ENABLE},

	/* Root Control Register - SERR on NFE/FE */
	{0x5c,  16, 0x0,	PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN |
				PCIE_ROOTCTL_SYS_ERR_ON_CE_EN,
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN},

	/* UEM[20:0] UE mask (write-once) */
	{0x148, 32, 0x0,	PCIE_AER_UCE_UR,	PCIE_AER_UCE_UR},
};
#define	INTEL_ESB2_RP_REGS_LEN \
	(sizeof (intel_esb2_rp_regs) / sizeof (x86_error_reg_t))


/*
 * ESB2 Southbridge Switch Ports
 */
static x86_error_reg_t intel_esb2_sw_regs[] = {
	/* Command Register - Enable SERR */
	{0x4,   16, 0xFFFF,	PCI_COMM_SERR_ENABLE,	PCI_COMM_SERR_ENABLE},

	/* AER UE Mask - Mask UR */
	{0x108, 32, 0x0,	PCIE_AER_UCE_UR,	PCIE_AER_UCE_UR},
};
#define	INTEL_ESB2_SW_REGS_LEN \
	(sizeof (intel_esb2_sw_regs) / sizeof (x86_error_reg_t))


x86_error_tbl_t x86_error_init_tbl[] = {
	/* Intel 7300: 3600 = ESI, 3604-360A = NB root ports */
	{0x8086, 0x3600, 0x3600, 0x0, 0xFF,
		intel_7300_rp_regs, INTEL_7300_RP_REGS_LEN},
	{0x8086, 0x3604, 0x360A, 0x0, 0xFF,
		intel_7300_rp_regs, INTEL_7300_RP_REGS_LEN},

	/* Intel 5000: 25C0, 25D0, 25D4, 25D8 = ESI */
	{0x8086, 0x25C0, 0x25C0, 0x0, 0xFF,
		intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},
	{0x8086, 0x25D0, 0x25D0, 0x0, 0xFF,
		intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},
	{0x8086, 0x25D4, 0x25D4, 0x0, 0xFF,
		intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},
	{0x8086, 0x25D8, 0x25D8, 0x0, 0xFF,
		intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},

	/* Intel 5000: 25E2-25E7 and 25F7-25FA = NB root ports */
	{0x8086, 0x25E2, 0x25E7, 0x0, 0xFF,
		intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},
	{0x8086, 0x25F7, 0x25FA, 0x0, 0xFF,
		intel_5000_rp_regs, INTEL_5000_RP_REGS_LEN},

	/* Intel 5400: 4000-4001, 4003 = ESI and 4021-4029 = NB root ports */
	{0x8086, 0x4000, 0x4001, 0x0, 0xFF,
		intel_5400_rp_regs, INTEL_5400_RP_REGS_LEN},
	{0x8086, 0x4003, 0x4003, 0x0, 0xFF,
		intel_5400_rp_regs, INTEL_5400_RP_REGS_LEN},
	{0x8086, 0x4021, 0x4029, 0x0, 0xFF,
		intel_5400_rp_regs, INTEL_5400_RP_REGS_LEN},

	/* Intel 631xESB/632xESB aka ESB2: 2690-2697 = SB root ports */
	{0x8086, 0x2690, 0x2697, 0x0, 0xFF,
		intel_esb2_rp_regs, INTEL_ESB2_RP_REGS_LEN},

	/* Intel switches on ESB2: 3500-3503, 3510-351B */
	{0x8086, 0x3500, 0x3503, 0x0, 0xFF,
		intel_esb2_sw_regs, INTEL_ESB2_SW_REGS_LEN},
	{0x8086, 0x3510, 0x351B, 0x0, 0xFF,
		intel_esb2_sw_regs, INTEL_ESB2_SW_REGS_LEN},

	/* XXX Intel PCIe-PCIx on esb2: 350C */
};
static int x86_error_init_tbl_len =
	sizeof (x86_error_init_tbl) / sizeof (x86_error_tbl_t);

/*
 * The main goal of this workaround is to apply chipset specific settings
 * if MSIs happen to be enabled on this device. Otherwise, make the system
 * Machine Check/panic if a UE is detected in the fabric.
 */
void
pcieb_intel_serr_workaround(dev_info_t *dip, boolean_t mcheck)
{
	uint16_t		vid, did;
	uint8_t			rid;
	int			i, j;
	x86_error_tbl_t		*tbl;
	x86_error_reg_t		*reg;
	pcie_bus_t		*bus_p = PCIE_DIP2UPBUS(dip);
	ddi_acc_handle_t	cfg_hdl = bus_p->bus_cfg_hdl;
	uint16_t		bdf = bus_p->bus_bdf;

	if (pcieb_intel_workaround_disable)
		return;

	vid = bus_p->bus_dev_ven_id & 0xFFFF;
	did = bus_p->bus_dev_ven_id >> 16;
	rid = bus_p->bus_rev_id;

	PCIEB_DEBUG(DBG_ATTACH, dip, "VID:0x%x DID:0x%x RID:0x%x bdf=0x%x\n",
	    vid, did, rid, bdf);

	tbl = x86_error_init_tbl;
	for (i = 0; i < x86_error_init_tbl_len; i++, tbl++) {
		if (!((vid == tbl->vendor_id) &&
		    (did >= tbl->device_id_low) &&
		    (did <= tbl->device_id_high) &&
		    (rid >= tbl->rev_id_low) &&
		    (rid <= tbl->rev_id_high)))
			continue;

		if (mcheck && PCIE_IS_RP(bus_p))
			pcie_set_rber_fatal(dip, B_TRUE);

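		/*
		 * Read-modify-write each listed register: preserve the bits
		 * in mask, OR in value1 (MSI case) or value2 (machine check
		 * case), then read the register back for the debug output.
		 */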
		reg = tbl->error_regs;
		for (j = 0; j < tbl->error_regs_len; j++, reg++) {
			uint32_t data = 0xDEADBEEF;
			uint32_t value = 0xDEADBEEF;
			switch (reg->size) {
			case 32:
				data = (uint32_t)pci_config_get32(cfg_hdl,
				    reg->offset);
				value = (mcheck ?
				    ((data & reg->mask) | reg->value2) :
				    ((data & reg->mask) | reg->value1));
				pci_config_put32(cfg_hdl, reg->offset, value);
				value = (uint32_t)pci_config_get32(cfg_hdl,
				    reg->offset);
				break;
			case 16:
				data = (uint32_t)pci_config_get16(cfg_hdl,
				    reg->offset);
				value = (mcheck ?
				    ((data & reg->mask) | reg->value2) :
				    ((data & reg->mask) | reg->value1));
				pci_config_put16(cfg_hdl, reg->offset,
				    (uint16_t)value);
				value = (uint32_t)pci_config_get16(cfg_hdl,
				    reg->offset);
				break;
			case 8:
				data = (uint32_t)pci_config_get8(cfg_hdl,
				    reg->offset);
				value = (mcheck ?
				    ((data & reg->mask) | reg->value2) :
				    ((data & reg->mask) | reg->value1));
				pci_config_put8(cfg_hdl, reg->offset,
				    (uint8_t)value);
				value = (uint32_t)pci_config_get8(cfg_hdl,
				    reg->offset);
				break;
			}

			PCIEB_DEBUG(DBG_ATTACH, dip, "bdf:%x mcheck:%d size:%d "
			    "off:0x%x mask:0x%x value:0x%x + orig:0x%x -> "
			    "0x%x\n", bdf, mcheck, reg->size, reg->offset,
			    reg->mask, (mcheck ? reg->value2 : reg->value1),
			    data, value);
		}
	}
}

/*
 * For devices that support Role Based Error Reporting, give several UEs a
 * FATAL severity.  That way a Fatal Message will be sent instead of a
 * Correctable Message.  Without full FMA support, CEs will be ignored.
 */
uint32_t pcieb_rber_sev = (PCIE_AER_UCE_TRAINING | PCIE_AER_UCE_DLP |
    PCIE_AER_UCE_SD | PCIE_AER_UCE_PTLP | PCIE_AER_UCE_FCP | PCIE_AER_UCE_TO |
    PCIE_AER_UCE_CA | PCIE_AER_UCE_RO | PCIE_AER_UCE_MTLP | PCIE_AER_UCE_ECRC);

void
pcieb_intel_rber_workaround(dev_info_t *dip)
{
	uint32_t rber;
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);

	if (pcieb_intel_workaround_disable)
		return;

	/*
	 * Check the Root Port's machine check setting to determine if this
	 * workaround is needed or not.
	 */
	if (!pcie_get_rber_fatal(dip))
		return;

	if (!PCIE_IS_PCIE(bus_p) || !PCIE_HAS_AER(bus_p))
		return;

	rber = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
	    PCIE_DEVCAP_ROLE_BASED_ERR_REP;
	if (!rber)
		return;

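	/* Promote the listed UEs to FATAL in the AER UE severity register. */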
	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, pcieb_rber_sev);
}

/*
 * The Intel 5000 chipset has an erratum that requires read completion
 * coalescing to be disabled if the Max Payload Size is set to 256 bytes.
 */
void
pcieb_intel_mps_workaround(dev_info_t *dip)
{
	uint16_t		vid, did;
	uint32_t		pexctrl;
	pcie_bus_t		*bus_p = PCIE_DIP2UPBUS(dip);

	vid = bus_p->bus_dev_ven_id & 0xFFFF;
	did = bus_p->bus_dev_ven_id >> 16;

	if ((vid == INTEL_VENDOR_ID) && (INTEL_NB5000_PCIE_DEV_ID(did) ||
	    INTEL_NB5100_PCIE_DEV_ID(did))) {

		pexctrl = pci_config_get32(bus_p->bus_cfg_hdl,
		    INTEL_NB5000_PEXCTRL_OFFSET);
		/*
		 * Turn off coalescing (bit 10)
		 */
		pexctrl &= ~INTEL_NB5000_PEXCTRL_COALESCE_EN;

		pci_config_put32(bus_p->bus_cfg_hdl,
		    INTEL_NB5000_PEXCTRL_OFFSET, pexctrl);
	}
}

/*
 * Workaround for certain switches regardless of platform
 */
void
pcieb_intel_sw_workaround(dev_info_t *dip)
{
	uint16_t		vid, regw;
	pcie_bus_t		*bus_p = PCIE_DIP2UPBUS(dip);
	ddi_acc_handle_t	cfg_hdl = bus_p->bus_cfg_hdl;

	if (pcieb_intel_workaround_disable)
		return;

	if (!PCIE_IS_SW(PCIE_DIP2BUS(dip)))
		return;

	vid = bus_p->bus_dev_ven_id & 0xFFFF;
	/*
	 * Intel and PLX switches require SERR in the CMD reg to forward
	 * error messages, though this is not PCIE spec-compliant behavior.
	 * To prevent the switches themselves from reporting errors on URs
	 * when the CMD reg has SERR enabled (which is expected according to
	 * the PCIE spec) we rely on masking URs in the AER cap.
	 */
	if (vid == 0x8086 || vid == 0x10B5) {
		regw = pci_config_get16(cfg_hdl, PCI_CONF_COMM);
		pci_config_put16(cfg_hdl, PCI_CONF_COMM,
		    regw | PCI_COMM_SERR_ENABLE);
	}
}

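/*
 * Platform bus ctlops: handle suspend/resume bookkeeping and apply the
 * RBER workaround to child devices once they have attached.
 */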
int
pcieb_plat_ctlops(dev_info_t *rdip, ddi_ctl_enum_t ctlop, void *arg)
{
	struct detachspec *ds;
	struct attachspec *as;

	switch (ctlop) {
	case DDI_CTLOPS_DETACH:
		ds = (struct detachspec *)arg;
		switch (ds->when) {
		case DDI_POST:
			if (ds->cmd == DDI_SUSPEND) {
				if (pci_post_suspend(rdip) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
			break;
		default:
			break;
		}
		break;
	case DDI_CTLOPS_ATTACH:
		as = (struct attachspec *)arg;
		switch (as->when) {
		case DDI_PRE:
			if (as->cmd == DDI_RESUME) {
				if (pci_pre_resume(rdip) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
			break;
		case DDI_POST:
			/*
			 * For leaf devices supporting RBER and AER, we need
			 * to apply this workaround after they attach so that
			 * we are notified of UEs that would otherwise be
			 * ignored as CEs on current Intel chipsets.
			 */
			pcieb_intel_rber_workaround(rdip);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return (DDI_SUCCESS);
}