xref: /linux/drivers/edac/e752x_edac.c (revision 93df8a1ed6231727c5db94a80b1a6bd5ee67cec3)
1 /*
2  * Intel e752x Memory Controller kernel module
3  * (C) 2004 Linux Networx (http://lnxi.com)
4  * This file may be distributed under the terms of the
5  * GNU General Public License.
6  *
7  * Implement support for the E7520, E7525, E7320 and i3100 memory controllers.
8  *
9  * Datasheets:
10  *	http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
11  *	ftp://download.intel.com/design/intarch/datashts/31345803.pdf
12  *
13  * Written by Tom Zimmerman
14  *
15  * Contributors:
16  * 	Thayne Harbaugh at realmsys.com (?)
17  * 	Wang Zhenyu at intel.com
18  * 	Dave Jiang at mvista.com
19  *
20  */
21 
22 #include <linux/module.h>
23 #include <linux/init.h>
24 #include <linux/pci.h>
25 #include <linux/pci_ids.h>
26 #include <linux/edac.h>
27 #include "edac_core.h"
28 
29 #define E752X_REVISION	" Ver: 2.0.2"
30 #define EDAC_MOD_STR	"e752x_edac"
31 
32 static int report_non_memory_errors;
33 static int force_function_unhide;
34 static int sysbus_parity = -1;
35 
36 static struct edac_pci_ctl_info *e752x_pci;
37 
38 #define e752x_printk(level, fmt, arg...) \
39 	edac_printk(level, "e752x", fmt, ##arg)
40 
41 #define e752x_mc_printk(mci, level, fmt, arg...) \
42 	edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
43 
44 #ifndef PCI_DEVICE_ID_INTEL_7520_0
45 #define PCI_DEVICE_ID_INTEL_7520_0      0x3590
46 #endif				/* PCI_DEVICE_ID_INTEL_7520_0      */
47 
48 #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
49 #define PCI_DEVICE_ID_INTEL_7520_1_ERR  0x3591
50 #endif				/* PCI_DEVICE_ID_INTEL_7520_1_ERR  */
51 
52 #ifndef PCI_DEVICE_ID_INTEL_7525_0
53 #define PCI_DEVICE_ID_INTEL_7525_0      0x359E
54 #endif				/* PCI_DEVICE_ID_INTEL_7525_0      */
55 
56 #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
57 #define PCI_DEVICE_ID_INTEL_7525_1_ERR  0x3593
58 #endif				/* PCI_DEVICE_ID_INTEL_7525_1_ERR  */
59 
60 #ifndef PCI_DEVICE_ID_INTEL_7320_0
61 #define PCI_DEVICE_ID_INTEL_7320_0	0x3592
62 #endif				/* PCI_DEVICE_ID_INTEL_7320_0 */
63 
64 #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
65 #define PCI_DEVICE_ID_INTEL_7320_1_ERR	0x3593
66 #endif				/* PCI_DEVICE_ID_INTEL_7320_1_ERR */
67 
68 #ifndef PCI_DEVICE_ID_INTEL_3100_0
69 #define PCI_DEVICE_ID_INTEL_3100_0	0x35B0
70 #endif				/* PCI_DEVICE_ID_INTEL_3100_0 */
71 
72 #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
73 #define PCI_DEVICE_ID_INTEL_3100_1_ERR	0x35B1
74 #endif				/* PCI_DEVICE_ID_INTEL_3100_1_ERR */
75 
76 #define E752X_NR_CSROWS		8	/* number of csrows */
77 
78 /* E752X register addresses - device 0 function 0 */
79 #define E752X_MCHSCRB		0x52	/* Memory Scrub register (16b) */
80 					/*
81 					 * 6:5     Scrub Completion Count
82 					 * 3:2     Scrub Rate (i3100 only)
83 					 *      01=fast 10=normal
84 					 * 1:0     Scrub Mode enable
85 					 *      00=off 10=on
86 					 */
87 #define E752X_DRB		0x60	/* DRAM row boundary register (8b) */
88 #define E752X_DRA		0x70	/* DRAM row attribute register (8b) */
89 					/*
90 					 * 31:30   Device width row 7
91 					 *      01=x8 10=x4 11=x8 DDR2
92 					 * 27:26   Device width row 6
93 					 * 23:22   Device width row 5
94 					 * 19:18   Device width row 4
95 					 * 15:14   Device width row 3
96 					 * 11:10   Device width row 2
97 					 *  7:6    Device width row 1
98 					 *  3:2    Device width row 0
99 					 */
100 #define E752X_DRC		0x7C	/* DRAM controller mode reg (32b) */
101 					/* FIXME:IS THIS RIGHT? */
102 					/*
103 					 * 22    Number channels 0=1,1=2
104 					 * 19:18 DRB Granularity 32/64MB
105 					 */
106 #define E752X_DRM		0x80	/* Dimm mapping register */
107 #define E752X_DDRCSR		0x9A	/* DDR control and status reg (16b) */
108 					/*
109 					 * 14:12 1 single A, 2 single B, 3 dual
110 					 */
111 #define E752X_TOLM		0xC4	/* DRAM top of low memory reg (16b) */
112 #define E752X_REMAPBASE		0xC6	/* DRAM remap base address reg (16b) */
113 #define E752X_REMAPLIMIT	0xC8	/* DRAM remap limit address reg (16b) */
114 #define E752X_REMAPOFFSET	0xCA	/* DRAM remap limit offset reg (16b) */
115 
116 /* E752X register addresses - device 0 function 1 */
117 #define E752X_FERR_GLOBAL	0x40	/* Global first error register (32b) */
118 #define E752X_NERR_GLOBAL	0x44	/* Global next error register (32b) */
119 #define E752X_HI_FERR		0x50	/* Hub interface first error reg (8b) */
120 #define E752X_HI_NERR		0x52	/* Hub interface next error reg (8b) */
121 #define E752X_HI_ERRMASK	0x54	/* Hub interface error mask reg (8b) */
122 #define E752X_HI_SMICMD		0x5A	/* Hub interface SMI command reg (8b) */
123 #define E752X_SYSBUS_FERR	0x60	/* System bus first error reg (16b) */
124 #define E752X_SYSBUS_NERR	0x62	/* System bus next error reg (16b) */
125 #define E752X_SYSBUS_ERRMASK	0x64	/* System bus error mask reg (16b) */
126 #define E752X_SYSBUS_SMICMD	0x6A	/* System bus SMI command reg (16b) */
127 #define E752X_BUF_FERR		0x70	/* Memory buffer first error reg (8b) */
128 #define E752X_BUF_NERR		0x72	/* Memory buffer next error reg (8b) */
129 #define E752X_BUF_ERRMASK	0x74	/* Memory buffer error mask reg (8b) */
130 #define E752X_BUF_SMICMD	0x7A	/* Memory buffer SMI cmd reg (8b) */
131 #define E752X_DRAM_FERR		0x80	/* DRAM first error register (16b) */
132 #define E752X_DRAM_NERR		0x82	/* DRAM next error register (16b) */
133 #define E752X_DRAM_ERRMASK	0x84	/* DRAM error mask register (8b) */
134 #define E752X_DRAM_SMICMD	0x8A	/* DRAM SMI command register (8b) */
135 #define E752X_DRAM_RETR_ADD	0xAC	/* DRAM Retry address register (32b) */
136 #define E752X_DRAM_SEC1_ADD	0xA0	/* DRAM first correctable memory */
137 					/*     error address register (32b) */
138 					/*
139 					 * 31    Reserved
140 					 * 30:2  CE address (64 byte block 34:6)
141 					 * 1     Reserved
142 					 * 0     HiLoCS
143 					 */
144 #define E752X_DRAM_SEC2_ADD	0xC8	/* DRAM second correctable memory */
145 					/*     error address register (32b) */
146 					/*
147 					 * 31    Reserved
148 					 * 30:2  CE address (64 byte block 34:6)
149 					 * 1     Reserved
150 					 * 0     HiLoCS
151 					 */
152 #define E752X_DRAM_DED_ADD	0xA4	/* DRAM first uncorrectable memory */
153 					/*     error address register (32b) */
154 					/*
155 					 * 31    Reserved
156 					 * 30:2  UE address (64 byte block 34:6)
157 					 * 1     Reserved
158 					 * 0     HiLoCS
159 					 */
160 #define E752X_DRAM_SCRB_ADD	0xA8	/* DRAM 1st uncorrectable scrub mem */
161 					/*     error address register (32b) */
162 					/*
163 					 * 31    Reserved
164 					 * 30:2  UE address (64 byte block 34:6)
165 					 * 1     Reserved
166 					 * 0     HiLoCS
167 					 */
168 #define E752X_DRAM_SEC1_SYNDROME 0xC4	/* DRAM first correctable memory */
169 					/*     error syndrome register (16b) */
170 #define E752X_DRAM_SEC2_SYNDROME 0xC6	/* DRAM second correctable memory */
171 					/*     error syndrome register (16b) */
172 #define E752X_DEVPRES1		0xF4	/* Device Present 1 register (8b) */
173 
174 /* 3100 IMCH specific register addresses - device 0 function 1 */
175 #define I3100_NSI_FERR		0x48	/* NSI first error reg (32b) */
176 #define I3100_NSI_NERR		0x4C	/* NSI next error reg (32b) */
177 #define I3100_NSI_SMICMD	0x54	/* NSI SMI command register (32b) */
178 #define I3100_NSI_EMASK		0x90	/* NSI error mask register (32b) */
179 
180 /* ICH5R register addresses - device 30 function 0 */
181 #define ICH5R_PCI_STAT		0x06	/* PCI status register (16b) */
182 #define ICH5R_PCI_2ND_STAT	0x1E	/* PCI status secondary reg (16b) */
183 #define ICH5R_PCI_BRIDGE_CTL	0x3E	/* PCI bridge control register (16b) */
184 
185 enum e752x_chips {
186 	E7520 = 0,
187 	E7525 = 1,
188 	E7320 = 2,
189 	I3100 = 3
190 };
191 
192 /*
193  * These chips support single-rank and dual-rank memories only.
194  *
195  * On e752x chips, the odd rows are present only on dual-rank memories.
196  * Dividing the rank by two will provide the dimm#
197  *
198  * i3100 MC has a different mapping: it supports only 4 ranks.
199  *
200  * The mapping is (from 1 to n):
201  *	slot	   single-ranked	double-ranked
202  *	dimm #1 -> rank #4		NA
203  *	dimm #2 -> rank #3		NA
204  *	dimm #3 -> rank #2		Ranks 2 and 3
205  *	dimm #4 -> rank #1		Ranks 1 and 4
206  *
207  * FIXME: The current mapping for i3100 considers that it supports up to 8
208  *	  ranks/channel, but the datasheet says that the MC supports only 4 ranks.
209  */
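/*
 * In other words, on e752x ranks 2*n and 2*n+1 belong to the same dimm
 * (dimm# = rank / 2), and the odd rank of the pair is populated only when
 * that dimm is dual-rank.
 */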
210 
211 struct e752x_pvt {
212 	struct pci_dev *dev_d0f0;
213 	struct pci_dev *dev_d0f1;
214 	u32 tolm;
215 	u32 remapbase;
216 	u32 remaplimit;
217 	int mc_symmetric;
218 	u8 map[8];
219 	int map_type;
220 	const struct e752x_dev_info *dev_info;
221 };
222 
223 struct e752x_dev_info {
224 	u16 err_dev;
225 	u16 ctl_dev;
226 	const char *ctl_name;
227 };
228 
229 struct e752x_error_info {
230 	u32 ferr_global;
231 	u32 nerr_global;
232 	u32 nsi_ferr;	/* 3100 only */
233 	u32 nsi_nerr;	/* 3100 only */
234 	u8 hi_ferr;	/* all but 3100 */
235 	u8 hi_nerr;	/* all but 3100 */
236 	u16 sysbus_ferr;
237 	u16 sysbus_nerr;
238 	u8 buf_ferr;
239 	u8 buf_nerr;
240 	u16 dram_ferr;
241 	u16 dram_nerr;
242 	u32 dram_sec1_add;
243 	u32 dram_sec2_add;
244 	u16 dram_sec1_syndrome;
245 	u16 dram_sec2_syndrome;
246 	u32 dram_ded_add;
247 	u32 dram_scrb_add;
248 	u32 dram_retr_add;
249 };
250 
251 static const struct e752x_dev_info e752x_devs[] = {
252 	[E7520] = {
253 		.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
254 		.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
255 		.ctl_name = "E7520"},
256 	[E7525] = {
257 		.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
258 		.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
259 		.ctl_name = "E7525"},
260 	[E7320] = {
261 		.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
262 		.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
263 		.ctl_name = "E7320"},
264 	[I3100] = {
265 		.err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
266 		.ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
267 		.ctl_name = "3100"},
268 };
269 
270 /* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
271  * map the scrubbing bandwidth to a hardware register value. The 'set'
272  * operation finds the 'matching or higher value'.  Note that scrubbing
273  * on the e752x can only be enabled/disabled.  The 3100 supports
274  * a normal and fast mode.
275  */
276 
277 #define SDRATE_EOT 0xFFFFFFFF
278 
279 struct scrubrate {
280 	u32 bandwidth;	/* bandwidth consumed by scrubbing in bytes/sec */
281 	u16 scrubval;	/* register value for scrub rate */
282 };
283 
284 /* Rate below assumes same performance as i3100 using PC3200 DDR2 in
285  * normal mode.  e752x bridges don't support choosing normal or fast mode,
286  * so the scrubbing bandwidth value isn't all that important - scrubbing is
287  * either on or off.
288  */
289 static const struct scrubrate scrubrates_e752x[] = {
290 	{0,		0x00},	/* Scrubbing Off */
291 	{500000,	0x02},	/* Scrubbing On */
292 	{SDRATE_EOT,	0x00}	/* End of Table */
293 };
294 
295 /* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
296  * Normal mode: 125 (32000 / 256) times slower than fast mode.
297  */
298 static const struct scrubrate scrubrates_i3100[] = {
299 	{0,		0x00},	/* Scrubbing Off */
300 	{500000,	0x0a},	/* Normal mode - 32k clocks */
301 	{62500000,	0x06},	/* Fast mode - 256 clocks */
302 	{SDRATE_EOT,	0x00}	/* End of Table */
303 };
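/*
 * The scrubval fields above follow the E752X_MCHSCRB layout described
 * earlier: bits 1:0 = 10 enable scrubbing (0x02, 0x06, 0x0a), and on the
 * i3100 bits 3:2 select the rate (01 = fast -> 0x06, 10 = normal -> 0x0a).
 * set_sdram_scrub_rate() below picks the first entry whose bandwidth is
 * equal to or greater than the requested value, so e.g. a request of
 * 300000 bytes/s on the i3100 selects the 500000 (normal mode) entry.
 */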
304 
305 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
306 				unsigned long page)
307 {
308 	u32 remap;
309 	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
310 
311 	edac_dbg(3, "\n");
312 
313 	if (page < pvt->tolm)
314 		return page;
315 
316 	if ((page >= 0x100000) && (page < pvt->remapbase))
317 		return page;
318 
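	/*
	 * Pages at or above TOLM that were not already returned above are
	 * rebased into the remap window: keep the offset from TOLM, add
	 * remapbase, then bounds-check against remaplimit below.
	 */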
319 	remap = (page - pvt->tolm) + pvt->remapbase;
320 
321 	if (remap < pvt->remaplimit)
322 		return remap;
323 
324 	e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
325 	return pvt->tolm - 1;
326 }
327 
328 static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
329 			u32 sec1_add, u16 sec1_syndrome)
330 {
331 	u32 page;
332 	int row;
333 	int channel;
334 	int i;
335 	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
336 
337 	edac_dbg(3, "\n");
338 
339 	/* convert the addr to 4k page */
340 	page = sec1_add >> (PAGE_SHIFT - 4);
341 
342 	/* FIXME - check for -1 */
343 	if (pvt->mc_symmetric) {
344 		/* chip selects are bits 14 & 13 */
345 		row = ((page >> 1) & 3);
346 		e752x_printk(KERN_WARNING,
347 			"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
348 			pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
349 			pvt->map[4], pvt->map[5], pvt->map[6],
350 			pvt->map[7]);
351 
352 		/* test for channel remapping */
353 		for (i = 0; i < 8; i++) {
354 			if (pvt->map[i] == row)
355 				break;
356 		}
357 
358 		e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
359 
360 		if (i < 8)
361 			row = i;
362 		else
363 			e752x_mc_printk(mci, KERN_WARNING,
364 					"row %d not found in remap table\n",
365 					row);
366 	} else
367 		row = edac_mc_find_csrow_by_page(mci, page);
368 
369 	/* 0 = channel A, 1 = channel B */
370 	channel = !(error_one & 1);
371 
372 	/* e752x mc reads 34:6 of the DRAM linear address */
373 	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
374 			     page, offset_in_page(sec1_add << 4), sec1_syndrome,
375 			     row, channel, -1,
376 			     "e752x CE", "");
377 }
378 
379 static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
380 			u32 sec1_add, u16 sec1_syndrome, int *error_found,
381 			int handle_error)
382 {
383 	*error_found = 1;
384 
385 	if (handle_error)
386 		do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
387 }
388 
389 static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
390 			u32 ded_add, u32 scrb_add)
391 {
392 	u32 error_2b, block_page;
393 	int row;
394 	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
395 
396 	edac_dbg(3, "\n");
397 
398 	if (error_one & 0x0202) {
399 		error_2b = ded_add;
400 
401 		/* convert to 4k address */
402 		block_page = error_2b >> (PAGE_SHIFT - 4);
403 
404 		row = pvt->mc_symmetric ?
405 		/* chip selects are bits 14 & 13 */
406 			((block_page >> 1) & 3) :
407 			edac_mc_find_csrow_by_page(mci, block_page);
408 
409 		/* e752x mc reads 34:6 of the DRAM linear address */
410 		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
411 					block_page,
412 					offset_in_page(error_2b << 4), 0,
413 					 row, -1, -1,
414 					"e752x UE from Read", "");
415 
416 	}
417 	if (error_one & 0x0404) {
418 		error_2b = scrb_add;
419 
420 		/* convert to 4k address */
421 		block_page = error_2b >> (PAGE_SHIFT - 4);
422 
423 		row = pvt->mc_symmetric ?
424 		/* chip selects are bits 14 & 13 */
425 			((block_page >> 1) & 3) :
426 			edac_mc_find_csrow_by_page(mci, block_page);
427 
428 		/* e752x mc reads 34:6 of the DRAM linear address */
429 		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
430 					block_page,
431 					offset_in_page(error_2b << 4), 0,
432 					row, -1, -1,
433 					"e752x UE from Scrubber", "");
434 	}
435 }
436 
437 static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
438 			u32 ded_add, u32 scrb_add, int *error_found,
439 			int handle_error)
440 {
441 	*error_found = 1;
442 
443 	if (handle_error)
444 		do_process_ue(mci, error_one, ded_add, scrb_add);
445 }
446 
447 static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
448 					 int *error_found, int handle_error)
449 {
450 	*error_found = 1;
451 
452 	if (!handle_error)
453 		return;
454 
455 	edac_dbg(3, "\n");
456 	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
457 			     -1, -1, -1,
458 			     "e752x UE log memory write", "");
459 }
460 
461 static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
462 				 u32 retry_add)
463 {
464 	u32 error_1b, page;
465 	int row;
466 	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
467 
468 	error_1b = retry_add;
469 	page = error_1b >> (PAGE_SHIFT - 4);  /* convert the addr to 4k page */
470 
471 	/* chip selects are bits 14 & 13 */
472 	row = pvt->mc_symmetric ? ((page >> 1) & 3) :
473 		edac_mc_find_csrow_by_page(mci, page);
474 
475 	e752x_mc_printk(mci, KERN_WARNING,
476 			"CE page 0x%lx, row %d : Memory read retry\n",
477 			(long unsigned int)page, row);
478 }
479 
480 static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
481 				u32 retry_add, int *error_found,
482 				int handle_error)
483 {
484 	*error_found = 1;
485 
486 	if (handle_error)
487 		do_process_ded_retry(mci, error, retry_add);
488 }
489 
490 static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
491 					int *error_found, int handle_error)
492 {
493 	*error_found = 1;
494 
495 	if (handle_error)
496 		e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
497 }
498 
499 static char *global_message[11] = {
500 	"PCI Express C1",
501 	"PCI Express C",
502 	"PCI Express B1",
503 	"PCI Express B",
504 	"PCI Express A1",
505 	"PCI Express A",
506 	"DMA Controller",
507 	"HUB or NS Interface",
508 	"System Bus",
509 	"DRAM Controller",  /* index 9 == DRAM_ENTRY */
510 	"Internal Buffer"
511 };
512 
513 #define DRAM_ENTRY	9
514 
515 static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
516 
517 static void do_global_error(int fatal, u32 errors)
518 {
519 	int i;
520 
521 	for (i = 0; i < 11; i++) {
522 		if (errors & (1 << i)) {
523 			/* If the error is from DRAM Controller OR
524 			 * we are to report ALL errors, then
525 			 * report the error
526 			 */
527 			if ((i == DRAM_ENTRY) || report_non_memory_errors)
528 				e752x_printk(KERN_WARNING, "%sError %s\n",
529 					fatal_message[fatal],
530 					global_message[i]);
531 		}
532 	}
533 }
534 
535 static inline void global_error(int fatal, u32 errors, int *error_found,
536 				int handle_error)
537 {
538 	*error_found = 1;
539 
540 	if (handle_error)
541 		do_global_error(fatal, errors);
542 }
543 
544 static char *hub_message[7] = {
545 	"HI Address or Command Parity", "HI Illegal Access",
546 	"HI Internal Parity", "Out of Range Access",
547 	"HI Data Parity", "Enhanced Config Access",
548 	"Hub Interface Target Abort"
549 };
550 
551 static void do_hub_error(int fatal, u8 errors)
552 {
553 	int i;
554 
555 	for (i = 0; i < 7; i++) {
556 		if (errors & (1 << i))
557 			e752x_printk(KERN_WARNING, "%sError %s\n",
558 				fatal_message[fatal], hub_message[i]);
559 	}
560 }
561 
562 static inline void hub_error(int fatal, u8 errors, int *error_found,
563 			int handle_error)
564 {
565 	*error_found = 1;
566 
567 	if (handle_error)
568 		do_hub_error(fatal, errors);
569 }
570 
571 #define NSI_FATAL_MASK		0x0c080081
572 #define NSI_NON_FATAL_MASK	0x23a0ba64
573 #define NSI_ERR_MASK		(NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
574 
575 static char *nsi_message[30] = {
576 	"NSI Link Down",	/* NSI_FERR/NSI_NERR bit 0, fatal error */
577 	"",						/* reserved */
578 	"NSI Parity Error",				/* bit 2, non-fatal */
579 	"",						/* reserved */
580 	"",						/* reserved */
581 	"Correctable Error Message",			/* bit 5, non-fatal */
582 	"Non-Fatal Error Message",			/* bit 6, non-fatal */
583 	"Fatal Error Message",				/* bit 7, fatal */
584 	"",						/* reserved */
585 	"Receiver Error",				/* bit 9, non-fatal */
586 	"",						/* reserved */
587 	"Bad TLP",					/* bit 11, non-fatal */
588 	"Bad DLLP",					/* bit 12, non-fatal */
589 	"REPLAY_NUM Rollover",				/* bit 13, non-fatal */
590 	"",						/* reserved */
591 	"Replay Timer Timeout",				/* bit 15, non-fatal */
592 	"",						/* reserved */
593 	"",						/* reserved */
594 	"",						/* reserved */
595 	"Data Link Protocol Error",			/* bit 19, fatal */
596 	"",						/* reserved */
597 	"Poisoned TLP",					/* bit 21, non-fatal */
598 	"",						/* reserved */
599 	"Completion Timeout",				/* bit 23, non-fatal */
600 	"Completer Abort",				/* bit 24, non-fatal */
601 	"Unexpected Completion",			/* bit 25, non-fatal */
602 	"Receiver Overflow",				/* bit 26, fatal */
603 	"Malformed TLP",				/* bit 27, fatal */
604 	"",						/* reserved */
605 	"Unsupported Request"				/* bit 29, non-fatal */
606 };
607 
608 static void do_nsi_error(int fatal, u32 errors)
609 {
610 	int i;
611 
612 	for (i = 0; i < 30; i++) {
613 		if (errors & (1 << i))
614 			printk(KERN_WARNING "%sError %s\n",
615 			       fatal_message[fatal], nsi_message[i]);
616 	}
617 }
618 
619 static inline void nsi_error(int fatal, u32 errors, int *error_found,
620 		int handle_error)
621 {
622 	*error_found = 1;
623 
624 	if (handle_error)
625 		do_nsi_error(fatal, errors);
626 }
627 
628 static char *membuf_message[4] = {
629 	"Internal PMWB to DRAM Parity",
630 	"Internal PMWB to System Bus Parity",
631 	"Internal System Bus or IO to PMWB Parity",
632 	"Internal DRAM to PMWB Parity"
633 };
634 
635 static void do_membuf_error(u8 errors)
636 {
637 	int i;
638 
639 	for (i = 0; i < 4; i++) {
640 		if (errors & (1 << i))
641 			e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
642 				membuf_message[i]);
643 	}
644 }
645 
646 static inline void membuf_error(u8 errors, int *error_found, int handle_error)
647 {
648 	*error_found = 1;
649 
650 	if (handle_error)
651 		do_membuf_error(errors);
652 }
653 
654 static char *sysbus_message[10] = {
655 	"Addr or Request Parity",
656 	"Data Strobe Glitch",
657 	"Addr Strobe Glitch",
658 	"Data Parity",
659 	"Addr Above TOM",
660 	"Non DRAM Lock Error",
661 	"MCERR", "BINIT",
662 	"Memory Parity",
663 	"IO Subsystem Parity"
664 };
665 
666 static void do_sysbus_error(int fatal, u32 errors)
667 {
668 	int i;
669 
670 	for (i = 0; i < 10; i++) {
671 		if (errors & (1 << i))
672 			e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
673 				fatal_message[fatal], sysbus_message[i]);
674 	}
675 }
676 
677 static inline void sysbus_error(int fatal, u32 errors, int *error_found,
678 				int handle_error)
679 {
680 	*error_found = 1;
681 
682 	if (handle_error)
683 		do_sysbus_error(fatal, errors);
684 }
685 
686 static void e752x_check_hub_interface(struct e752x_error_info *info,
687 				int *error_found, int handle_error)
688 {
689 	u8 stat8;
690 
691 	//pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
692 
693 	stat8 = info->hi_ferr;
694 
695 	if (stat8 & 0x7f) {	/* Error, so process */
696 		stat8 &= 0x7f;
697 
698 		if (stat8 & 0x2b)
699 			hub_error(1, stat8 & 0x2b, error_found, handle_error);
700 
701 		if (stat8 & 0x54)
702 			hub_error(0, stat8 & 0x54, error_found, handle_error);
703 	}
704 	//pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
705 
706 	stat8 = info->hi_nerr;
707 
708 	if (stat8 & 0x7f) {	/* Error, so process */
709 		stat8 &= 0x7f;
710 
711 		if (stat8 & 0x2b)
712 			hub_error(1, stat8 & 0x2b, error_found, handle_error);
713 
714 		if (stat8 & 0x54)
715 			hub_error(0, stat8 & 0x54, error_found, handle_error);
716 	}
717 }
718 
719 static void e752x_check_ns_interface(struct e752x_error_info *info,
720 				int *error_found, int handle_error)
721 {
722 	u32 stat32;
723 
724 	stat32 = info->nsi_ferr;
725 	if (stat32 & NSI_ERR_MASK) { /* Error, so process */
726 		if (stat32 & NSI_FATAL_MASK)	/* check for fatal errors */
727 			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
728 				  handle_error);
729 		if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
730 			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
731 				  handle_error);
732 	}
733 	stat32 = info->nsi_nerr;
734 	if (stat32 & NSI_ERR_MASK) {
735 		if (stat32 & NSI_FATAL_MASK)
736 			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
737 				  handle_error);
738 		if (stat32 & NSI_NON_FATAL_MASK)
739 			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
740 				  handle_error);
741 	}
742 }
743 
744 static void e752x_check_sysbus(struct e752x_error_info *info,
745 			int *error_found, int handle_error)
746 {
747 	u32 stat32, error32;
748 
749 	//pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
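	/* pack SYSBUS_FERR into the low 16 bits and SYSBUS_NERR into the
	 * high 16 bits; each half is then masked to its 10 defined error
	 * bits and decoded below
	 */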
750 	stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
751 
752 	if (stat32 == 0)
753 		return;		/* no errors */
754 
755 	error32 = (stat32 >> 16) & 0x3ff;
756 	stat32 = stat32 & 0x3ff;
757 
758 	if (stat32 & 0x087)
759 		sysbus_error(1, stat32 & 0x087, error_found, handle_error);
760 
761 	if (stat32 & 0x378)
762 		sysbus_error(0, stat32 & 0x378, error_found, handle_error);
763 
764 	if (error32 & 0x087)
765 		sysbus_error(1, error32 & 0x087, error_found, handle_error);
766 
767 	if (error32 & 0x378)
768 		sysbus_error(0, error32 & 0x378, error_found, handle_error);
769 }
770 
771 static void e752x_check_membuf(struct e752x_error_info *info,
772 			int *error_found, int handle_error)
773 {
774 	u8 stat8;
775 
776 	stat8 = info->buf_ferr;
777 
778 	if (stat8 & 0x0f) {	/* Error, so process */
779 		stat8 &= 0x0f;
780 		membuf_error(stat8, error_found, handle_error);
781 	}
782 
783 	stat8 = info->buf_nerr;
784 
785 	if (stat8 & 0x0f) {	/* Error, so process */
786 		stat8 &= 0x0f;
787 		membuf_error(stat8, error_found, handle_error);
788 	}
789 }
790 
791 static void e752x_check_dram(struct mem_ctl_info *mci,
792 			struct e752x_error_info *info, int *error_found,
793 			int handle_error)
794 {
795 	u16 error_one, error_next;
796 
797 	error_one = info->dram_ferr;
798 	error_next = info->dram_nerr;
799 
800 	/* decode and report errors */
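	/* bit masks used below: 0x0101 = correctable (SEC), 0x4040 = UE on
	 * log memory write, 0x2020 = memory read retry, 0x0808 = CE
	 * threshold, 0x0606 = UE from read or scrub
	 */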
801 	if (error_one & 0x0101)	/* check first error correctable */
802 		process_ce(mci, error_one, info->dram_sec1_add,
803 			info->dram_sec1_syndrome, error_found, handle_error);
804 
805 	if (error_next & 0x0101)	/* check next error correctable */
806 		process_ce(mci, error_next, info->dram_sec2_add,
807 			info->dram_sec2_syndrome, error_found, handle_error);
808 
809 	if (error_one & 0x4040)
810 		process_ue_no_info_wr(mci, error_found, handle_error);
811 
812 	if (error_next & 0x4040)
813 		process_ue_no_info_wr(mci, error_found, handle_error);
814 
815 	if (error_one & 0x2020)
816 		process_ded_retry(mci, error_one, info->dram_retr_add,
817 				error_found, handle_error);
818 
819 	if (error_next & 0x2020)
820 		process_ded_retry(mci, error_next, info->dram_retr_add,
821 				error_found, handle_error);
822 
823 	if (error_one & 0x0808)
824 		process_threshold_ce(mci, error_one, error_found, handle_error);
825 
826 	if (error_next & 0x0808)
827 		process_threshold_ce(mci, error_next, error_found,
828 				handle_error);
829 
830 	if (error_one & 0x0606)
831 		process_ue(mci, error_one, info->dram_ded_add,
832 			info->dram_scrb_add, error_found, handle_error);
833 
834 	if (error_next & 0x0606)
835 		process_ue(mci, error_next, info->dram_ded_add,
836 			info->dram_scrb_add, error_found, handle_error);
837 }
838 
839 static void e752x_get_error_info(struct mem_ctl_info *mci,
840 				 struct e752x_error_info *info)
841 {
842 	struct pci_dev *dev;
843 	struct e752x_pvt *pvt;
844 
845 	memset(info, 0, sizeof(*info));
846 	pvt = (struct e752x_pvt *)mci->pvt_info;
847 	dev = pvt->dev_d0f1;
848 	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
849 
850 	if (info->ferr_global) {
851 		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
852 			pci_read_config_dword(dev, I3100_NSI_FERR,
853 					     &info->nsi_ferr);
854 			info->hi_ferr = 0;
855 		} else {
856 			pci_read_config_byte(dev, E752X_HI_FERR,
857 					     &info->hi_ferr);
858 			info->nsi_ferr = 0;
859 		}
860 		pci_read_config_word(dev, E752X_SYSBUS_FERR,
861 				&info->sysbus_ferr);
862 		pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
863 		pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
864 		pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
865 				&info->dram_sec1_add);
866 		pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
867 				&info->dram_sec1_syndrome);
868 		pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
869 				&info->dram_ded_add);
870 		pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
871 				&info->dram_scrb_add);
872 		pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
873 				&info->dram_retr_add);
874 
875 		/* ignore the reserved bits just in case */
876 		if (info->hi_ferr & 0x7f)
877 			pci_write_config_byte(dev, E752X_HI_FERR,
878 					info->hi_ferr);
879 
880 		if (info->nsi_ferr & NSI_ERR_MASK)
881 			pci_write_config_dword(dev, I3100_NSI_FERR,
882 					info->nsi_ferr);
883 
884 		if (info->sysbus_ferr)
885 			pci_write_config_word(dev, E752X_SYSBUS_FERR,
886 					info->sysbus_ferr);
887 
888 		if (info->buf_ferr & 0x0f)
889 			pci_write_config_byte(dev, E752X_BUF_FERR,
890 					info->buf_ferr);
891 
892 		if (info->dram_ferr)
893 			pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_FERR,
894 					 info->dram_ferr, info->dram_ferr);
895 
896 		pci_write_config_dword(dev, E752X_FERR_GLOBAL,
897 				info->ferr_global);
898 	}
899 
900 	pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
901 
902 	if (info->nerr_global) {
903 		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
904 			pci_read_config_dword(dev, I3100_NSI_NERR,
905 					     &info->nsi_nerr);
906 			info->hi_nerr = 0;
907 		} else {
908 			pci_read_config_byte(dev, E752X_HI_NERR,
909 					     &info->hi_nerr);
910 			info->nsi_nerr = 0;
911 		}
912 		pci_read_config_word(dev, E752X_SYSBUS_NERR,
913 				&info->sysbus_nerr);
914 		pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
915 		pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
916 		pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
917 				&info->dram_sec2_add);
918 		pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
919 				&info->dram_sec2_syndrome);
920 
921 		if (info->hi_nerr & 0x7f)
922 			pci_write_config_byte(dev, E752X_HI_NERR,
923 					info->hi_nerr);
924 
925 		if (info->nsi_nerr & NSI_ERR_MASK)
926 			pci_write_config_dword(dev, I3100_NSI_NERR,
927 					info->nsi_nerr);
928 
929 		if (info->sysbus_nerr)
930 			pci_write_config_word(dev, E752X_SYSBUS_NERR,
931 					info->sysbus_nerr);
932 
933 		if (info->buf_nerr & 0x0f)
934 			pci_write_config_byte(dev, E752X_BUF_NERR,
935 					info->buf_nerr);
936 
937 		if (info->dram_nerr)
938 			pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_NERR,
939 					 info->dram_nerr, info->dram_nerr);
940 
941 		pci_write_config_dword(dev, E752X_NERR_GLOBAL,
942 				info->nerr_global);
943 	}
944 }
945 
946 static int e752x_process_error_info(struct mem_ctl_info *mci,
947 				struct e752x_error_info *info,
948 				int handle_errors)
949 {
950 	u32 error32, stat32;
951 	int error_found;
952 
953 	error_found = 0;
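	/* bits 27:18 of the global error registers are decoded as fatal
	 * sources and bits 14:4 as non-fatal ones, both against the
	 * global_message[] table
	 */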
954 	error32 = (info->ferr_global >> 18) & 0x3ff;
955 	stat32 = (info->ferr_global >> 4) & 0x7ff;
956 
957 	if (error32)
958 		global_error(1, error32, &error_found, handle_errors);
959 
960 	if (stat32)
961 		global_error(0, stat32, &error_found, handle_errors);
962 
963 	error32 = (info->nerr_global >> 18) & 0x3ff;
964 	stat32 = (info->nerr_global >> 4) & 0x7ff;
965 
966 	if (error32)
967 		global_error(1, error32, &error_found, handle_errors);
968 
969 	if (stat32)
970 		global_error(0, stat32, &error_found, handle_errors);
971 
972 	e752x_check_hub_interface(info, &error_found, handle_errors);
973 	e752x_check_ns_interface(info, &error_found, handle_errors);
974 	e752x_check_sysbus(info, &error_found, handle_errors);
975 	e752x_check_membuf(info, &error_found, handle_errors);
976 	e752x_check_dram(mci, info, &error_found, handle_errors);
977 	return error_found;
978 }
979 
980 static void e752x_check(struct mem_ctl_info *mci)
981 {
982 	struct e752x_error_info info;
983 
984 	edac_dbg(3, "\n");
985 	e752x_get_error_info(mci, &info);
986 	e752x_process_error_info(mci, &info, 1);
987 }
988 
989 /* Program byte/sec bandwidth scrub rate to hardware */
990 static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
991 {
992 	const struct scrubrate *scrubrates;
993 	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
994 	struct pci_dev *pdev = pvt->dev_d0f0;
995 	int i;
996 
997 	if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
998 		scrubrates = scrubrates_i3100;
999 	else
1000 		scrubrates = scrubrates_e752x;
1001 
1002 	/* Translate the desired scrub rate to an e752x/3100 register value.
1003 	 * Search for the bandwidth that is equal to or greater than the
1004 	 * desired rate and program the corresponding register value.
1005 	 */
1006 	for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
1007 		if (scrubrates[i].bandwidth >= new_bw)
1008 			break;
1009 
1010 	if (scrubrates[i].bandwidth == SDRATE_EOT)
1011 		return -1;
1012 
1013 	pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
1014 
1015 	return scrubrates[i].bandwidth;
1016 }
1017 
1018 /* Convert current scrub rate value into byte/sec bandwidth */
1019 static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
1020 {
1021 	const struct scrubrate *scrubrates;
1022 	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
1023 	struct pci_dev *pdev = pvt->dev_d0f0;
1024 	u16 scrubval;
1025 	int i;
1026 
1027 	if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
1028 		scrubrates = scrubrates_i3100;
1029 	else
1030 		scrubrates = scrubrates_e752x;
1031 
1032 	/* Find the bandwidth matching the memory scrubber configuration */
1033 	pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
1034 	scrubval = scrubval & 0x0f;
1035 
1036 	for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
1037 		if (scrubrates[i].scrubval == scrubval)
1038 			break;
1039 
1040 	if (scrubrates[i].bandwidth == SDRATE_EOT) {
1041 		e752x_printk(KERN_WARNING,
1042 			"Invalid sdram scrub control value: 0x%x\n", scrubval);
1043 		return -1;
1044 	}
1045 	return scrubrates[i].bandwidth;
1046 
1047 }
1048 
1049 /* Return 1 if dual channel mode is active.  Else return 0. */
1050 static inline int dual_channel_active(u16 ddrcsr)
1051 {
1052 	return (((ddrcsr >> 12) & 3) == 3);
1053 }
1054 
1055 /* Remap csrow index numbers if map_type is "reverse"
1056  */
1057 static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
1058 {
1059 	struct e752x_pvt *pvt = mci->pvt_info;
1060 
1061 	if (!pvt->map_type)
1062 		return (7 - index);
1063 
1064 	return (index);
1065 }
1066 
1067 static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1068 			u16 ddrcsr)
1069 {
1070 	struct csrow_info *csrow;
1071 	enum edac_type edac_mode;
1072 	unsigned long last_cumul_size;
1073 	int index, mem_dev, drc_chan;
1074 	int drc_drbg;		/* DRB granularity 1=64MB, 2=128MB */
1075 	int drc_ddim;		/* DRAM Data Integrity Mode 0=none, 2=edac */
1076 	u8 value;
1077 	u32 dra, drc, cumul_size, i, nr_pages;
1078 
1079 	dra = 0;
1080 	for (index = 0; index < 4; index++) {
1081 		u8 dra_reg;
1082 		pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
1083 		dra |= dra_reg << (index * 8);
1084 	}
1085 	pci_read_config_dword(pdev, E752X_DRC, &drc);
1086 	drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
1087 	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
1088 	drc_ddim = (drc >> 20) & 0x3;
1089 
1090 	/* The DRAM row boundary (DRB) reg values are the boundary addresses for
1091 	 * each DRAM row with a granularity of 64 or 128MB (single/dual
1092 	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
1093 	 * contain the total memory contained in all eight rows.
1094 	 */
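	/*
	 * Worked example (assuming PAGE_SHIFT == 12, i.e. 4 KiB pages): in
	 * dual channel mode drc_drbg is 2, so a DRB value of 4 marks a
	 * 512 MiB boundary and cumul_size becomes 4 << (25 + 2 - 12) =
	 * 131072 pages.
	 */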
1095 	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
1096 		/* mem_dev 0=x8, 1=x4 */
1097 		mem_dev = (dra >> (index * 4 + 2)) & 0x3;
1098 		csrow = mci->csrows[remap_csrow_index(mci, index)];
1099 
1100 		mem_dev = (mem_dev == 2);
1101 		pci_read_config_byte(pdev, E752X_DRB + index, &value);
1102 		/* convert the DRB value (in 64 or 128 MiB units) into pages. */
1103 		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
1104 		edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
1105 		if (cumul_size == last_cumul_size)
1106 			continue;	/* not populated */
1107 
1108 		csrow->first_page = last_cumul_size;
1109 		csrow->last_page = cumul_size - 1;
1110 		nr_pages = cumul_size - last_cumul_size;
1111 		last_cumul_size = cumul_size;
1112 
1113 		/*
1114 		 * if single channel or x8 devices then SECDED
1115 		 * if dual channel and x4 then S4ECD4ED
1116 		 */
1117 		if (drc_ddim) {
1118 			if (drc_chan && mem_dev) {
1119 				edac_mode = EDAC_S4ECD4ED;
1120 				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
1121 			} else {
1122 				edac_mode = EDAC_SECDED;
1123 				mci->edac_cap |= EDAC_FLAG_SECDED;
1124 			}
1125 		} else
1126 			edac_mode = EDAC_NONE;
1127 		for (i = 0; i < csrow->nr_channels; i++) {
1128 			struct dimm_info *dimm = csrow->channels[i]->dimm;
1129 
1130 			edac_dbg(3, "Initializing rank at (%i,%i)\n", index, i);
1131 			dimm->nr_pages = nr_pages / csrow->nr_channels;
1132 			dimm->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
1133 			dimm->mtype = MEM_RDDR;	/* only one type supported */
1134 			dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
1135 			dimm->edac_mode = edac_mode;
1136 		}
1137 	}
1138 }
1139 
1140 static void e752x_init_mem_map_table(struct pci_dev *pdev,
1141 				struct e752x_pvt *pvt)
1142 {
1143 	int index;
1144 	u8 value, last, row;
1145 
1146 	last = 0;
1147 	row = 0;
1148 
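	/*
	 * Example outcome: a single double-sided dimm in the first slot and
	 * all other slots empty should yield map = { 0, 1, 0xff, 0xff, 0xff,
	 * 0xff, 0xff, 0xff }, i.e. rows 0 and 1 map to that dimm and the
	 * remaining rows are flagged empty.
	 */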
1149 	for (index = 0; index < 8; index += 2) {
1150 		pci_read_config_byte(pdev, E752X_DRB + index, &value);
1151 		/* test if there is a dimm in this slot */
1152 		if (value == last) {
1153 			/* no dimm in the slot, so flag it as empty */
1154 			pvt->map[index] = 0xff;
1155 			pvt->map[index + 1] = 0xff;
1156 		} else {	/* there is a dimm in the slot */
1157 			pvt->map[index] = row;
1158 			row++;
1159 			last = value;
1160 			/* test the next value to see if the dimm is double
1161 			 * sided
1162 			 */
1163 			pci_read_config_byte(pdev, E752X_DRB + index + 1,
1164 					&value);
1165 
1166 			/* single sided dimm (DRB value unchanged): flag odd row empty */
1167 			/* double sided dimm: save the next row # for the second side */
1168 			pvt->map[index + 1] = (value == last) ? 0xff : row;
1169 			row++;
1170 			last = value;
1171 		}
1172 	}
1173 }
1174 
1175 /* Return 0 on success or 1 on failure. */
1176 static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
1177 			struct e752x_pvt *pvt)
1178 {
1179 	pvt->dev_d0f1 = pci_get_device(PCI_VENDOR_ID_INTEL,
1180 				pvt->dev_info->err_dev, NULL);
1181 
1182 	if (pvt->dev_d0f1 == NULL) {
1183 		pvt->dev_d0f1 = pci_scan_single_device(pdev->bus,
1184 							PCI_DEVFN(0, 1));
1185 		pci_dev_get(pvt->dev_d0f1);
1186 	}
1187 
1188 	if (pvt->dev_d0f1 == NULL) {
1189 		e752x_printk(KERN_ERR, "error reporting device not found: "
1190 			"vendor %x device 0x%x (broken BIOS?)\n",
1191 			PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
1192 		return 1;
1193 	}
1194 
1195 	pvt->dev_d0f0 = pci_get_device(PCI_VENDOR_ID_INTEL,
1196 				e752x_devs[dev_idx].ctl_dev,
1197 				NULL);
1198 
1199 	if (pvt->dev_d0f0 == NULL)
1200 		goto fail;
1201 
1202 	return 0;
1203 
1204 fail:
1205 	pci_dev_put(pvt->dev_d0f1);
1206 	return 1;
1207 }
1208 
1209 /* Setup system bus parity mask register.
1210  * Sysbus parity supported on:
1211  * e7320/e7520/e7525 + Xeon
1212  */
1213 static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
1214 {
1215 	char *cpu_id = cpu_data(0).x86_model_id;
1216 	struct pci_dev *dev = pvt->dev_d0f1;
1217 	int enable = 1;
1218 
1219 	/* Allow module parameter override, else see if CPU supports parity */
1220 	if (sysbus_parity != -1) {
1221 		enable = sysbus_parity;
1222 	} else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
1223 		e752x_printk(KERN_INFO, "System Bus Parity not "
1224 			     "supported by CPU, disabling\n");
1225 		enable = 0;
1226 	}
1227 
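	/* 0x0000 unmasks all system bus errors; 0x0309 masks bits 0, 3, 8
	 * and 9, which correspond to the parity entries in sysbus_message[]
	 */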
1228 	if (enable)
1229 		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
1230 	else
1231 		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
1232 }
1233 
1234 static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
1235 {
1236 	struct pci_dev *dev;
1237 
1238 	dev = pvt->dev_d0f1;
1239 	/* Turn off error disable & SMI in case the BIOS turned it on */
1240 	if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
1241 		pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
1242 		pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
1243 	} else {
1244 		pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
1245 		pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
1246 	}
1247 
1248 	e752x_init_sysbus_parity_mask(pvt);
1249 
1250 	pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
1251 	pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
1252 	pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
1253 	pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
1254 	pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
1255 }
1256 
1257 static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1258 {
1259 	u16 pci_data;
1260 	u8 stat8;
1261 	struct mem_ctl_info *mci;
1262 	struct edac_mc_layer layers[2];
1263 	struct e752x_pvt *pvt;
1264 	u16 ddrcsr;
1265 	int drc_chan;		/* Number of channels 0=1chan,1=2chan */
1266 	struct e752x_error_info discard;
1267 
1268 	edac_dbg(0, "mci\n");
1269 	edac_dbg(0, "Starting Probe1\n");
1270 
1271 	/* check to see if device 0 function 1 is enabled; if it isn't, we
1272 	 * assume the BIOS has reserved it for a reason and is expecting
1273 	 * exclusive access; we take care not to violate that assumption and
1274 	 * fail the probe. */
1275 	pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
1276 	if (!force_function_unhide && !(stat8 & (1 << 5))) {
1277 		printk(KERN_INFO "Contact your BIOS vendor to see if the "
1278 			"E752x error registers can be safely un-hidden\n");
1279 		return -ENODEV;
1280 	}
1281 	stat8 |= (1 << 5);
1282 	pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
1283 
1284 	pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
1285 	/* FIXME: should check >>12 or 0xf, true for all? */
1286 	/* Dual channel = 1, Single channel = 0 */
1287 	drc_chan = dual_channel_active(ddrcsr);
1288 
1289 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1290 	layers[0].size = E752X_NR_CSROWS;
1291 	layers[0].is_virt_csrow = true;
1292 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
1293 	layers[1].size = drc_chan + 1;
1294 	layers[1].is_virt_csrow = false;
1295 	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1296 	if (mci == NULL)
1297 		return -ENOMEM;
1298 
1299 	edac_dbg(3, "init mci\n");
1300 	mci->mtype_cap = MEM_FLAG_RDDR;
1301 	/* 3100 IMCH supports SECDED only */
1302 	mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
1303 		(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
1304 	/* FIXME - what if different memory types are in different csrows? */
1305 	mci->mod_name = EDAC_MOD_STR;
1306 	mci->mod_ver = E752X_REVISION;
1307 	mci->pdev = &pdev->dev;
1308 
1309 	edac_dbg(3, "init pvt\n");
1310 	pvt = (struct e752x_pvt *)mci->pvt_info;
1311 	pvt->dev_info = &e752x_devs[dev_idx];
1312 	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
1313 
1314 	if (e752x_get_devs(pdev, dev_idx, pvt)) {
1315 		edac_mc_free(mci);
1316 		return -ENODEV;
1317 	}
1318 
1319 	edac_dbg(3, "more mci init\n");
1320 	mci->ctl_name = pvt->dev_info->ctl_name;
1321 	mci->dev_name = pci_name(pdev);
1322 	mci->edac_check = e752x_check;
1323 	mci->ctl_page_to_phys = ctl_page_to_phys;
1324 	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
1325 	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
1326 
1327 	/* set the map type.  1 = normal, 0 = reversed
1328 	 * Must be set before e752x_init_csrows in case csrow mapping
1329 	 * is reversed.
1330 	 */
1331 	pci_read_config_byte(pdev, E752X_DRM, &stat8);
1332 	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
1333 
1334 	e752x_init_csrows(mci, pdev, ddrcsr);
1335 	e752x_init_mem_map_table(pdev, pvt);
1336 
1337 	if (dev_idx == I3100)
1338 		mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
1339 	else
1340 		mci->edac_cap |= EDAC_FLAG_NONE;
1341 	edac_dbg(3, "tolm, remapbase, remaplimit\n");
1342 
1343 	/* load the top of low memory, remap base, and remap limit vars */
1344 	pci_read_config_word(pdev, E752X_TOLM, &pci_data);
1345 	pvt->tolm = ((u32) pci_data) << 4;
1346 	pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
1347 	pvt->remapbase = ((u32) pci_data) << 14;
1348 	pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
1349 	pvt->remaplimit = ((u32) pci_data) << 14;
1350 	e752x_printk(KERN_INFO,
1351 			"tolm = %x, remapbase = %x, remaplimit = %x\n",
1352 			pvt->tolm, pvt->remapbase, pvt->remaplimit);
1353 
1354 	/* Here we assume that we will never see multiple instances of this
1355 	 * type of memory controller.  The ID is therefore hardcoded to 0.
1356 	 */
1357 	if (edac_mc_add_mc(mci)) {
1358 		edac_dbg(3, "failed edac_mc_add_mc()\n");
1359 		goto fail;
1360 	}
1361 
1362 	e752x_init_error_reporting_regs(pvt);
1363 	e752x_get_error_info(mci, &discard);	/* clear other MCH errors */
1364 
1365 	/* allocating generic PCI control info */
1366 	e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1367 	if (!e752x_pci) {
1368 		printk(KERN_WARNING
1369 			"%s(): Unable to create PCI control\n", __func__);
1370 		printk(KERN_WARNING
1371 			"%s(): PCI error report via EDAC not setup\n",
1372 			__func__);
1373 	}
1374 
1375 	/* get this far and it's successful */
1376 	edac_dbg(3, "success\n");
1377 	return 0;
1378 
1379 fail:
1380 	pci_dev_put(pvt->dev_d0f0);
1381 	pci_dev_put(pvt->dev_d0f1);
1382 	edac_mc_free(mci);
1383 
1384 	return -ENODEV;
1385 }
1386 
1387 /* returns count (>= 0), or negative on error */
1388 static int e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1389 {
1390 	edac_dbg(0, "\n");
1391 
1392 	/* wake up and enable device */
1393 	if (pci_enable_device(pdev) < 0)
1394 		return -EIO;
1395 
1396 	return e752x_probe1(pdev, ent->driver_data);
1397 }
1398 
1399 static void e752x_remove_one(struct pci_dev *pdev)
1400 {
1401 	struct mem_ctl_info *mci;
1402 	struct e752x_pvt *pvt;
1403 
1404 	edac_dbg(0, "\n");
1405 
1406 	if (e752x_pci)
1407 		edac_pci_release_generic_ctl(e752x_pci);
1408 
1409 	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
1410 		return;
1411 
1412 	pvt = (struct e752x_pvt *)mci->pvt_info;
1413 	pci_dev_put(pvt->dev_d0f0);
1414 	pci_dev_put(pvt->dev_d0f1);
1415 	edac_mc_free(mci);
1416 }
1417 
1418 static const struct pci_device_id e752x_pci_tbl[] = {
1419 	{
1420 	 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1421 	 E7520},
1422 	{
1423 	 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1424 	 E7525},
1425 	{
1426 	 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1427 	 E7320},
1428 	{
1429 	 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1430 	 I3100},
1431 	{
1432 	 0,
1433 	 }			/* 0 terminated list. */
1434 };
1435 
1436 MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1437 
1438 static struct pci_driver e752x_driver = {
1439 	.name = EDAC_MOD_STR,
1440 	.probe = e752x_init_one,
1441 	.remove = e752x_remove_one,
1442 	.id_table = e752x_pci_tbl,
1443 };
1444 
1445 static int __init e752x_init(void)
1446 {
1447 	int pci_rc;
1448 
1449 	edac_dbg(3, "\n");
1450 
1451 	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
1452 	opstate_init();
1453 
1454 	pci_rc = pci_register_driver(&e752x_driver);
1455 	return (pci_rc < 0) ? pci_rc : 0;
1456 }
1457 
1458 static void __exit e752x_exit(void)
1459 {
1460 	edac_dbg(3, "\n");
1461 	pci_unregister_driver(&e752x_driver);
1462 }
1463 
1464 module_init(e752x_init);
1465 module_exit(e752x_exit);
1466 
1467 MODULE_LICENSE("GPL");
1468 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
1469 MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
1470 
1471 module_param(force_function_unhide, int, 0444);
1472 MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
1473 		 " 1=force unhide and hope BIOS doesn't fight driver for "
1474 		"Dev0:Fun1 access");
1475 
1476 module_param(edac_op_state, int, 0444);
1477 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1478 
1479 module_param(sysbus_parity, int, 0444);
1480 MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
1481 		" 1=enable system bus parity checking, default=auto-detect");
1482 module_param(report_non_memory_errors, int, 0644);
1483 MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
1484 		"reporting, 1=enable non-memory error reporting");
1485
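/*
 * A hypothetical load-time example for the parameters above (module and
 * parameter names as defined in this file):
 *	modprobe e752x_edac report_non_memory_errors=1 sysbus_parity=0
 */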