1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel 7300 class Memory Controllers kernel module (Clarksboro)
4  *
5  * Copyright (c) 2010 by:
6  *	 Mauro Carvalho Chehab
7  *
8  * Red Hat Inc. http://www.redhat.com
9  *
10  * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
11  *	http://www.intel.com/Assets/PDF/datasheet/318082.pdf
12  *
13  * TODO: The chipset also allows checking for PCI Express errors. Currently,
14  *	 the driver covers only memory errors.
15  *
16  * This driver uses the "csrows" EDAC attribute to represent the DIMM slot#
17  */
18 
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/pci.h>
22 #include <linux/pci_ids.h>
23 #include <linux/slab.h>
24 #include <linux/edac.h>
25 #include <linux/mmzone.h>
26 
27 #include "edac_module.h"
28 
29 /*
30  * Alter this version for the I7300 module when modifications are made
31  */
32 #define I7300_REVISION    " Ver: 1.0.0"
33 
34 #define EDAC_MOD_STR      "i7300_edac"
35 
36 #define i7300_printk(level, fmt, arg...) \
37 	edac_printk(level, "i7300", fmt, ##arg)
38 
39 #define i7300_mc_printk(mci, level, fmt, arg...) \
40 	edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)
41 
42 /***********************************************
43  * i7300 Limit constants, structs and static vars
44  ***********************************************/
45 
46 /*
47  * Memory topology is organized as:
48  *	Branch 0 - 2 channels: channels 0 and 1 (FBD0 PCI dev 21.0)
49  *	Branch 1 - 2 channels: channels 2 and 3 (FBD1 PCI dev 22.0)
50  * Each channel can have up to 8 DIMM sets (called SLOTS)
51  * Slots should generally be filled in pairs
52  *	Except in Single Channel mode of operation
53  *		only slot 0/channel 0 is filled in this mode
54  *	In normal operation mode, the two channels on a branch should be
55  *		filled together for the same SLOT#
56  * When in mirrored mode, Branch 1 replicates the memory at Branch 0, so
57  *		the four channels on both branches should be filled
58  */
59 
60 /* Limits for i7300 */
61 #define MAX_SLOTS		8
62 #define MAX_BRANCHES		2
63 #define MAX_CH_PER_BRANCH	2
64 #define MAX_CHANNELS		(MAX_CH_PER_BRANCH * MAX_BRANCHES)
65 #define MAX_MIR			3
66 
67 #define to_channel(ch, branch)	(((branch) << 1) | (ch))
68 
69 #define to_csrow(slot, ch, branch)					\
70 		(to_channel(ch, branch) | ((slot) << 2))
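/*
 * Illustrative example (not from the datasheet): slot 3 on channel 1 of
 * branch 1 maps to channel to_channel(1, 1) = 3 and to
 * csrow to_csrow(3, 1, 1) = (3 | (3 << 2)) = 15.
 */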
71 
72 /* Device name and register DID (Device ID) */
73 struct i7300_dev_info {
74 	const char *ctl_name;	/* name for this device */
75 	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
76 };
77 
78 /* Table of device attributes supported by this driver */
79 static const struct i7300_dev_info i7300_devs[] = {
80 	{
81 		.ctl_name = "I7300",
82 		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
83 	},
84 };
85 
86 struct i7300_dimm_info {
87 	int megabytes;		/* size, 0 means not present  */
88 };
89 
90 /* driver private data structure */
91 struct i7300_pvt {
92 	struct pci_dev *pci_dev_16_0_fsb_ctlr;		/* 16.0 */
93 	struct pci_dev *pci_dev_16_1_fsb_addr_map;	/* 16.1 */
94 	struct pci_dev *pci_dev_16_2_fsb_err_regs;	/* 16.2 */
95 	struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES];	/* 21.0  and 22.0 */
96 
97 	u16 tolm;				/* top of low memory */
98 	u64 ambase;				/* AMB BAR */
99 
100 	u32 mc_settings;			/* Report several settings */
101 	u32 mc_settings_a;
102 
103 	u16 mir[MAX_MIR];			/* Memory Interleave Regs */
104 
105 	u16 mtr[MAX_SLOTS][MAX_BRANCHES];	/* Memory Technology Regs */
106 	u16 ambpresent[MAX_CHANNELS];		/* AMB present regs */
107 
108 	/* DIMM information matrix, allocating architecture maximums */
109 	struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];
110 
111 	/* Temporary buffer for use when preparing error messages */
112 	char *tmp_prt_buffer;
113 };
114 
115 /* FIXME: Why do we need to have this static? */
116 static struct edac_pci_ctl_info *i7300_pci;
117 
118 /***************************************************
119  * i7300 Register definitions for memory enumeration
120  ***************************************************/
121 
122 /*
123  * Device 16,
124  * Function 0: System Address (not documented)
125  * Function 1: Memory Branch Map, Control, Errors Register
126  */
127 
128 	/* OFFSETS for Function 0 */
129 #define AMBASE			0x48 /* AMB Mem Mapped Reg Region Base */
130 #define MAXCH			0x56 /* Max Channel Number */
131 #define MAXDIMMPERCH		0x57 /* Max DIMM PER Channel Number */
132 
133 	/* OFFSETS for Function 1 */
134 #define MC_SETTINGS		0x40
135   #define IS_MIRRORED(mc)		((mc) & (1 << 16))
136   #define IS_ECC_ENABLED(mc)		((mc) & (1 << 5))
137   #define IS_RETRY_ENABLED(mc)		((mc) & (1 << 31))
138   #define IS_SCRBALGO_ENHANCED(mc)	((mc) & (1 << 8))
139 
140 #define MC_SETTINGS_A		0x58
141   #define IS_SINGLE_MODE(mca)		((mca) & (1 << 14))
142 
143 #define TOLM			0x6C
144 
145 #define MIR0			0x80
146 #define MIR1			0x84
147 #define MIR2			0x88
148 
149 /*
150  * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available
151  * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
152  * seems that we cannot use this information directly for the same purpose.
153  * Each memory slot may have up to 2 AMB interfaces, one for the inbound
154  * interface and another for the outbound interface to the next slot.
155  * For now, the driver just stores the AMB present registers, but relies only
156  * on the MTR info to detect memory.
157  * The datasheet is also not clear about how to map each AMBPRESENT register
158  * to one of the 4 available channels.
159  */
160 #define AMBPRESENT_0	0x64
161 #define AMBPRESENT_1	0x66
162 
163 static const u16 mtr_regs[MAX_SLOTS] = {
164 	0x80, 0x84, 0x88, 0x8c,
165 	0x82, 0x86, 0x8a, 0x8e
166 };
167 
168 /*
169  * Defines to extract the various fields from the
170  *	MTRx - Memory Technology Registers
171  */
172 #define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 8))
173 #define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 7))
174 #define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
175 #define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 5)) ? 8 : 4)
176 #define MTR_DIMM_RANKS(mtr)		(((mtr) & (1 << 4)) ? 1 : 0)
177 #define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
178 #define MTR_DRAM_BANKS_ADDR_BITS	2
179 #define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
180 #define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
181 #define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
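/*
 * Worked example with a hypothetical register value: mtr = 0x0155 decodes,
 * per the macros above, as DIMM present, x8 width, 4 banks, double rank,
 * 14 row address bits and 11 column address bits.
 */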
182 
183 /************************************************
184  * i7300 Register definitions for error detection
185  ************************************************/
186 
187 /*
188  * Device 16.1: FBD Error Registers
189  */
190 #define FERR_FAT_FBD	0x98
191 static const char *ferr_fat_fbd_name[] = {
192 	[22] = "Non-Redundant Fast Reset Timeout",
193 	[2]  = ">Tmid Thermal event with intelligent throttling disabled",
194 	[1]  = "Memory or FBD configuration CRC read error",
195 	[0]  = "Memory Write error on non-redundant retry or "
196 	       "FBD configuration Write error on retry",
197 };
198 #define GET_FBD_FAT_IDX(fbderr)	(((fbderr) >> 28) & 3)
199 #define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
200 
201 #define FERR_NF_FBD	0xa0
202 static const char *ferr_nf_fbd_name[] = {
203 	[24] = "DIMM-Spare Copy Completed",
204 	[23] = "DIMM-Spare Copy Initiated",
205 	[22] = "Redundant Fast Reset Timeout",
206 	[21] = "Memory Write error on redundant retry",
207 	[18] = "SPD protocol Error",
208 	[17] = "FBD Northbound parity error on FBD Sync Status",
209 	[16] = "Correctable Patrol Data ECC",
210 	[15] = "Correctable Resilver- or Spare-Copy Data ECC",
211 	[14] = "Correctable Mirrored Demand Data ECC",
212 	[13] = "Correctable Non-Mirrored Demand Data ECC",
213 	[11] = "Memory or FBD configuration CRC read error",
214 	[10] = "FBD Configuration Write error on first attempt",
215 	[9]  = "Memory Write error on first attempt",
216 	[8]  = "Non-Aliased Uncorrectable Patrol Data ECC",
217 	[7]  = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
218 	[6]  = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
219 	[5]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
220 	[4]  = "Aliased Uncorrectable Patrol Data ECC",
221 	[3]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
222 	[2]  = "Aliased Uncorrectable Mirrored Demand Data ECC",
223 	[1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
224 	[0]  = "Uncorrectable Data ECC on Replay",
225 };
226 #define GET_FBD_NF_IDX(fbderr)	(((fbderr) >> 28) & 3)
227 #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
228 			      (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
229 			      (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
230 			      (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
231 			      (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
232 			      (1 << 1)  | (1 << 0))
233 
234 #define EMASK_FBD	0xa8
235 #define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
236 			    (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
237 			    (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
238 			    (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
239 			    (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
240 			    (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
241 			    (1 << 1)  | (1 << 0))
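/*
 * The bits set in EMASK_FBD_ERR_MASK are the FBD error sources that this
 * driver unmasks: i7300_enable_error_reporting() clears them in EMASK_FBD,
 * since a '0' in that register enables reporting.
 */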
242 
243 /*
244  * Device 16.2: Global Error Registers
245  */
246 
247 #define FERR_GLOBAL_HI	0x48
248 static const char *ferr_global_hi_name[] = {
249 	[3] = "FSB 3 Fatal Error",
250 	[2] = "FSB 2 Fatal Error",
251 	[1] = "FSB 1 Fatal Error",
252 	[0] = "FSB 0 Fatal Error",
253 };
254 #define ferr_global_hi_is_fatal(errno)	1
255 
256 #define FERR_GLOBAL_LO	0x40
257 static const char *ferr_global_lo_name[] = {
258 	[31] = "Internal MCH Fatal Error",
259 	[30] = "Intel QuickData Technology Device Fatal Error",
260 	[29] = "FSB1 Fatal Error",
261 	[28] = "FSB0 Fatal Error",
262 	[27] = "FBD Channel 3 Fatal Error",
263 	[26] = "FBD Channel 2 Fatal Error",
264 	[25] = "FBD Channel 1 Fatal Error",
265 	[24] = "FBD Channel 0 Fatal Error",
266 	[23] = "PCI Express Device 7 Fatal Error",
267 	[22] = "PCI Express Device 6 Fatal Error",
268 	[21] = "PCI Express Device 5 Fatal Error",
269 	[20] = "PCI Express Device 4 Fatal Error",
270 	[19] = "PCI Express Device 3 Fatal Error",
271 	[18] = "PCI Express Device 2 Fatal Error",
272 	[17] = "PCI Express Device 1 Fatal Error",
273 	[16] = "ESI Fatal Error",
274 	[15] = "Internal MCH Non-Fatal Error",
275 	[14] = "Intel QuickData Technology Device Non Fatal Error",
276 	[13] = "FSB1 Non-Fatal Error",
277 	[12] = "FSB 0 Non-Fatal Error",
278 	[11] = "FBD Channel 3 Non-Fatal Error",
279 	[10] = "FBD Channel 2 Non-Fatal Error",
280 	[9]  = "FBD Channel 1 Non-Fatal Error",
281 	[8]  = "FBD Channel 0 Non-Fatal Error",
282 	[7]  = "PCI Express Device 7 Non-Fatal Error",
283 	[6]  = "PCI Express Device 6 Non-Fatal Error",
284 	[5]  = "PCI Express Device 5 Non-Fatal Error",
285 	[4]  = "PCI Express Device 4 Non-Fatal Error",
286 	[3]  = "PCI Express Device 3 Non-Fatal Error",
287 	[2]  = "PCI Express Device 2 Non-Fatal Error",
288 	[1]  = "PCI Express Device 1 Non-Fatal Error",
289 	[0]  = "ESI Non-Fatal Error",
290 };
291 #define ferr_global_lo_is_fatal(errno)	((errno < 16) ? 0 : 1)
292 
293 #define NRECMEMA	0xbe
294   #define NRECMEMA_BANK(v)	(((v) >> 12) & 7)
295   #define NRECMEMA_RANK(v)	(((v) >> 8) & 15)
296 
297 #define NRECMEMB	0xc0
298   #define NRECMEMB_IS_WR(v)	((v) & (1 << 31))
299   #define NRECMEMB_CAS(v)	(((v) >> 16) & 0x1fff)
300   #define NRECMEMB_RAS(v)	((v) & 0xffff)
301 
302 #define REDMEMA		0xdc
303 
304 #define REDMEMB		0x7c
305 
306 #define RECMEMA		0xe0
307   #define RECMEMA_BANK(v)	(((v) >> 12) & 7)
308   #define RECMEMA_RANK(v)	(((v) >> 8) & 15)
309 
310 #define RECMEMB		0xe4
311   #define RECMEMB_IS_WR(v)	((v) & (1 << 31))
312   #define RECMEMB_CAS(v)	(((v) >> 16) & 0x1fff)
313   #define RECMEMB_RAS(v)	((v) & 0xffff)
314 
315 /********************************************
316  * i7300 Functions related to error detection
317  ********************************************/
318 
319 /**
320  * get_err_from_table() - Gets the error message from a table
321  * @table:	table name (array of char *)
322  * @size:	number of elements in the table
323  * @pos:	position of the element to be returned
324  *
325  * This is a small routine that gets the pos-th element of a table. If the
326  * element doesn't exist (or it is empty), it returns "Reserved".
327  * Instead of calling it directly, it is better to call it via the
328  * GET_ERR_FROM_TABLE() macro, which automatically checks the table size
329  * via the ARRAY_SIZE() macro.
330  */
331 static const char *get_err_from_table(const char *table[], int size, int pos)
332 {
333 	if (unlikely(pos >= size))
334 		return "Reserved";
335 
336 	if (unlikely(!table[pos]))
337 		return "Reserved";
338 
339 	return table[pos];
340 }
341 
342 #define GET_ERR_FROM_TABLE(table, pos)				\
343 	get_err_from_table(table, ARRAY_SIZE(table), pos)
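/*
 * Usage example (illustrative): GET_ERR_FROM_TABLE(ferr_global_hi_name, 2)
 * expands to get_err_from_table(ferr_global_hi_name, 4, 2) and yields
 * "FSB 2 Fatal Error"; an out-of-range or empty position yields "Reserved".
 */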
344 
345 /**
346  * i7300_process_error_global() - Retrieves the hardware error information
347  *				  from the global error registers and sends
348  *				  it to dmesg
349  * @mci: struct mem_ctl_info pointer
350  */
351 static void i7300_process_error_global(struct mem_ctl_info *mci)
352 {
353 	struct i7300_pvt *pvt;
354 	u32 errnum, error_reg;
355 	unsigned long errors;
356 	const char *specific;
357 	bool is_fatal;
358 
359 	pvt = mci->pvt_info;
360 
361 	/* read in the 1st FATAL error register */
362 	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
363 			      FERR_GLOBAL_HI, &error_reg);
364 	if (unlikely(error_reg)) {
365 		errors = error_reg;
366 		errnum = find_first_bit(&errors,
367 					ARRAY_SIZE(ferr_global_hi_name));
368 		specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
369 		is_fatal = ferr_global_hi_is_fatal(errnum);
370 
371 		/* Clear the error bit */
372 		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
373 				       FERR_GLOBAL_HI, error_reg);
374 
375 		goto error_global;
376 	}
377 
378 	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
379 			      FERR_GLOBAL_LO, &error_reg);
380 	if (unlikely(error_reg)) {
381 		errors = error_reg;
382 		errnum = find_first_bit(&errors,
383 					ARRAY_SIZE(ferr_global_lo_name));
384 		specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
385 		is_fatal = ferr_global_lo_is_fatal(errnum);
386 
387 		/* Clear the error bit */
388 		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
389 				       FERR_GLOBAL_LO, error_reg);
390 
391 		goto error_global;
392 	}
393 	return;
394 
395 error_global:
396 	i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
397 			is_fatal ? "Fatal" : "NOT fatal", specific);
398 }
399 
400 /**
401  * i7300_process_fbd_error() - Retrieves the hardware error information from
402  *			       the FBD error registers and sends it via
403  *			       EDAC error API calls
404  * @mci: struct mem_ctl_info pointer
405  */
406 static void i7300_process_fbd_error(struct mem_ctl_info *mci)
407 {
408 	struct i7300_pvt *pvt;
409 	u32 errnum, value, error_reg;
410 	u16 val16;
411 	unsigned branch, channel, bank, rank, cas, ras;
412 	u32 syndrome;
413 
414 	unsigned long errors;
415 	const char *specific;
416 	bool is_wr;
417 
418 	pvt = mci->pvt_info;
419 
420 	/* read in the 1st FATAL error register */
421 	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
422 			      FERR_FAT_FBD, &error_reg);
423 	if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
424 		errors = error_reg & FERR_FAT_FBD_ERR_MASK;
425 		errnum = find_first_bit(&errors,
426 					ARRAY_SIZE(ferr_fat_fbd_name));
427 		specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
428 		branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
429 
430 		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
431 				     NRECMEMA, &val16);
432 		bank = NRECMEMA_BANK(val16);
433 		rank = NRECMEMA_RANK(val16);
434 
435 		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
436 				NRECMEMB, &value);
437 		is_wr = NRECMEMB_IS_WR(value);
438 		cas = NRECMEMB_CAS(value);
439 		ras = NRECMEMB_RAS(value);
440 
441 		/* Clean the error register */
442 		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
443 				FERR_FAT_FBD, error_reg);
444 
445 		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
446 			 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
447 			 bank, ras, cas, errors, specific);
448 
449 		edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
450 				     branch, -1, rank,
451 				     is_wr ? "Write error" : "Read error",
452 				     pvt->tmp_prt_buffer);
453 
454 	}
455 
456 	/* read in the 1st NON-FATAL error register */
457 	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
458 			      FERR_NF_FBD, &error_reg);
459 	if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
460 		errors = error_reg & FERR_NF_FBD_ERR_MASK;
461 		errnum = find_first_bit(&errors,
462 					ARRAY_SIZE(ferr_nf_fbd_name));
463 		specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
464 		branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
465 
466 		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
467 			REDMEMA, &syndrome);
468 
469 		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
470 				     RECMEMA, &val16);
471 		bank = RECMEMA_BANK(val16);
472 		rank = RECMEMA_RANK(val16);
473 
474 		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
475 				RECMEMB, &value);
476 		is_wr = RECMEMB_IS_WR(value);
477 		cas = RECMEMB_CAS(value);
478 		ras = RECMEMB_RAS(value);
479 
480 		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
481 				     REDMEMB, &value);
482 		channel = (branch << 1);
483 
484 		/* Second channel ? */
485 		channel += !!(value & BIT(17));
486 
487 		/* Clear the error bit */
488 		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
489 				FERR_NF_FBD, error_reg);
490 
491 		/* Format the error message */
492 		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
493 			 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
494 			 bank, ras, cas, errors, specific);
495 
496 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
497 				     syndrome,
498 				     branch >> 1, channel % 2, rank,
499 				     is_wr ? "Write error" : "Read error",
500 				     pvt->tmp_prt_buffer);
501 	}
502 	return;
503 }
504 
505 /**
506  * i7300_check_error() - Calls the error checking subroutines
507  * @mci: struct mem_ctl_info pointer
508  */
509 static void i7300_check_error(struct mem_ctl_info *mci)
510 {
511 	i7300_process_error_global(mci);
512 	i7300_process_fbd_error(mci);
513 }
514 
515 /**
516  * i7300_clear_error() - Clears the error registers
517  * @mci: struct mem_ctl_info pointer
518  */
519 static void i7300_clear_error(struct mem_ctl_info *mci)
520 {
521 	struct i7300_pvt *pvt = mci->pvt_info;
522 	u32 value;
523 	/*
524 	 * All error values are RWC - we need to read and write 1 to the
525 	 * bit that we want to clear
526 	 */
527 
528 	/* Clear global error registers */
529 	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
530 			      FERR_GLOBAL_HI, &value);
531 	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
532 			      FERR_GLOBAL_HI, value);
533 
534 	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
535 			      FERR_GLOBAL_LO, &value);
536 	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
537 			      FERR_GLOBAL_LO, value);
538 
539 	/* Clear FBD error registers */
540 	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
541 			      FERR_FAT_FBD, &value);
542 	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
543 			      FERR_FAT_FBD, value);
544 
545 	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
546 			      FERR_NF_FBD, &value);
547 	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
548 			      FERR_NF_FBD, value);
549 }
550 
551 /**
552  * i7300_enable_error_reporting() - Enable the memory error reporting logic
553  *				    in the hardware
554  * @mci: struct mem_ctl_info pointer
555  */
556 static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
557 {
558 	struct i7300_pvt *pvt = mci->pvt_info;
559 	u32 fbd_error_mask;
560 
561 	/* Read the FBD Error Mask Register */
562 	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
563 			      EMASK_FBD, &fbd_error_mask);
564 
565 	/* Enable with a '0' */
566 	fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);
567 
568 	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
569 			       EMASK_FBD, fbd_error_mask);
570 }
571 
572 /************************************************
573  * i7300 Functions related to memory enumeration
574  ************************************************/
575 
576 /**
577  * decode_mtr() - Decodes the MTR descriptor, filling the edac structs
578  * @pvt: pointer to the private data struct used by i7300 driver
579  * @slot: DIMM slot (0 to 7)
580  * @ch: Channel number within the branch (0 or 1)
581  * @branch: Branch number (0 or 1)
582  * @dinfo: Pointer to DIMM info where dimm size is stored
583  * @dimm:	Pointer to the struct dimm_info that corresponds to that element
584  */
585 static int decode_mtr(struct i7300_pvt *pvt,
586 		      int slot, int ch, int branch,
587 		      struct i7300_dimm_info *dinfo,
588 		      struct dimm_info *dimm)
589 {
590 	int mtr, ans, addrBits, channel;
591 
592 	channel = to_channel(ch, branch);
593 
594 	mtr = pvt->mtr[slot][branch];
595 	ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;
596 
597 	edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
598 		 slot, channel, ans ? "" : "NOT ");
599 
600 	/* Determine if there is a DIMM present in this DIMM slot */
601 	if (!ans)
602 		return 0;
603 
604 	/* Start with the number of bits for a Bank
605 	 * on the DRAM */
606 	addrBits = MTR_DRAM_BANKS_ADDR_BITS;
607 	/* Add the number of ROW bits */
608 	addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
609 	/* add the number of COLUMN bits */
610 	addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
611 	/* add the number of RANK bits */
612 	addrBits += MTR_DIMM_RANKS(mtr);
613 
614 	addrBits += 6;	/* add 64 bits per DIMM */
615 	addrBits -= 20;	/* divide by 2^20 */
616 	addrBits -= 3;	/* 8 bits per byte */
617 
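	/*
	 * Worked example (hypothetical mtr = 0x0155, as in the example near
	 * the MTR macros above): 2 bank bits + 14 row bits + 11 column bits +
	 * 1 rank bit = 28; +6 for the 64-bit DIMM data width, -20 to convert
	 * to MiB and -3 for bits-to-bytes gives 11, i.e. 2048 megabytes.
	 */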
618 	dinfo->megabytes = 1 << addrBits;
619 
620 	edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
621 
622 	edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
623 		 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
624 
625 	edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
626 	edac_dbg(2, "\t\tNUMRANK: %s\n",
627 		 MTR_DIMM_RANKS(mtr) ? "double" : "single");
628 	edac_dbg(2, "\t\tNUMROW: %s\n",
629 		 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
630 		 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
631 		 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
632 		 "65,536 - 16 rows");
633 	edac_dbg(2, "\t\tNUMCOL: %s\n",
634 		 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
635 		 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
636 		 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
637 		 "reserved");
638 	edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);
639 
640 	/*
641 	 * The type of error detection actually depends on the
642 	 * mode of operation. When it is just one single memory chip, at
643 	 * socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code.
644 	 * In normal or mirrored mode, it uses Lockstep mode,
645 	 * with the possibility of using an extended algorithm for x8 memories.
646 	 * See datasheet Sections 7.3.6 to 7.3.8.
647 	 */
648 
649 	dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
650 	dimm->grain = 8;
651 	dimm->mtype = MEM_FB_DDR2;
652 	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
653 		dimm->edac_mode = EDAC_SECDED;
654 		edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
655 	} else {
656 		edac_dbg(2, "\t\tECC code is in Lockstep mode\n");
657 		if (MTR_DRAM_WIDTH(mtr) == 8)
658 			dimm->edac_mode = EDAC_S8ECD8ED;
659 		else
660 			dimm->edac_mode = EDAC_S4ECD4ED;
661 	}
662 
663 	/* ask what device type on this row */
664 	if (MTR_DRAM_WIDTH(mtr) == 8) {
665 		edac_dbg(2, "\t\tScrub algorithm for x8 is in %s mode\n",
666 			 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
667 			 "enhanced" : "normal");
668 
669 		dimm->dtype = DEV_X8;
670 	} else
671 		dimm->dtype = DEV_X4;
672 
673 	return mtr;
674 }
675 
676 /**
677  * print_dimm_size() - Prints dump of the memory organization
678  * @pvt: pointer to the private data struct used by i7300 driver
679  *
680  * Useful for debugging. If debug is disabled, this routine does nothing.
681  */
682 static void print_dimm_size(struct i7300_pvt *pvt)
683 {
684 #ifdef CONFIG_EDAC_DEBUG
685 	struct i7300_dimm_info *dinfo;
686 	char *p;
687 	int space, n;
688 	int channel, slot;
689 
690 	space = PAGE_SIZE;
691 	p = pvt->tmp_prt_buffer;
692 
693 	n = snprintf(p, space, "              ");
694 	p += n;
695 	space -= n;
696 	for (channel = 0; channel < MAX_CHANNELS; channel++) {
697 		n = snprintf(p, space, "channel %d | ", channel);
698 		p += n;
699 		space -= n;
700 	}
701 	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
702 	p = pvt->tmp_prt_buffer;
703 	space = PAGE_SIZE;
704 	n = snprintf(p, space, "-------------------------------"
705 			       "------------------------------");
706 	p += n;
707 	space -= n;
708 	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
709 	p = pvt->tmp_prt_buffer;
710 	space = PAGE_SIZE;
711 
712 	for (slot = 0; slot < MAX_SLOTS; slot++) {
713 		n = snprintf(p, space, "csrow/SLOT %d  ", slot);
714 		p += n;
715 		space -= n;
716 
717 		for (channel = 0; channel < MAX_CHANNELS; channel++) {
718 			dinfo = &pvt->dimm_info[slot][channel];
719 			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
720 			p += n;
721 			space -= n;
722 		}
723 
724 		edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
725 		p = pvt->tmp_prt_buffer;
726 		space = PAGE_SIZE;
727 	}
728 
729 	n = snprintf(p, space, "-------------------------------"
730 			       "------------------------------");
731 	p += n;
732 	space -= n;
733 	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
734 	p = pvt->tmp_prt_buffer;
735 	space = PAGE_SIZE;
736 #endif
737 }
738 
739 /**
740  * i7300_init_csrows() - Initialize the 'csrows' table within
741  *			 the mci control structure with the
742  *			 addressing of memory.
743  * @mci: struct mem_ctl_info pointer
744  */
745 static int i7300_init_csrows(struct mem_ctl_info *mci)
746 {
747 	struct i7300_pvt *pvt;
748 	struct i7300_dimm_info *dinfo;
749 	int rc = -ENODEV;
750 	int mtr;
751 	int ch, branch, slot, channel, max_channel, max_branch;
752 	struct dimm_info *dimm;
753 
754 	pvt = mci->pvt_info;
755 
756 	edac_dbg(2, "Memory Technology Registers:\n");
757 
758 	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
759 		max_branch = 1;
760 		max_channel = 1;
761 	} else {
762 		max_branch = MAX_BRANCHES;
763 		max_channel = MAX_CH_PER_BRANCH;
764 	}
765 
766 	/* Get the AMB present registers for the four channels */
767 	for (branch = 0; branch < max_branch; branch++) {
768 		/* Read this branch's AMB present registers */
769 		channel = to_channel(0, branch);
770 		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
771 				     AMBPRESENT_0,
772 				&pvt->ambpresent[channel]);
773 		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
774 			 channel, pvt->ambpresent[channel]);
775 
776 		if (max_channel == 1)
777 			continue;
778 
779 		channel = to_channel(1, branch);
780 		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
781 				     AMBPRESENT_1,
782 				&pvt->ambpresent[channel]);
783 		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
784 			 channel, pvt->ambpresent[channel]);
785 	}
786 
787 	/* Get the set of MTR[0-7] regs by each branch */
788 	for (slot = 0; slot < MAX_SLOTS; slot++) {
789 		int where = mtr_regs[slot];
790 		for (branch = 0; branch < max_branch; branch++) {
791 			pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
792 					where,
793 					&pvt->mtr[slot][branch]);
794 			for (ch = 0; ch < max_channel; ch++) {
795 				int channel = to_channel(ch, branch);
796 
797 				dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
798 					       mci->n_layers, branch, ch, slot);
799 
800 				dinfo = &pvt->dimm_info[slot][channel];
801 
802 				mtr = decode_mtr(pvt, slot, ch, branch,
803 						 dinfo, dimm);
804 
805 				/* if no DIMMS on this row, continue */
806 				if (!MTR_DIMMS_PRESENT(mtr))
807 					continue;
808 
809 				rc = 0;
810 
811 			}
812 		}
813 	}
814 
815 	return rc;
816 }
817 
818 /**
819  * decode_mir() - Decodes Memory Interleave Register (MIR) info
820  * @mir_no:	number of the MIR register to decode
821  * @mir:	array with the MIR data cached by the driver
822  */
823 static void decode_mir(int mir_no, u16 mir[MAX_MIR])
824 {
825 	if (mir[mir_no] & 3)
826 		edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
827 			 mir_no,
828 			 (mir[mir_no] >> 4) & 0xfff,
829 			 (mir[mir_no] & 1) ? "B0" : "",
830 			 (mir[mir_no] & 2) ? "B1" : "");
831 }
832 
833 /**
834  * i7300_get_mc_regs() - Get the contents of the MC enumeration registers
835  * @mci: struct mem_ctl_info pointer
836  *
837  * Data read is cached internally for its usage when needed
838  */
839 static int i7300_get_mc_regs(struct mem_ctl_info *mci)
840 {
841 	struct i7300_pvt *pvt;
842 	u32 actual_tolm;
843 	int i, rc;
844 
845 	pvt = mci->pvt_info;
846 
847 	pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
848 			(u32 *) &pvt->ambase);
849 
850 	edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
851 
852 	/* Get the Branch Map regs */
853 	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
854 	pvt->tolm >>= 12;
855 	edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
856 		 pvt->tolm, pvt->tolm);
857 
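	/*
	 * TOLM is in units of 256 MB (0.25 GB); multiplying by 1000 and
	 * shifting right by 2 yields the limit in thousandths of a GB,
	 * which is what the debug printout below expects.
	 */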
858 	actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
859 	edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
860 		 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
861 
862 	/* Get memory controller settings */
863 	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
864 			     &pvt->mc_settings);
865 	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
866 			     &pvt->mc_settings_a);
867 
868 	if (IS_SINGLE_MODE(pvt->mc_settings_a))
869 		edac_dbg(0, "Memory controller operating in single mode\n");
870 	else
871 		edac_dbg(0, "Memory controller operating in %smirrored mode\n",
872 			 IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
873 
874 	edac_dbg(0, "Error detection is %s\n",
875 		 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
876 	edac_dbg(0, "Retry is %s\n",
877 		 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
878 
879 	/* Get Memory Interleave Range registers */
880 	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
881 			     &pvt->mir[0]);
882 	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
883 			     &pvt->mir[1]);
884 	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
885 			     &pvt->mir[2]);
886 
887 	/* Decode the MIR regs */
888 	for (i = 0; i < MAX_MIR; i++)
889 		decode_mir(i, pvt->mir);
890 
891 	rc = i7300_init_csrows(mci);
892 	if (rc < 0)
893 		return rc;
894 
895 	/* Dump the size of each DIMM in an
896 	 * orderly matrix (debug builds only) */
897 	print_dimm_size(pvt);
898 
899 	return 0;
900 }
901 
902 /*************************************************
903  * i7300 Functions related to device probe/release
904  *************************************************/
905 
906 /**
907  * i7300_put_devices() - Release the PCI devices
908  * @mci: struct mem_ctl_info pointer
909  */
910 static void i7300_put_devices(struct mem_ctl_info *mci)
911 {
912 	struct i7300_pvt *pvt;
913 	int branch;
914 
915 	pvt = mci->pvt_info;
916 
917 	/* Decrement usage count for devices */
918 	for (branch = 0; branch < MAX_BRANCHES; branch++)
919 		pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
920 	pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
921 	pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
922 }
923 
924 /**
925  * i7300_get_devices() - Find and perform 'get' operation on the MCH's
926  *			 device/functions we want to reference for this driver
927  * @mci: struct mem_ctl_info pointer
928  *
929  * Access and prepare the several devices for usage:
930  * I7300 devices used by this driver:
931  *    Device 16, functions 0,1 and 2:	PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
932  *    Device 21 function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
933  *    Device 22 function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
934  */
935 static int i7300_get_devices(struct mem_ctl_info *mci)
936 {
937 	struct i7300_pvt *pvt;
938 	struct pci_dev *pdev;
939 
940 	pvt = mci->pvt_info;
941 
942 	/* Attempt to 'get' the MCH register we want */
943 	pdev = NULL;
944 	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
945 				      PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
946 				      pdev))) {
947 		/* Store device 16 funcs 1 and 2 */
948 		switch (PCI_FUNC(pdev->devfn)) {
949 		case 1:
950 			if (!pvt->pci_dev_16_1_fsb_addr_map)
951 				pvt->pci_dev_16_1_fsb_addr_map =
952 							pci_dev_get(pdev);
953 			break;
954 		case 2:
955 			if (!pvt->pci_dev_16_2_fsb_err_regs)
956 				pvt->pci_dev_16_2_fsb_err_regs =
957 							pci_dev_get(pdev);
958 			break;
959 		}
960 	}
961 
962 	if (!pvt->pci_dev_16_1_fsb_addr_map ||
963 	    !pvt->pci_dev_16_2_fsb_err_regs) {
964 		/* At least one device was not found */
965 		i7300_printk(KERN_ERR,
966 			"'system address, Process Bus' device not found: "
967 			"vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
968 			PCI_VENDOR_ID_INTEL,
969 			PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
970 		goto error;
971 	}
972 
973 	edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s  %x:%x\n",
974 		 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
975 		 pvt->pci_dev_16_0_fsb_ctlr->vendor,
976 		 pvt->pci_dev_16_0_fsb_ctlr->device);
977 	edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s  %x:%x\n",
978 		 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
979 		 pvt->pci_dev_16_1_fsb_addr_map->vendor,
980 		 pvt->pci_dev_16_1_fsb_addr_map->device);
981 	edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s  %x:%x\n",
982 		 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
983 		 pvt->pci_dev_16_2_fsb_err_regs->vendor,
984 		 pvt->pci_dev_16_2_fsb_err_regs->device);
985 
986 	pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
987 					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
988 					    NULL);
989 	if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
990 		i7300_printk(KERN_ERR,
991 			"MC: 'BRANCH 0' device not found: "
992 			"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
993 			PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
994 		goto error;
995 	}
996 
997 	pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
998 					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
999 					    NULL);
1000 	if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
1001 		i7300_printk(KERN_ERR,
1002 			"MC: 'BRANCH 1' device not found: "
1003 			"vendor 0x%x device 0x%x Func 0 "
1004 			"(broken BIOS?)\n",
1005 			PCI_VENDOR_ID_INTEL,
1006 			PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
1007 		goto error;
1008 	}
1009 
1010 	return 0;
1011 
1012 error:
1013 	i7300_put_devices(mci);
1014 	return -ENODEV;
1015 }
1016 
1017 /**
1018  * i7300_init_one() - Probe for one instance of the device
1019  * @pdev: struct pci_dev pointer
1020  * @id: struct pci_device_id pointer - currently unused
1021  */
1022 static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1023 {
1024 	struct mem_ctl_info *mci;
1025 	struct edac_mc_layer layers[3];
1026 	struct i7300_pvt *pvt;
1027 	int rc;
1028 
1029 	/* wake up device */
1030 	rc = pci_enable_device(pdev);
1031 	if (rc == -EIO)
1032 		return rc;
1033 
1034 	edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
1035 		 pdev->bus->number,
1036 		 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1037 
1038 	/* We are only looking for func 0 of the set */
1039 	if (PCI_FUNC(pdev->devfn) != 0)
1040 		return -ENODEV;
1041 
1042 	/* allocate a new MC control structure */
1043 	layers[0].type = EDAC_MC_LAYER_BRANCH;
1044 	layers[0].size = MAX_BRANCHES;
1045 	layers[0].is_virt_csrow = false;
1046 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
1047 	layers[1].size = MAX_CH_PER_BRANCH;
1048 	layers[1].is_virt_csrow = true;
1049 	layers[2].type = EDAC_MC_LAYER_SLOT;
1050 	layers[2].size = MAX_SLOTS;
1051 	layers[2].is_virt_csrow = true;
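	/*
	 * With this three-layer geometry (branch x channel x slot), the dimm
	 * entries are later looked up via EDAC_DIMM_PTR(..., branch, ch, slot)
	 * in i7300_init_csrows().
	 */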
1052 	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1053 	if (mci == NULL)
1054 		return -ENOMEM;
1055 
1056 	edac_dbg(0, "MC: mci = %p\n", mci);
1057 
1058 	mci->pdev = &pdev->dev;	/* record ptr  to the generic device */
1059 
1060 	pvt = mci->pvt_info;
1061 	pvt->pci_dev_16_0_fsb_ctlr = pdev;	/* Record this device in our private data */
1062 
1063 	pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1064 	if (!pvt->tmp_prt_buffer) {
1065 		edac_mc_free(mci);
1066 		return -ENOMEM;
1067 	}
1068 
1069 	/* 'get' the pci devices we want to reserve for our use */
1070 	if (i7300_get_devices(mci))
1071 		goto fail0;
1072 
1073 	mci->mc_idx = 0;
1074 	mci->mtype_cap = MEM_FLAG_FB_DDR2;
1075 	mci->edac_ctl_cap = EDAC_FLAG_NONE;
1076 	mci->edac_cap = EDAC_FLAG_NONE;
1077 	mci->mod_name = "i7300_edac.c";
1078 	mci->ctl_name = i7300_devs[0].ctl_name;
1079 	mci->dev_name = pci_name(pdev);
1080 	mci->ctl_page_to_phys = NULL;
1081 
1082 	/* Set the function pointer to an actual operation function */
1083 	mci->edac_check = i7300_check_error;
1084 
1085 	/* initialize the MC control structure 'csrows' table
1086 	 * with the mapping and control information */
1087 	if (i7300_get_mc_regs(mci)) {
1088 		edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
1089 		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
1090 	} else {
1091 		edac_dbg(1, "MC: Enable error reporting now\n");
1092 		i7300_enable_error_reporting(mci);
1093 	}
1094 
1095 	/* add this new MC control structure to EDAC's list of MCs */
1096 	if (edac_mc_add_mc(mci)) {
1097 		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1098 		/* FIXME: perhaps some code should go here that disables error
1099 		 * reporting if we just enabled it
1100 		 */
1101 		goto fail1;
1102 	}
1103 
1104 	i7300_clear_error(mci);
1105 
1106 	/* allocating generic PCI control info */
1107 	i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1108 	if (!i7300_pci) {
1109 		printk(KERN_WARNING
1110 			"%s(): Unable to create PCI control\n",
1111 			__func__);
1112 		printk(KERN_WARNING
1113 			"%s(): PCI error report via EDAC not setup\n",
1114 			__func__);
1115 	}
1116 
1117 	return 0;
1118 
1119 	/* Error exit unwinding stack */
1120 fail1:
1121 
1122 	i7300_put_devices(mci);
1123 
1124 fail0:
1125 	kfree(pvt->tmp_prt_buffer);
1126 	edac_mc_free(mci);
1127 	return -ENODEV;
1128 }
1129 
1130 /**
1131  * i7300_remove_one() - Remove one device instance
1132  * @pdev: struct pci_dev pointer
1133  */
1134 static void i7300_remove_one(struct pci_dev *pdev)
1135 {
1136 	struct mem_ctl_info *mci;
1137 	char *tmp;
1138 
1139 	edac_dbg(0, "\n");
1140 
1141 	if (i7300_pci)
1142 		edac_pci_release_generic_ctl(i7300_pci);
1143 
1144 	mci = edac_mc_del_mc(&pdev->dev);
1145 	if (!mci)
1146 		return;
1147 
1148 	tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;
1149 
1150 	/* retrieve references to resources, and free those resources */
1151 	i7300_put_devices(mci);
1152 
1153 	kfree(tmp);
1154 	edac_mc_free(mci);
1155 }
1156 
1157 /*
1158  * pci_device_id: table for which devices we are looking for
1159  *
1160  * Has only 8086:360c PCI ID
1161  */
1162 static const struct pci_device_id i7300_pci_tbl[] = {
1163 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
1164 	{0,}			/* 0 terminated list. */
1165 };
1166 
1167 MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);
1168 
1169 /*
1170  * i7300_driver: pci_driver structure for this module
1171  */
1172 static struct pci_driver i7300_driver = {
1173 	.name = "i7300_edac",
1174 	.probe = i7300_init_one,
1175 	.remove = i7300_remove_one,
1176 	.id_table = i7300_pci_tbl,
1177 };
1178 
1179 /**
1180  * i7300_init() - Registers the driver
1181  */
1182 static int __init i7300_init(void)
1183 {
1184 	int pci_rc;
1185 
1186 	edac_dbg(2, "\n");
1187 
1188 	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
1189 	opstate_init();
1190 
1191 	pci_rc = pci_register_driver(&i7300_driver);
1192 
1193 	return (pci_rc < 0) ? pci_rc : 0;
1194 }
1195 
1196 /**
1197  * i7300_exit() - Unregisters the driver
1198  */
1199 static void __exit i7300_exit(void)
1200 {
1201 	edac_dbg(2, "\n");
1202 	pci_unregister_driver(&i7300_driver);
1203 }
1204 
1205 module_init(i7300_init);
1206 module_exit(i7300_exit);
1207 
1208 MODULE_LICENSE("GPL");
1209 MODULE_AUTHOR("Mauro Carvalho Chehab");
1210 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
1211 MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
1212 		   I7300_REVISION);
1213 
1214 module_param(edac_op_state, int, 0444);
1215 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1216