xref: /linux/drivers/edac/i7core_edac.c (revision 7cbea96f6660786158ed7dc81fe40273d860355b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Intel i7 core/Nehalem Memory Controller kernel module
3  *
4  * This driver supports the memory controllers found on the Intel
5  * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
6  * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
7  * and Westmere-EP.
8  *
9  * Copyright (c) 2009-2010 by:
10  *	 Mauro Carvalho Chehab
11  *
12  * Red Hat Inc. https://www.redhat.com
13  *
14  * Forked and adapted from the i5400_edac driver
15  *
16  * Based on the following public Intel datasheets:
17  * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
18  * Datasheet, Volume 2:
19  *	http://download.intel.com/design/processor/datashts/320835.pdf
20  * Intel Xeon Processor 5500 Series Datasheet Volume 2
21  *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
22  * also available at:
23  * 	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
24  */
25 
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/pci.h>
29 #include <linux/pci_ids.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include <linux/dmi.h>
33 #include <linux/edac.h>
34 #include <linux/mmzone.h>
35 #include <linux/smp.h>
36 #include <asm/mce.h>
37 #include <asm/processor.h>
38 #include <asm/div64.h>
39 
40 #include "edac_module.h"
41 
/* Static vars */

/* List of per-socket i7core_dev structures (see alloc/get_i7core_dev) */
static LIST_HEAD(i7core_edac_list);
/* Serializes probe/remove and access to i7core_edac_list */
static DEFINE_MUTEX(i7core_edac_lock);
static int probed;

/* Module parameter: when set, probe for non-core devices hidden by BIOS */
static int use_pci_fixup;
module_param(use_pci_fixup, int, 0444);
MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
/*
 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
 * registers start at bus 255, and are not reported by BIOS.
 * We currently find devices with only 2 sockets. In order to support more QPI
 * Quick Path Interconnect, just increment this number.
 */
#define MAX_SOCKET_BUSES	2
57 
58 
59 /*
60  * Alter this version for the module when modifications are made
61  */
62 #define I7CORE_REVISION    " Ver: 1.0.0"
63 #define EDAC_MOD_STR      "i7core_edac"
64 
65 /*
66  * Debug macros
67  */
68 #define i7core_printk(level, fmt, arg...)			\
69 	edac_printk(level, "i7core", fmt, ##arg)
70 
71 #define i7core_mc_printk(mci, level, fmt, arg...)		\
72 	edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
73 
74 /*
75  * i7core Memory Controller Registers
76  */
77 
78 	/* OFFSETS for Device 0 Function 0 */
79 
80 #define MC_CFG_CONTROL	0x90
81   #define MC_CFG_UNLOCK		0x02
82   #define MC_CFG_LOCK		0x00
83 
84 	/* OFFSETS for Device 3 Function 0 */
85 
86 #define MC_CONTROL	0x48
87 #define MC_STATUS	0x4c
88 #define MC_MAX_DOD	0x64
89 
90 /*
91  * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
92  * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
93  */
94 
95 #define MC_TEST_ERR_RCV1	0x60
96   #define DIMM2_COR_ERR(r)			((r) & 0x7fff)
97 
98 #define MC_TEST_ERR_RCV0	0x64
99   #define DIMM1_COR_ERR(r)			(((r) >> 16) & 0x7fff)
100   #define DIMM0_COR_ERR(r)			((r) & 0x7fff)
101 
102 /* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
103 #define MC_SSRCONTROL		0x48
104   #define SSR_MODE_DISABLE	0x00
105   #define SSR_MODE_ENABLE	0x01
106   #define SSR_MODE_MASK		0x03
107 
108 #define MC_SCRUB_CONTROL	0x4c
109   #define STARTSCRUB		(1 << 24)
110   #define SCRUBINTERVAL_MASK    0xffffff
111 
112 #define MC_COR_ECC_CNT_0	0x80
113 #define MC_COR_ECC_CNT_1	0x84
114 #define MC_COR_ECC_CNT_2	0x88
115 #define MC_COR_ECC_CNT_3	0x8c
116 #define MC_COR_ECC_CNT_4	0x90
117 #define MC_COR_ECC_CNT_5	0x94
118 
119 #define DIMM_TOP_COR_ERR(r)			(((r) >> 16) & 0x7fff)
120 #define DIMM_BOT_COR_ERR(r)			((r) & 0x7fff)
121 
122 
123 	/* OFFSETS for Devices 4,5 and 6 Function 0 */
124 
125 #define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
126   #define THREE_DIMMS_PRESENT		(1 << 24)
127   #define SINGLE_QUAD_RANK_PRESENT	(1 << 23)
128   #define QUAD_RANK_PRESENT		(1 << 22)
129   #define REGISTERED_DIMM		(1 << 15)
130 
131 #define MC_CHANNEL_MAPPER	0x60
132   #define RDLCH(r, ch)		((((r) >> (3 + (ch * 6))) & 0x07) - 1)
133   #define WRLCH(r, ch)		((((r) >> (ch * 6)) & 0x07) - 1)
134 
135 #define MC_CHANNEL_RANK_PRESENT 0x7c
136   #define RANK_PRESENT_MASK		0xffff
137 
138 #define MC_CHANNEL_ADDR_MATCH	0xf0
139 #define MC_CHANNEL_ERROR_MASK	0xf8
140 #define MC_CHANNEL_ERROR_INJECT	0xfc
141   #define INJECT_ADDR_PARITY	0x10
142   #define INJECT_ECC		0x08
143   #define MASK_CACHELINE	0x06
144   #define MASK_FULL_CACHELINE	0x06
145   #define MASK_MSB32_CACHELINE	0x04
146   #define MASK_LSB32_CACHELINE	0x02
147   #define NO_MASK_CACHELINE	0x00
148   #define REPEAT_EN		0x01
149 
150 	/* OFFSETS for Devices 4,5 and 6 Function 1 */
151 
152 #define MC_DOD_CH_DIMM0		0x48
153 #define MC_DOD_CH_DIMM1		0x4c
154 #define MC_DOD_CH_DIMM2		0x50
155   #define RANKOFFSET_MASK	((1 << 12) | (1 << 11) | (1 << 10))
156   #define RANKOFFSET(x)		((x & RANKOFFSET_MASK) >> 10)
157   #define DIMM_PRESENT_MASK	(1 << 9)
158   #define DIMM_PRESENT(x)	(((x) & DIMM_PRESENT_MASK) >> 9)
159   #define MC_DOD_NUMBANK_MASK		((1 << 8) | (1 << 7))
160   #define MC_DOD_NUMBANK(x)		(((x) & MC_DOD_NUMBANK_MASK) >> 7)
161   #define MC_DOD_NUMRANK_MASK		((1 << 6) | (1 << 5))
162   #define MC_DOD_NUMRANK(x)		(((x) & MC_DOD_NUMRANK_MASK) >> 5)
163   #define MC_DOD_NUMROW_MASK		((1 << 4) | (1 << 3) | (1 << 2))
164   #define MC_DOD_NUMROW(x)		(((x) & MC_DOD_NUMROW_MASK) >> 2)
165   #define MC_DOD_NUMCOL_MASK		3
166   #define MC_DOD_NUMCOL(x)		((x) & MC_DOD_NUMCOL_MASK)
167 
168 #define MC_RANK_PRESENT		0x7c
169 
170 #define MC_SAG_CH_0	0x80
171 #define MC_SAG_CH_1	0x84
172 #define MC_SAG_CH_2	0x88
173 #define MC_SAG_CH_3	0x8c
174 #define MC_SAG_CH_4	0x90
175 #define MC_SAG_CH_5	0x94
176 #define MC_SAG_CH_6	0x98
177 #define MC_SAG_CH_7	0x9c
178 
179 #define MC_RIR_LIMIT_CH_0	0x40
180 #define MC_RIR_LIMIT_CH_1	0x44
181 #define MC_RIR_LIMIT_CH_2	0x48
182 #define MC_RIR_LIMIT_CH_3	0x4C
183 #define MC_RIR_LIMIT_CH_4	0x50
184 #define MC_RIR_LIMIT_CH_5	0x54
185 #define MC_RIR_LIMIT_CH_6	0x58
186 #define MC_RIR_LIMIT_CH_7	0x5C
187 #define MC_RIR_LIMIT_MASK	((1 << 10) - 1)
188 
189 #define MC_RIR_WAY_CH		0x80
190   #define MC_RIR_WAY_OFFSET_MASK	(((1 << 14) - 1) & ~0x7)
191   #define MC_RIR_WAY_RANK_MASK		0x7
192 
193 /*
194  * i7core structs
195  */
196 
197 #define NUM_CHANS 3
198 #define MAX_DIMMS 3		/* Max DIMMS per channel */
199 #define MAX_MCR_FUNC  4
200 #define MAX_CHAN_FUNC 3
201 
/* Cached copies of the global MC registers read from device 3 function 0 */
struct i7core_info {
	u32	mc_control;
	u32	mc_status;
	u32	max_dod;
	u32	ch_map;
};


/* Error-injection parameters, configured through sysfs */
struct i7core_inject {
	int	enable;		/* non-zero while injection is armed */

	u32	section;
	u32	type;
	u32	eccmask;

	/* Error address mask; -1 in a field means "match any" */
	int channel, dimm, rank, bank, page, col;
};

/* Per-channel DIMM population flags (from MC_CHANNEL_DIMM_INIT_PARAMS) */
struct i7core_channel {
	bool		is_3dimms_present;
	bool		is_single_4rank;
	bool		has_4rank;
	u32		dimms;
};

/* One PCI device the driver must locate: slot/function plus device id */
struct pci_id_descr {
	int			dev;
	int			func;
	int 			dev_id;
	int			optional;	/* may legitimately be absent */
};

/* Chipset-family device list: array of descriptors and its length */
struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs;
};

/* Per-socket bookkeeping: found PCI devices plus the EDAC MC handle */
struct i7core_dev {
	struct list_head	list;		/* node in i7core_edac_list */
	u8			socket;
	struct mem_ctl_info	*mci;
	int			n_devs;
	struct pci_dev		*pdev[] __counted_by(n_devs);
};
247 
/* Driver-private per-socket state, hung off mem_ctl_info->pvt_info */
struct i7core_pvt {
	struct device *addrmatch_dev, *chancounts_dev;

	struct pci_dev	*pci_noncore;
	struct pci_dev	*pci_mcr[MAX_MCR_FUNC + 1];	/* device 3, by function */
	struct pci_dev	*pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1]; /* devices 4-6 */

	struct i7core_dev *i7core_dev;

	struct i7core_info	info;
	struct i7core_inject	inject;
	struct i7core_channel	channel[NUM_CHANS];

	int		ce_count_available;

			/* ECC corrected errors counts per udimm */
	unsigned long	udimm_ce_count[MAX_DIMMS];
	int		udimm_last_ce_count[MAX_DIMMS];
			/* ECC corrected errors counts per rdimm */
	unsigned long	rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
	int		rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];

	bool		is_registered, enable_scrub;

	/* DCLK Frequency used for computing scrub rate */
	int			dclk_freq;

	/* Struct to control EDAC polling */
	struct edac_pci_ctl_info *i7core_pci;
};
278 
/* Shorthand for initializing a pci_id_descr entry */
#define PCI_DESCR(device, function, device_id)	\
	.dev = (device),			\
	.func = (function),			\
	.dev_id = (device_id)

/* Device list for i7core / Xeon 35xx (Nehalem) */
static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC)   },

		/* Generic Non-core registers */
	/*
	 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
	 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
	 * the probing code needs to test for the other address in case of
	 * failure of this one
	 */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE)  },

};
320 
/* Device list for Lynnfield (Core i7 8xx, Xeon 34xx): only two channels */
static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
	{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR)         },
	{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD)      },
	{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST)     },

	{ PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
	{ PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
	{ PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
	{ PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC)   },

	{ PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
	{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
	{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
	{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC)   },

	/*
	 * This is the PCI device has an alternate address on some
	 * processors like Core i7 860
	 */
	{ PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE)     },
};
342 
/* Device list for Westmere-EP (Xeon 56xx): REV2 device ids */
static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2)   },

		/* Generic Non-core registers */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2)  },

};
373 
/* Wraps a descriptor array plus its length into a pci_id_table entry */
#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
/* All supported chipset families; terminated by a NULL descr */
static const struct pci_id_table pci_dev_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
	{ NULL, }
};

/*
 *	pci_device_id	table for which devices we are looking for
 */
static const struct pci_device_id i7core_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
	{ 0, }
};
390 
391 /****************************************************************************
392 			Ancillary status routines
393  ****************************************************************************/
394 
	/* MC_CONTROL bits */
#define CH_ACTIVE(pvt, ch)	((pvt)->info.mc_control & (1 << (8 + ch)))
#define ECCx8(pvt)		((pvt)->info.mc_control & (1 << 1))

	/* MC_STATUS bits */
#define ECC_ENABLED(pvt)	((pvt)->info.mc_status & (1 << 4))
#define CH_DISABLED(pvt, ch)	((pvt)->info.mc_status & (1 << ch))
402 
403 	/* MC_MAX_DOD read functions */
404 static inline int numdimms(u32 dimms)
405 {
406 	return (dimms & 0x3) + 1;
407 }
408 
409 static inline int numrank(u32 rank)
410 {
411 	static const int ranks[] = { 1, 2, 4, -EINVAL };
412 
413 	return ranks[rank & 0x3];
414 }
415 
416 static inline int numbank(u32 bank)
417 {
418 	static const int banks[] = { 4, 8, 16, -EINVAL };
419 
420 	return banks[bank & 0x3];
421 }
422 
423 static inline int numrow(u32 row)
424 {
425 	static const int rows[] = {
426 		1 << 12, 1 << 13, 1 << 14, 1 << 15,
427 		1 << 16, -EINVAL, -EINVAL, -EINVAL,
428 	};
429 
430 	return rows[row & 0x7];
431 }
432 
433 static inline int numcol(u32 col)
434 {
435 	static const int cols[] = {
436 		1 << 10, 1 << 11, 1 << 12, -EINVAL,
437 	};
438 	return cols[col & 0x3];
439 }
440 
/*
 * Return the i7core_dev previously allocated for @socket, or NULL if
 * that socket has not been seen yet.
 */
static struct i7core_dev *get_i7core_dev(u8 socket)
{
	struct i7core_dev *i7core_dev;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		if (i7core_dev->socket == socket)
			return i7core_dev;
	}

	return NULL;
}
452 
453 static struct i7core_dev *alloc_i7core_dev(u8 socket,
454 					   const struct pci_id_table *table)
455 {
456 	struct i7core_dev *i7core_dev;
457 
458 	i7core_dev = kzalloc_flex(*i7core_dev, pdev, table->n_devs);
459 	if (!i7core_dev)
460 		return NULL;
461 
462 	i7core_dev->n_devs = table->n_devs;
463 	i7core_dev->socket = socket;
464 	list_add_tail(&i7core_dev->list, &i7core_edac_list);
465 
466 	return i7core_dev;
467 }
468 
/* Unlink @i7core_dev from the global socket list and free it. */
static void free_i7core_dev(struct i7core_dev *i7core_dev)
{
	list_del(&i7core_dev->list);
	kfree(i7core_dev);
}
474 
475 /****************************************************************************
476 			Memory check routines
477  ****************************************************************************/
478 
479 static int get_dimm_config(struct mem_ctl_info *mci)
480 {
481 	struct i7core_pvt *pvt = mci->pvt_info;
482 	struct pci_dev *pdev;
483 	int i, j;
484 	enum edac_type mode;
485 	enum mem_type mtype;
486 	struct dimm_info *dimm;
487 
488 	/* Get data from the MC register, function 0 */
489 	pdev = pvt->pci_mcr[0];
490 	if (!pdev)
491 		return -ENODEV;
492 
493 	/* Device 3 function 0 reads */
494 	pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
495 	pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
496 	pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
497 	pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
498 
499 	edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
500 		 pvt->i7core_dev->socket, pvt->info.mc_control,
501 		 pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map);
502 
503 	if (ECC_ENABLED(pvt)) {
504 		edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
505 		if (ECCx8(pvt))
506 			mode = EDAC_S8ECD8ED;
507 		else
508 			mode = EDAC_S4ECD4ED;
509 	} else {
510 		edac_dbg(0, "ECC disabled\n");
511 		mode = EDAC_NONE;
512 	}
513 
514 	/* FIXME: need to handle the error codes */
515 	edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n",
516 		 numdimms(pvt->info.max_dod),
517 		 numrank(pvt->info.max_dod >> 2),
518 		 numbank(pvt->info.max_dod >> 4),
519 		 numrow(pvt->info.max_dod >> 6),
520 		 numcol(pvt->info.max_dod >> 9));
521 
522 	for (i = 0; i < NUM_CHANS; i++) {
523 		u32 data, dimm_dod[3], value[8];
524 
525 		if (!pvt->pci_ch[i][0])
526 			continue;
527 
528 		if (!CH_ACTIVE(pvt, i)) {
529 			edac_dbg(0, "Channel %i is not active\n", i);
530 			continue;
531 		}
532 		if (CH_DISABLED(pvt, i)) {
533 			edac_dbg(0, "Channel %i is disabled\n", i);
534 			continue;
535 		}
536 
537 		/* Devices 4-6 function 0 */
538 		pci_read_config_dword(pvt->pci_ch[i][0],
539 				MC_CHANNEL_DIMM_INIT_PARAMS, &data);
540 
541 
542 		if (data & THREE_DIMMS_PRESENT)
543 			pvt->channel[i].is_3dimms_present = true;
544 
545 		if (data & SINGLE_QUAD_RANK_PRESENT)
546 			pvt->channel[i].is_single_4rank = true;
547 
548 		if (data & QUAD_RANK_PRESENT)
549 			pvt->channel[i].has_4rank = true;
550 
551 		if (data & REGISTERED_DIMM)
552 			mtype = MEM_RDDR3;
553 		else
554 			mtype = MEM_DDR3;
555 
556 		/* Devices 4-6 function 1 */
557 		pci_read_config_dword(pvt->pci_ch[i][1],
558 				MC_DOD_CH_DIMM0, &dimm_dod[0]);
559 		pci_read_config_dword(pvt->pci_ch[i][1],
560 				MC_DOD_CH_DIMM1, &dimm_dod[1]);
561 		pci_read_config_dword(pvt->pci_ch[i][1],
562 				MC_DOD_CH_DIMM2, &dimm_dod[2]);
563 
564 		edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n",
565 			 i,
566 			 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
567 			 data,
568 			 pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
569 			 pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
570 			 pvt->channel[i].has_4rank ? "HAS_4R " : "",
571 			 (data & REGISTERED_DIMM) ? 'R' : 'U');
572 
573 		for (j = 0; j < 3; j++) {
574 			u32 banks, ranks, rows, cols;
575 			u32 size, npages;
576 
577 			if (!DIMM_PRESENT(dimm_dod[j]))
578 				continue;
579 
580 			dimm = edac_get_dimm(mci, i, j, 0);
581 			banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
582 			ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
583 			rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
584 			cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
585 
586 			/* DDR3 has 8 I/O banks */
587 			size = (rows * cols * banks * ranks) >> (20 - 3);
588 
589 			edac_dbg(0, "\tdimm %d %d MiB offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
590 				 j, size,
591 				 RANKOFFSET(dimm_dod[j]),
592 				 banks, ranks, rows, cols);
593 
594 			npages = MiB_TO_PAGES(size);
595 
596 			dimm->nr_pages = npages;
597 
598 			switch (banks) {
599 			case 4:
600 				dimm->dtype = DEV_X4;
601 				break;
602 			case 8:
603 				dimm->dtype = DEV_X8;
604 				break;
605 			case 16:
606 				dimm->dtype = DEV_X16;
607 				break;
608 			default:
609 				dimm->dtype = DEV_UNKNOWN;
610 			}
611 
612 			snprintf(dimm->label, sizeof(dimm->label),
613 				 "CPU#%uChannel#%u_DIMM#%u",
614 				 pvt->i7core_dev->socket, i, j);
615 			dimm->grain = 8;
616 			dimm->edac_mode = mode;
617 			dimm->mtype = mtype;
618 		}
619 
620 		pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
621 		pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
622 		pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
623 		pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
624 		pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
625 		pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
626 		pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
627 		pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
628 		edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
629 		for (j = 0; j < 8; j++)
630 			edac_dbg(1, "\t\t%#x\t%#x\t%#x\n",
631 				 (value[j] >> 27) & 0x1,
632 				 (value[j] >> 24) & 0x7,
633 				 (value[j] & ((1 << 24) - 1)));
634 	}
635 
636 	return 0;
637 }
638 
639 /****************************************************************************
640 			Error insertion routines
641  ****************************************************************************/
642 
643 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
644 
645 /* The i7core has independent error injection features per channel.
646    However, to have a simpler code, we don't allow enabling error injection
647    on more than one channel.
648    Also, since a change at an inject parameter will be applied only at enable,
649    we're disabling error injection on all write calls to the sysfs nodes that
650    controls the error code injection.
651  */
652 static int disable_inject(const struct mem_ctl_info *mci)
653 {
654 	struct i7core_pvt *pvt = mci->pvt_info;
655 
656 	pvt->inject.enable = 0;
657 
658 	if (!pvt->pci_ch[pvt->inject.channel][0])
659 		return -ENODEV;
660 
661 	pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
662 				MC_CHANNEL_ERROR_INJECT, 0);
663 
664 	return 0;
665 }
666 
667 /*
668  * i7core inject inject.section
669  *
670  *	accept and store error injection inject.section value
671  *	bit 0 - refers to the lower 32-byte half cacheline
672  *	bit 1 - refers to the upper 32-byte half cacheline
673  */
674 static ssize_t i7core_inject_section_store(struct device *dev,
675 					   struct device_attribute *mattr,
676 					   const char *data, size_t count)
677 {
678 	struct mem_ctl_info *mci = to_mci(dev);
679 	struct i7core_pvt *pvt = mci->pvt_info;
680 	unsigned long value;
681 	int rc;
682 
683 	if (pvt->inject.enable)
684 		disable_inject(mci);
685 
686 	rc = kstrtoul(data, 10, &value);
687 	if ((rc < 0) || (value > 3))
688 		return -EIO;
689 
690 	pvt->inject.section = (u32) value;
691 	return count;
692 }
693 
694 static ssize_t i7core_inject_section_show(struct device *dev,
695 					  struct device_attribute *mattr,
696 					  char *data)
697 {
698 	struct mem_ctl_info *mci = to_mci(dev);
699 	struct i7core_pvt *pvt = mci->pvt_info;
700 	return sprintf(data, "0x%08x\n", pvt->inject.section);
701 }
702 
703 /*
704  * i7core inject.type
705  *
706  *	accept and store error injection inject.section value
707  *	bit 0 - repeat enable - Enable error repetition
708  *	bit 1 - inject ECC error
709  *	bit 2 - inject parity error
710  */
711 static ssize_t i7core_inject_type_store(struct device *dev,
712 					struct device_attribute *mattr,
713 					const char *data, size_t count)
714 {
715 	struct mem_ctl_info *mci = to_mci(dev);
716 	struct i7core_pvt *pvt = mci->pvt_info;
717 	unsigned long value;
718 	int rc;
719 
720 	if (pvt->inject.enable)
721 		disable_inject(mci);
722 
723 	rc = kstrtoul(data, 10, &value);
724 	if ((rc < 0) || (value > 7))
725 		return -EIO;
726 
727 	pvt->inject.type = (u32) value;
728 	return count;
729 }
730 
731 static ssize_t i7core_inject_type_show(struct device *dev,
732 				       struct device_attribute *mattr,
733 				       char *data)
734 {
735 	struct mem_ctl_info *mci = to_mci(dev);
736 	struct i7core_pvt *pvt = mci->pvt_info;
737 
738 	return sprintf(data, "0x%08x\n", pvt->inject.type);
739 }
740 
741 /*
742  * i7core_inject_inject.eccmask_store
743  *
744  * The type of error (UE/CE) will depend on the inject.eccmask value:
745  *   Any bits set to a 1 will flip the corresponding ECC bit
746  *   Correctable errors can be injected by flipping 1 bit or the bits within
747  *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
748  *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
749  *   uncorrectable error to be injected.
750  */
751 static ssize_t i7core_inject_eccmask_store(struct device *dev,
752 					   struct device_attribute *mattr,
753 					   const char *data, size_t count)
754 {
755 	struct mem_ctl_info *mci = to_mci(dev);
756 	struct i7core_pvt *pvt = mci->pvt_info;
757 	unsigned long value;
758 	int rc;
759 
760 	if (pvt->inject.enable)
761 		disable_inject(mci);
762 
763 	rc = kstrtoul(data, 10, &value);
764 	if (rc < 0)
765 		return -EIO;
766 
767 	pvt->inject.eccmask = (u32) value;
768 	return count;
769 }
770 
771 static ssize_t i7core_inject_eccmask_show(struct device *dev,
772 					  struct device_attribute *mattr,
773 					  char *data)
774 {
775 	struct mem_ctl_info *mci = to_mci(dev);
776 	struct i7core_pvt *pvt = mci->pvt_info;
777 
778 	return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
779 }
780 
781 /*
782  * i7core_addrmatch
783  *
784  * The type of error (UE/CE) will depend on the inject.eccmask value:
785  *   Any bits set to a 1 will flip the corresponding ECC bit
786  *   Correctable errors can be injected by flipping 1 bit or the bits within
787  *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
788  *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
789  *   uncorrectable error to be injected.
790  */
791 
/*
 * Generate the store/show handler pair for one address-match attribute.
 * store: accepts a decimal value below @limit, or "any" (stored as -1,
 * meaning "do not match on this field"); disarms any active injection
 * before updating the parameter.  show: prints the value, or "any".
 * NOTE(review): 'value' is a signed long passed to kstrtoul(), which
 * takes an unsigned long * -- type mismatch worth confirming/cleaning.
 */
#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	const char *data, size_t count)				\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt;					\
	long value;						\
	int rc;							\
								\
	edac_dbg(1, "\n");					\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		value = -1;					\
	else {							\
		rc = kstrtoul(data, 10, &value);		\
		if ((rc < 0) || (value >= limit))		\
			return -EIO;				\
	}							\
								\
	pvt->inject.param = value;				\
								\
	return count;						\
}								\
								\
static ssize_t i7core_inject_show_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	char *data)						\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	edac_dbg(1, "pvt=%p\n", pvt);				\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	else							\
		return sprintf(data, "%d\n", pvt->inject.param);\
}
837 
/* Wrap one generated handler pair into a DEVICE_ATTR named after @param */
#define ATTR_ADDR_MATCH(param)					\
	static DEVICE_ATTR(param, S_IRUGO | S_IWUSR,		\
		    i7core_inject_show_##param,			\
		    i7core_inject_store_##param)

/* Upper bounds follow the hardware field widths for each criterion */
DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);

ATTR_ADDR_MATCH(channel);
ATTR_ADDR_MATCH(dimm);
ATTR_ADDR_MATCH(rank);
ATTR_ADDR_MATCH(bank);
ATTR_ADDR_MATCH(page);
ATTR_ADDR_MATCH(col);
856 
857 static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
858 {
859 	u32 read;
860 	int count;
861 
862 	edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n",
863 		 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
864 		 where, val);
865 
866 	for (count = 0; count < 10; count++) {
867 		if (count)
868 			msleep(100);
869 		pci_write_config_dword(dev, where, val);
870 		pci_read_config_dword(dev, where, &read);
871 
872 		if (read == val)
873 			return 0;
874 	}
875 
876 	i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
877 		"write=%08x. Read=%08x\n",
878 		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
879 		where, val, read);
880 
881 	return -EINVAL;
882 }
883 
884 /*
885  * This routine prepares the Memory Controller for error injection.
886  * The error will be injected when some process tries to write to the
887  * memory that matches the given criteria.
888  * The criteria can be set in terms of a mask where dimm, rank, bank, page
889  * and col can be specified.
890  * A -1 value for any of the mask items will make the MCU to ignore
891  * that matching criteria for error injection.
892  *
893  * It should be noticed that the error will only happen after a write operation
894  * on a memory that matches the condition. if REPEAT_EN is not enabled at
895  * inject mask, then it will produce just one error. Otherwise, it will repeat
896  * until the injectmask would be cleaned.
897  *
898  * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
899  *    is reliable enough to check if the MC is using the
900  *    three channels. However, this is not clear at the datasheet.
901  */
/*
 * Arm (non-zero input) or disarm (zero input) error injection.
 *
 * Builds the address-match mask from the sysfs-configured criteria --
 * a negative (i.e. "any") value sets the corresponding ignore bit in
 * bits 37-41 instead of a match value -- then programs the match, ECC
 * mask and inject-control registers, unlocking the write-only
 * MC_CFG_CONTROL register around the writes.
 */
static ssize_t i7core_inject_enable_store(struct device *dev,
					  struct device_attribute *mattr,
					  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;
	u64 mask = 0;
	int  rc;
	long enable;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	/* NOTE(review): 'enable' is a signed long handed to kstrtoul(),
	 * which expects unsigned long * -- confirm/clean up the type. */
	rc = kstrtoul(data, 10, &enable);
	if ((rc < 0))
		return 0;

	if (enable) {
		pvt->inject.enable = 1;
	} else {
		disable_inject(mci);
		return count;
	}

	/* Sets pvt->inject.dimm mask */
	if (pvt->inject.dimm < 0)
		mask |= 1LL << 41;	/* ignore DIMM when matching */
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.dimm & 0x3LL) << 35;
		else
			mask |= (pvt->inject.dimm & 0x1LL) << 36;
	}

	/* Sets pvt->inject.rank mask */
	if (pvt->inject.rank < 0)
		mask |= 1LL << 40;	/* ignore rank when matching */
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.rank & 0x1LL) << 34;
		else
			mask |= (pvt->inject.rank & 0x3LL) << 34;
	}

	/* Sets pvt->inject.bank mask */
	if (pvt->inject.bank < 0)
		mask |= 1LL << 39;	/* ignore bank when matching */
	else
		/* NOTE(review): 0x15 looks like a typo for 0x1f (it drops
		 * bits 1 and 3 of the bank number) -- confirm against the
		 * datasheet before relying on bank matching. */
		mask |= (pvt->inject.bank & 0x15LL) << 30;

	/* Sets pvt->inject.page mask */
	if (pvt->inject.page < 0)
		mask |= 1LL << 38;	/* ignore page when matching */
	else
		mask |= (pvt->inject.page & 0xffff) << 14;

	/* Sets pvt->inject.column mask */
	if (pvt->inject.col < 0)
		mask |= 1LL << 37;	/* ignore column when matching */
	else
		mask |= (pvt->inject.col & 0x3fff);

	/*
	 * bit    0: REPEAT_EN
	 * bits 1-2: MASK_HALF_CACHELINE
	 * bit    3: INJECT_ECC
	 * bit    4: INJECT_ADDR_PARITY
	 */

	injectmask = (pvt->inject.type & 1) |
		     (pvt->inject.section & 0x3) << 1 |
		     (pvt->inject.type & 0x6) << (3 - 1);

	/* Unlock writes to registers - this register is write only */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 0x2);

	/* Program low and high halves of the 64-bit address match */
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH, mask);
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, injectmask);

	/*
	 * This is something undocumented, based on my tests
	 * Without writing 8 to this register, errors aren't injected. Not sure
	 * why.
	 */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 8);

	edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n",
		 mask, pvt->inject.eccmask, injectmask);


	return count;
}
1005 
1006 static ssize_t i7core_inject_enable_show(struct device *dev,
1007 					 struct device_attribute *mattr,
1008 					 char *data)
1009 {
1010 	struct mem_ctl_info *mci = to_mci(dev);
1011 	struct i7core_pvt *pvt = mci->pvt_info;
1012 	u32 injectmask;
1013 
1014 	if (!pvt->pci_ch[pvt->inject.channel][0])
1015 		return 0;
1016 
1017 	pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
1018 			       MC_CHANNEL_ERROR_INJECT, &injectmask);
1019 
1020 	edac_dbg(0, "Inject error read: 0x%018x\n", injectmask);
1021 
1022 	if (injectmask & 0x0c)
1023 		pvt->inject.enable = 1;
1024 
1025 	return sprintf(data, "%d\n", pvt->inject.enable);
1026 }
1027 
/*
 * Generates a sysfs "show" routine for the udimm corrected-error counter
 * selected by @param.  The counter is only meaningful when CE counts have
 * been collected and the memory is unregistered (UDIMM); otherwise the
 * routine reports "data unavailable".
 */
#define DECLARE_COUNTER(param)					\
static ssize_t i7core_show_counter_##param(			\
	struct device *dev,					\
	struct device_attribute *mattr,				\
	char *data)						\
{								\
	struct mem_ctl_info *mci = dev_get_drvdata(dev);	\
	struct i7core_pvt *pvt = mci->pvt_info;			\
								\
	edac_dbg(1, "\n");					\
	if (!pvt->ce_count_available || (pvt->is_registered))	\
		return sprintf(data, "data unavailable\n");	\
	return sprintf(data, "%lu\n",				\
			pvt->udimm_ce_count[param]);		\
}
1043 
/*
 * Declares the sysfs attribute for one udimm CE counter.
 *
 * The attribute has no store method (NULL), so advertising S_IWUSR made the
 * node appear writable while any write would fail with -EIO.  Use a
 * read-only mode to match the NULL store callback.
 */
#define ATTR_COUNTER(param)					\
	static DEVICE_ATTR(udimm##param, S_IRUGO,		\
		    i7core_show_counter_##param,		\
		    NULL)
1048 
/* Instantiate show routines and sysfs attributes for udimm counters 0-2 */
DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);

ATTR_COUNTER(0);
ATTR_COUNTER(1);
ATTR_COUNTER(2);
1056 
1057 /*
1058  * inject_addrmatch device sysfs struct
1059  */
1060 
/*
 * Match-criteria attributes shown in the inject_addrmatch directory
 * (channel/dimm/rank/bank/page/col; the dev_attr_* objects are declared
 * earlier in this file).
 */
static struct attribute *i7core_addrmatch_attrs[] = {
	&dev_attr_channel.attr,
	&dev_attr_dimm.attr,
	&dev_attr_rank.attr,
	&dev_attr_bank.attr,
	&dev_attr_page.attr,
	&dev_attr_col.attr,
	NULL
};

static const struct attribute_group addrmatch_grp = {
	.attrs	= i7core_addrmatch_attrs,
};

/* NULL-terminated group list consumed via the device type's ->groups */
static const struct attribute_group *addrmatch_groups[] = {
	&addrmatch_grp,
	NULL
};
1079 
/* ->release for the inject_addrmatch device: it was kzalloc'ed, so kfree it */
static void addrmatch_release(struct device *device)
{
	const char *name = dev_name(device);

	edac_dbg(1, "Releasing device %s\n", name);
	kfree(device);
}
1085 
/* Device type for the inject_addrmatch child device */
static const struct device_type addrmatch_type = {
	.groups		= addrmatch_groups,
	.release	= addrmatch_release,
};
1090 
1091 /*
1092  * all_channel_counts sysfs struct
1093  */
1094 
/* Per-DIMM CE counter attributes shown in all_channel_counts (UDIMM only) */
static struct attribute *i7core_udimm_counters_attrs[] = {
	&dev_attr_udimm0.attr,
	&dev_attr_udimm1.attr,
	&dev_attr_udimm2.attr,
	NULL
};

static const struct attribute_group all_channel_counts_grp = {
	.attrs	= i7core_udimm_counters_attrs,
};

/* NULL-terminated group list consumed via the device type's ->groups */
static const struct attribute_group *all_channel_counts_groups[] = {
	&all_channel_counts_grp,
	NULL
};
1110 
/* ->release for the all_channel_counts device: it was kzalloc'ed, so kfree it */
static void all_channel_counts_release(struct device *device)
{
	const char *name = dev_name(device);

	edac_dbg(1, "Releasing device %s\n", name);
	kfree(device);
}
1116 
/* Device type for the all_channel_counts child device */
static const struct device_type all_channel_counts_type = {
	.groups		= all_channel_counts_groups,
	.release	= all_channel_counts_release,
};
1121 
1122 /*
1123  * inject sysfs attributes
1124  */
1125 
/* Error-injection control attributes, exposed directly on the MC device */
static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
		   i7core_inject_section_show, i7core_inject_section_store);

static DEVICE_ATTR(inject_type, S_IRUGO | S_IWUSR,
		   i7core_inject_type_show, i7core_inject_type_store);


static DEVICE_ATTR(inject_eccmask, S_IRUGO | S_IWUSR,
		   i7core_inject_eccmask_show, i7core_inject_eccmask_store);

static DEVICE_ATTR(inject_enable, S_IRUGO | S_IWUSR,
		   i7core_inject_enable_show, i7core_inject_enable_store);

static struct attribute *i7core_dev_attrs[] = {
	&dev_attr_inject_section.attr,
	&dev_attr_inject_type.attr,
	&dev_attr_inject_eccmask.attr,
	&dev_attr_inject_enable.attr,
	NULL
};

/* Generates i7core_dev_groups for edac_mc_add_mc_with_groups() */
ATTRIBUTE_GROUPS(i7core_dev);
1148 
/*
 * Create the extra sysfs child devices under the MC device:
 * "inject_addrmatch" always, plus "all_channel_counts" only for
 * unregistered (UDIMM) memory.  On failure every device created so far is
 * unwound; the structs themselves are freed by their type's ->release
 * callback (kfree) when the last reference is dropped.
 *
 * Returns 0 on success, -ENOMEM or the device_add() error on failure.
 */
static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int rc;

	pvt->addrmatch_dev = kzalloc_obj(*pvt->addrmatch_dev);
	if (!pvt->addrmatch_dev)
		return -ENOMEM;

	/* type/bus/parent/name must be set up before device_add() */
	pvt->addrmatch_dev->type = &addrmatch_type;
	pvt->addrmatch_dev->bus = mci->dev.bus;
	device_initialize(pvt->addrmatch_dev);
	pvt->addrmatch_dev->parent = &mci->dev;
	dev_set_name(pvt->addrmatch_dev, "inject_addrmatch");
	dev_set_drvdata(pvt->addrmatch_dev, mci);

	edac_dbg(1, "creating %s\n", dev_name(pvt->addrmatch_dev));

	rc = device_add(pvt->addrmatch_dev);
	if (rc < 0)
		goto err_put_addrmatch;

	/* The udimm CE counters only exist for unregistered memory */
	if (!pvt->is_registered) {
		pvt->chancounts_dev = kzalloc_obj(*pvt->chancounts_dev);
		if (!pvt->chancounts_dev) {
			rc = -ENOMEM;
			goto err_del_addrmatch;
		}

		pvt->chancounts_dev->type = &all_channel_counts_type;
		pvt->chancounts_dev->bus = mci->dev.bus;
		device_initialize(pvt->chancounts_dev);
		pvt->chancounts_dev->parent = &mci->dev;
		dev_set_name(pvt->chancounts_dev, "all_channel_counts");
		dev_set_drvdata(pvt->chancounts_dev, mci);

		edac_dbg(1, "creating %s\n", dev_name(pvt->chancounts_dev));

		rc = device_add(pvt->chancounts_dev);
		if (rc < 0)
			goto err_put_chancounts;
	}
	return 0;

	/* Unwind in reverse order of creation */
err_put_chancounts:
	put_device(pvt->chancounts_dev);
err_del_addrmatch:
	device_del(pvt->addrmatch_dev);
err_put_addrmatch:
	put_device(pvt->addrmatch_dev);

	return rc;
}
1202 
1203 static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
1204 {
1205 	struct i7core_pvt *pvt = mci->pvt_info;
1206 
1207 	edac_dbg(1, "\n");
1208 
1209 	if (!pvt->is_registered) {
1210 		device_del(pvt->chancounts_dev);
1211 		put_device(pvt->chancounts_dev);
1212 	}
1213 	device_del(pvt->addrmatch_dev);
1214 	put_device(pvt->addrmatch_dev);
1215 }
1216 
1217 /****************************************************************************
1218 	Device initialization routines: put/get, init/exit
1219  ****************************************************************************/
1220 
1221 /*
1222  *	i7core_put_all_devices	'put' all the devices that we have
1223  *				reserved via 'get'
1224  */
1225 static void i7core_put_devices(struct i7core_dev *i7core_dev)
1226 {
1227 	int i;
1228 
1229 	edac_dbg(0, "\n");
1230 	for (i = 0; i < i7core_dev->n_devs; i++) {
1231 		struct pci_dev *pdev = i7core_dev->pdev[i];
1232 		if (!pdev)
1233 			continue;
1234 		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
1235 			 pdev->bus->number,
1236 			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1237 		pci_dev_put(pdev);
1238 	}
1239 }
1240 
/*
 * Release every PCI device reference taken during probe and free all
 * per-socket descriptors on the global list.
 */
static void i7core_put_all_devices(void)
{
	struct i7core_dev *i7core_dev, *tmp;

	/*
	 * _safe iterator: free_i7core_dev() presumably unlinks and frees
	 * the entry while we walk the list — confirm in its definition.
	 */
	list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
		i7core_put_devices(i7core_dev);
		free_i7core_dev(i7core_dev);
	}
}
1250 
1251 static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
1252 {
1253 	struct pci_dev *pdev = NULL;
1254 	int i;
1255 
1256 	/*
1257 	 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses
1258 	 * aren't announced by acpi. So, we need to use a legacy scan probing
1259 	 * to detect them
1260 	 */
1261 	while (table && table->descr) {
1262 		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
1263 		if (unlikely(!pdev)) {
1264 			for (i = 0; i < MAX_SOCKET_BUSES; i++)
1265 				pcibios_scan_specific_bus(255-i);
1266 		}
1267 		pci_dev_put(pdev);
1268 		table++;
1269 	}
1270 }
1271 
1272 static unsigned i7core_pci_lastbus(void)
1273 {
1274 	int last_bus = 0, bus;
1275 	struct pci_bus *b = NULL;
1276 
1277 	while ((b = pci_find_next_bus(b)) != NULL) {
1278 		bus = b->number;
1279 		edac_dbg(0, "Found bus %d\n", bus);
1280 		if (bus > last_bus)
1281 			last_bus = bus;
1282 	}
1283 
1284 	edac_dbg(0, "Last bus %d\n", last_bus);
1285 
1286 	return last_bus;
1287 }
1288 
1289 /*
1290  *	i7core_get_all_devices	Find and perform 'get' operation on the MCH's
1291  *			device/functions we want to reference for this driver
1292  *
1293  *			Need to 'get' device 16 func 1 and func 2
1294  */
/*
 * Find and reference one PCI device described by table->descr[devno].
 *
 * Called repeatedly with *prev as the iteration cursor: each call looks up
 * the next matching device after *prev and stores it in the per-socket
 * descriptor.  When no further device exists and *prev was set, *prev is
 * reset to NULL and 0 is returned, ending the caller's loop.
 *
 * Returns 0 on success or end-of-iteration, -ENODEV/-ENOMEM on failure.
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
{
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	u8 socket = 0;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	/*
	 * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core regs
	 * is at addr 8086:2c40, instead of 8086:2c41. So, we need
	 * to probe for the alternate address in case of failure
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
		pci_dev_get(*prev);	/* pci_get_device will put it */
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
	}

	/* Same alternate-ID fallback for the Lynnfield non-core device */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
	    !pdev) {
		pci_dev_get(*prev);	/* pci_get_device will put it */
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
				      *prev);
	}

	if (!pdev) {
		/* Iteration exhausted: clear the cursor and stop cleanly */
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		/* Missing first device of a table: try the next table */
		if (devno == 0)
			return -ENODEV;

		i7core_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	/* Non-core buses count down from last_bus, one per socket */
	socket = last_bus - bus;

	i7core_dev = get_i7core_dev(socket);
	if (!i7core_dev) {
		i7core_dev = alloc_i7core_dev(socket, table);
		if (!i7core_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
	}

	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	i7core_dev->pdev[devno] = pdev;

	/* Sanity check: the found device must live at the expected dev.fn */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
			PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	edac_dbg(0, "Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		 socket, bus, dev_descr->dev,
		 dev_descr->func,
		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * As stated on drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to null, we need to do a get for the device
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}
1412 
/*
 * Probe every device in every table of pci_dev_table, accumulating them
 * into per-socket descriptors.  Returns 0 on success; on a real failure
 * all references taken so far are dropped and -ENODEV is returned.
 */
static int i7core_get_all_devices(void)
{
	int i, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_table;

	last_bus = i7core_pci_lastbus();

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			/* Iterate all instances of device i (multi-socket) */
			do {
				rc = i7core_get_onedevice(&pdev, table, i,
							  last_bus);
				if (rc < 0) {
					/*
					 * Failure on the table's first device
					 * means this table doesn't apply:
					 * force the for-loop to end and try
					 * the next table instead of failing.
					 */
					if (i == 0) {
						i = table->n_devs;
						break;
					}
					i7core_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);
		}
		table++;
	}

	return 0;
}
1442 
/*
 * Sort the socket's discovered PCI devices into the driver's private
 * structure by slot/function:
 *   - slot 3           -> pvt->pci_mcr[func] (MC registers)
 *   - slots 4..4+chans -> pvt->pci_ch[channel][func] (per-channel regs)
 *   - 0.0              -> pvt->pci_noncore, also used to identify the CPU
 *                         family and whether scrub-rate control is supported
 * Presence of device 3.2 indicates registered (RDIMM) memory.
 *
 * Returns 0 on success, -EINVAL if a device is outside the expected range.
 */
static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, func, slot;
	char *family;

	pvt->is_registered = false;
	pvt->enable_scrub  = false;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;

		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (slot == 3) {
			if (unlikely(func > MAX_MCR_FUNC))
				goto error;
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
				goto error;
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func) {
			pvt->pci_noncore = pdev;

			/* Detect the processor family */
			switch (pdev->device) {
			case PCI_DEVICE_ID_INTEL_I7_NONCORE:
				family = "Xeon 35xx/ i7core";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
				family = "i7-800/i5-700";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
				family = "Xeon 34xx";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
				family = "Xeon 55xx";
				pvt->enable_scrub = true;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
				family = "Xeon 56xx / i7-900";
				pvt->enable_scrub = true;
				break;
			default:
				family = "unknown";
				pvt->enable_scrub = false;
			}
			edac_dbg(0, "Detected a processor type %s\n", family);
		} else
			goto error;

		edac_dbg(0, "Associated fn %d.%d, dev = %p, socket %d\n",
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev, i7core_dev->socket);

		/* Device 3.2 only exists when the DIMMs are registered */
		if (PCI_SLOT(pdev->devfn) == 3 &&
			PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = true;
	}

	return 0;

error:
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
		      slot, func);
	return -EINVAL;
}
1518 
1519 /****************************************************************************
1520 			Error check routines
1521  ****************************************************************************/
1522 
1523 static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1524 					 const int chan,
1525 					 const int new0,
1526 					 const int new1,
1527 					 const int new2)
1528 {
1529 	struct i7core_pvt *pvt = mci->pvt_info;
1530 	int add0 = 0, add1 = 0, add2 = 0;
1531 	/* Updates CE counters if it is not the first time here */
1532 	if (pvt->ce_count_available) {
1533 		/* Updates CE counters */
1534 
1535 		add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
1536 		add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
1537 		add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
1538 
1539 		if (add2 < 0)
1540 			add2 += 0x7fff;
1541 		pvt->rdimm_ce_count[chan][2] += add2;
1542 
1543 		if (add1 < 0)
1544 			add1 += 0x7fff;
1545 		pvt->rdimm_ce_count[chan][1] += add1;
1546 
1547 		if (add0 < 0)
1548 			add0 += 0x7fff;
1549 		pvt->rdimm_ce_count[chan][0] += add0;
1550 	} else
1551 		pvt->ce_count_available = 1;
1552 
1553 	/* Store the new values */
1554 	pvt->rdimm_last_ce_count[chan][2] = new2;
1555 	pvt->rdimm_last_ce_count[chan][1] = new1;
1556 	pvt->rdimm_last_ce_count[chan][0] = new0;
1557 
1558 	/*updated the edac core */
1559 	if (add0 != 0)
1560 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add0,
1561 				     0, 0, 0,
1562 				     chan, 0, -1, "error", "");
1563 	if (add1 != 0)
1564 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add1,
1565 				     0, 0, 0,
1566 				     chan, 1, -1, "error", "");
1567 	if (add2 != 0)
1568 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add2,
1569 				     0, 0, 0,
1570 				     chan, 2, -1, "error", "");
1571 }
1572 
/*
 * Poll the registered-DIMM corrected-error counters: read the six
 * MC_COR_ECC_CNT registers from device 3 function 2 (two per channel),
 * split them into per-DIMM counts and feed the deltas to
 * i7core_rdimm_update_ce_count().
 */
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv[3][2];
	int i, new0, new1, new2;

	/*Read DEV 3: FUN 2:  MC_COR_ECC_CNT regs directly*/
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
								&rcv[0][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
								&rcv[0][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
								&rcv[1][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
								&rcv[1][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
								&rcv[2][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
								&rcv[2][1]);
	for (i = 0 ; i < 3; i++) {
		edac_dbg(3, "MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			 (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/*
		 * The register-to-DIMM mapping depends on DIMM count:
		 * with 3 DIMMs each half-register is one DIMM; with 1 or 2
		 * DIMMs the two halves of each register are summed per DIMM.
		 */
		if (pvt->channel[i].dimms > 2) {
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
		} else {
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
					DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
					DIMM_BOT_COR_ERR(rcv[i][1]);
			new2 = 0;
		}

		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
	}
}
1611 
1612 /* This function is based on the device 3 function 4 registers as described on:
1613  * Intel Xeon Processor 5500 Series Datasheet Volume 2
1614  *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1615  * also available at:
1616  * 	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
1617  */
/* This function is based on the device 3 function 4 registers as described on:
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 * 	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 *
 * Polls the unregistered-DIMM corrected-error counters from
 * MC_TEST_ERR_RCV0/RCV1, accumulates the deltas into pvt->udimm_ce_count[]
 * and logs when new errors appear.  The first call only records a baseline.
 */
static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv1, rcv0;
	int new0, new1, new2;

	if (!pvt->pci_mcr[4]) {
		edac_dbg(0, "MCR registers not found\n");
		return;
	}

	/* Corrected test errors */
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);

	/* Extract the three per-DIMM counter fields */
	new2 = DIMM2_COR_ERR(rcv1);
	new1 = DIMM1_COR_ERR(rcv0);
	new0 = DIMM0_COR_ERR(rcv0);

	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/*
		 * Compute deltas against the previous snapshot; the 0x7fff
		 * adjustment compensates for hardware counter wrap.
		 */
		int add0, add1, add2;

		add2 = new2 - pvt->udimm_last_ce_count[2];
		add1 = new1 - pvt->udimm_last_ce_count[1];
		add0 = new0 - pvt->udimm_last_ce_count[0];

		if (add2 < 0)
			add2 += 0x7fff;
		pvt->udimm_ce_count[2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->udimm_ce_count[1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->udimm_ce_count[0] += add0;

		if (add0 | add1 | add2)
			i7core_printk(KERN_ERR, "New Corrected error(s): "
				      "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
				      add0, add1, add2);
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->udimm_last_ce_count[2] = new2;
	pvt->udimm_last_ce_count[1] = new1;
	pvt->udimm_last_ce_count[0] = new0;
}
1671 
1672 /*
1673  * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32
1674  * Architectures Software Developer’s Manual Volume 3B.
1675  * Nehalem are defined as family 0x06, model 0x1a
1676  *
1677  * The MCA registers used here are the following ones:
1678  *     struct mce field	MCA Register
1679  *     m->status	MSR_IA32_MC8_STATUS
1680  *     m->addr		MSR_IA32_MC8_ADDR
1681  *     m->misc		MSR_IA32_MC8_MISC
1682  * In the case of Nehalem, the error information is masked at .status and .misc
1683  * fields
1684  */
1685 static void i7core_mce_output_error(struct mem_ctl_info *mci,
1686 				    const struct mce *m)
1687 {
1688 	struct i7core_pvt *pvt = mci->pvt_info;
1689 	char *optype, *err;
1690 	enum hw_event_mc_err_type tp_event;
1691 	unsigned long error = m->status & 0x1ff0000l;
1692 	bool uncorrected_error = m->mcgstatus & 1ll << 61;
1693 	bool ripv = m->mcgstatus & 1;
1694 	u32 optypenum = (m->status >> 4) & 0x07;
1695 	u32 core_err_cnt = (m->status >> 38) & 0x7fff;
1696 	u32 dimm = (m->misc >> 16) & 0x3;
1697 	u32 channel = (m->misc >> 18) & 0x3;
1698 	u32 syndrome = m->misc >> 32;
1699 	u32 errnum = find_first_bit(&error, 32);
1700 
1701 	if (uncorrected_error) {
1702 		core_err_cnt = 1;
1703 		if (ripv)
1704 			tp_event = HW_EVENT_ERR_UNCORRECTED;
1705 		else
1706 			tp_event = HW_EVENT_ERR_FATAL;
1707 	} else {
1708 		tp_event = HW_EVENT_ERR_CORRECTED;
1709 	}
1710 
1711 	switch (optypenum) {
1712 	case 0:
1713 		optype = "generic undef request";
1714 		break;
1715 	case 1:
1716 		optype = "read error";
1717 		break;
1718 	case 2:
1719 		optype = "write error";
1720 		break;
1721 	case 3:
1722 		optype = "addr/cmd error";
1723 		break;
1724 	case 4:
1725 		optype = "scrubbing error";
1726 		break;
1727 	default:
1728 		optype = "reserved";
1729 		break;
1730 	}
1731 
1732 	switch (errnum) {
1733 	case 16:
1734 		err = "read ECC error";
1735 		break;
1736 	case 17:
1737 		err = "RAS ECC error";
1738 		break;
1739 	case 18:
1740 		err = "write parity error";
1741 		break;
1742 	case 19:
1743 		err = "redundancy loss";
1744 		break;
1745 	case 20:
1746 		err = "reserved";
1747 		break;
1748 	case 21:
1749 		err = "memory range error";
1750 		break;
1751 	case 22:
1752 		err = "RTID out of range";
1753 		break;
1754 	case 23:
1755 		err = "address parity error";
1756 		break;
1757 	case 24:
1758 		err = "byte enable parity error";
1759 		break;
1760 	default:
1761 		err = "unknown";
1762 	}
1763 
1764 	/*
1765 	 * Call the helper to output message
1766 	 * FIXME: what to do if core_err_cnt > 1? Currently, it generates
1767 	 * only one event
1768 	 */
1769 	if (uncorrected_error || !pvt->is_registered)
1770 		edac_mc_handle_error(tp_event, mci, core_err_cnt,
1771 				     m->addr >> PAGE_SHIFT,
1772 				     m->addr & ~PAGE_MASK,
1773 				     syndrome,
1774 				     channel, dimm, -1,
1775 				     err, optype);
1776 }
1777 
1778 /*
1779  *	i7core_check_error	Retrieve and process errors reported by the
1780  *				hardware. Called by the Core module.
1781  */
1782 static void i7core_check_error(struct mem_ctl_info *mci, struct mce *m)
1783 {
1784 	struct i7core_pvt *pvt = mci->pvt_info;
1785 
1786 	i7core_mce_output_error(mci, m);
1787 
1788 	/*
1789 	 * Now, let's increment CE error counts
1790 	 */
1791 	if (!pvt->is_registered)
1792 		i7core_udimm_check_mc_ecc_err(mci);
1793 	else
1794 		i7core_rdimm_check_mc_ecc_err(mci);
1795 }
1796 
1797 /*
1798  * Check that logging is enabled and that this is the right type
1799  * of error for us to handle.
1800  */
/*
 * MCE notifier callback: check that this error belongs to a socket we
 * manage and that it is a memory-controller error in bank 8 (the only
 * bank this driver knows how to decode), then hand it to
 * i7core_check_error().  Returns NOTIFY_DONE for errors we ignore,
 * NOTIFY_OK for errors we consumed.
 */
static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
				  void *data)
{
	struct mce *mce = (struct mce *)data;
	struct i7core_dev *i7_dev;
	struct mem_ctl_info *mci;

	/* Skip errors already claimed by the CEC or for unknown sockets */
	i7_dev = get_i7core_dev(mce->socketid);
	if (!i7_dev || (mce->kflags & MCE_HANDLED_CEC))
		return NOTIFY_DONE;

	mci = i7_dev->mci;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller
	 */
	if (((mce->status & 0xffff) >> 7) != 1)
		return NOTIFY_DONE;

	/* Bank 8 registers are the only ones that we know how to handle */
	if (mce->bank != 8)
		return NOTIFY_DONE;

	i7core_check_error(mci, mce);

	/* Advise mcelog that the errors were handled */
	mce->kflags |= MCE_HANDLED_EDAC;
	return NOTIFY_OK;
}
1831 
/* Hook into the x86 MCE notifier chain at EDAC priority */
static struct notifier_block i7_mce_dec = {
	.notifier_call	= i7core_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};

/*
 * Layout of a DMI "Memory Device" record (DMI_ENTRY_MEM_DEVICE) as walked
 * by decode_dclk() below — appears to mirror the SMBIOS Type 17 structure;
 * only size, length, speed and conf_mem_clk_speed are actually read.
 */
struct memdev_dmi_entry {
	u8 type;
	u8 length;
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;
	u8 form;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));
1861 
1862 
1863 /*
1864  * Decode the DRAM Clock Frequency, be paranoid, make sure that all
1865  * memory devices show the same speed, and if they don't then consider
1866  * all speeds to be invalid.
1867  */
1868 static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
1869 {
1870 	int *dclk_freq = _dclk_freq;
1871 	u16 dmi_mem_clk_speed;
1872 
1873 	if (*dclk_freq == -1)
1874 		return;
1875 
1876 	if (dh->type == DMI_ENTRY_MEM_DEVICE) {
1877 		struct memdev_dmi_entry *memdev_dmi_entry =
1878 			(struct memdev_dmi_entry *)dh;
1879 		unsigned long conf_mem_clk_speed_offset =
1880 			(unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
1881 			(unsigned long)&memdev_dmi_entry->type;
1882 		unsigned long speed_offset =
1883 			(unsigned long)&memdev_dmi_entry->speed -
1884 			(unsigned long)&memdev_dmi_entry->type;
1885 
1886 		/* Check that a DIMM is present */
1887 		if (memdev_dmi_entry->size == 0)
1888 			return;
1889 
1890 		/*
1891 		 * Pick the configured speed if it's available, otherwise
1892 		 * pick the DIMM speed, or we don't have a speed.
1893 		 */
1894 		if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
1895 			dmi_mem_clk_speed =
1896 				memdev_dmi_entry->conf_mem_clk_speed;
1897 		} else if (memdev_dmi_entry->length > speed_offset) {
1898 			dmi_mem_clk_speed = memdev_dmi_entry->speed;
1899 		} else {
1900 			*dclk_freq = -1;
1901 			return;
1902 		}
1903 
1904 		if (*dclk_freq == 0) {
1905 			/* First pass, speed was 0 */
1906 			if (dmi_mem_clk_speed > 0) {
1907 				/* Set speed if a valid speed is read */
1908 				*dclk_freq = dmi_mem_clk_speed;
1909 			} else {
1910 				/* Otherwise we don't have a valid speed */
1911 				*dclk_freq = -1;
1912 			}
1913 		} else if (*dclk_freq > 0 &&
1914 			   *dclk_freq != dmi_mem_clk_speed) {
1915 			/*
1916 			 * If we have a speed, check that all DIMMS are the same
1917 			 * speed, otherwise set the speed as invalid.
1918 			 */
1919 			*dclk_freq = -1;
1920 		}
1921 	}
1922 }
1923 
1924 /*
1925  * The default DCLK frequency is used as a fallback if we
1926  * fail to find anything reliable in the DMI. The value
1927  * is taken straight from the datasheet.
1928  */
1929 #define DEFAULT_DCLK_FREQ 800
1930 
1931 static int get_dclk_freq(void)
1932 {
1933 	int dclk_freq = 0;
1934 
1935 	dmi_walk(decode_dclk, (void *)&dclk_freq);
1936 
1937 	if (dclk_freq < 1)
1938 		return DEFAULT_DCLK_FREQ;
1939 
1940 	return dclk_freq;
1941 }
1942 
1943 /*
1944  * set_sdram_scrub_rate		This routine sets byte/sec bandwidth scrub rate
1945  *				to hardware according to SCRUBINTERVAL formula
1946  *				found in datasheet.
1947  */
static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u32 dw_scrub;
	u32 dw_ssr;

	/* Get data from the MC register, function 2 */
	pdev = pvt->pci_mcr[2];
	if (!pdev)
		return -ENODEV;

	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);

	if (new_bw == 0) {
		/* Prepare to disable patrol scrub */
		dw_scrub &= ~STARTSCRUB;
		/* Stop the patrol scrub engine */
		write_and_test(pdev, MC_SCRUB_CONTROL,
			       dw_scrub & ~SCRUBINTERVAL_MASK);

		/* Get current status of scrub rate and set bit to disable */
		pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
		dw_ssr &= ~SSR_MODE_MASK;
		dw_ssr |= SSR_MODE_DISABLE;
	} else {
		const int cache_line_size = 64;
		const u32 freq_dclk_mhz = pvt->dclk_freq;
		unsigned long long scrub_interval;
		/*
		 * Translate the desired scrub rate to a register value and
		 * program the corresponding register value:
		 * interval = dclk_mhz * 1e6 * cacheline / bandwidth (B/s)
		 */
		scrub_interval = (unsigned long long)freq_dclk_mhz *
			cache_line_size * 1000000;
		do_div(scrub_interval, new_bw);

		/* Reject rates that don't fit the SCRUBINTERVAL field */
		if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
			return -EINVAL;

		dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;

		/* Start the patrol scrub engine */
		pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
				       STARTSCRUB | dw_scrub);

		/* Get current status of scrub rate and set bit to enable */
		pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
		dw_ssr &= ~SSR_MODE_MASK;
		dw_ssr |= SSR_MODE_ENABLE;
	}
	/* Disable or enable scrubbing */
	pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);

	return new_bw;
}
2004 
2005 /*
2006  * get_sdram_scrub_rate		This routine convert current scrub rate value
2007  *				into byte/sec bandwidth according to
2008  *				SCRUBINTERVAL formula found in datasheet.
2009  */
2010 static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
2011 {
2012 	struct i7core_pvt *pvt = mci->pvt_info;
2013 	struct pci_dev *pdev;
2014 	const u32 cache_line_size = 64;
2015 	const u32 freq_dclk_mhz = pvt->dclk_freq;
2016 	unsigned long long scrub_rate;
2017 	u32 scrubval;
2018 
2019 	/* Get data from the MC register, function 2 */
2020 	pdev = pvt->pci_mcr[2];
2021 	if (!pdev)
2022 		return -ENODEV;
2023 
2024 	/* Get current scrub control data */
2025 	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
2026 
2027 	/* Mask highest 8-bits to 0 */
2028 	scrubval &=  SCRUBINTERVAL_MASK;
2029 	if (!scrubval)
2030 		return 0;
2031 
2032 	/* Calculate scrub rate value into byte/sec bandwidth */
2033 	scrub_rate =  (unsigned long long)freq_dclk_mhz *
2034 		1000000 * cache_line_size;
2035 	do_div(scrub_rate, scrubval);
2036 	return (int)scrub_rate;
2037 }
2038 
2039 static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
2040 {
2041 	struct i7core_pvt *pvt = mci->pvt_info;
2042 	u32 pci_lock;
2043 
2044 	/* Unlock writes to pci registers */
2045 	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2046 	pci_lock &= ~0x3;
2047 	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2048 			       pci_lock | MC_CFG_UNLOCK);
2049 
2050 	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
2051 	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
2052 }
2053 
2054 static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
2055 {
2056 	struct i7core_pvt *pvt = mci->pvt_info;
2057 	u32 pci_lock;
2058 
2059 	/* Lock writes to pci registers */
2060 	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2061 	pci_lock &= ~0x3;
2062 	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2063 			       pci_lock | MC_CFG_LOCK);
2064 }
2065 
2066 static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
2067 {
2068 	pvt->i7core_pci = edac_pci_create_generic_ctl(
2069 						&pvt->i7core_dev->pdev[0]->dev,
2070 						EDAC_MOD_STR);
2071 	if (unlikely(!pvt->i7core_pci))
2072 		i7core_printk(KERN_WARNING,
2073 			      "Unable to setup PCI error report via EDAC\n");
2074 }
2075 
2076 static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
2077 {
2078 	if (likely(pvt->i7core_pci))
2079 		edac_pci_release_generic_ctl(pvt->i7core_pci);
2080 	else
2081 		i7core_printk(KERN_ERR,
2082 				"Couldn't find mem_ctl_info for socket %d\n",
2083 				pvt->i7core_dev->socket);
2084 	pvt->i7core_pci = NULL;
2085 }
2086 
/*
 * i7core_unregister_mci	Tear down the EDAC MC instance of one socket.
 *				Reverse of i7core_register_mci(); tolerates
 *				an instance whose registration never
 *				completed (mci == NULL).
 */
static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
{
	struct mem_ctl_info *mci = i7core_dev->mci;
	struct i7core_pvt *pvt;

	/* Nothing to undo if this socket was never (fully) registered */
	if (unlikely(!mci || !mci->pvt_info)) {
		edac_dbg(0, "MC: dev = %p\n", &i7core_dev->pdev[0]->dev);

		i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	pvt = mci->pvt_info;

	edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);

	/* Disable scrubrate setting */
	if (pvt->enable_scrub)
		disable_sdram_scrub_setting(mci);

	/* Disable EDAC polling */
	i7core_pci_ctl_release(pvt);

	/* Remove MC sysfs nodes */
	i7core_delete_sysfs_devices(mci);
	edac_mc_del_mc(mci->pdev);

	/* ctl_name was kasprintf()ed at register time; free it before mci */
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	i7core_dev->mci = NULL;
}
2119 
/*
 * i7core_register_mci		Allocate, populate and register one EDAC
 *				memory-controller instance for a socket.
 *				Returns 0 on success or a negative errno;
 *				on failure the partially built mci is freed
 *				and i7core_dev->mci is reset to NULL.
 */
static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;
	int rc;
	struct edac_mc_layer layers[2];

	/* allocate a new MC control structure */

	/* Layer 0: channels; layer 1: DIMM slots (mapped as virtual csrows) */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = NUM_CHANS;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = MAX_DIMMS;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
			    sizeof(*pvt));
	if (unlikely(!mci))
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	/* Associates i7core_dev and mci for future usage */
	pvt->i7core_dev = i7core_dev;
	i7core_dev->mci = mci;

	/*
	 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
	 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
	 * memory channels
	 */
	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7core_edac.c";

	/* Freed with kfree() on the error paths and at unregister time */
	mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket);
	if (!mci->ctl_name) {
		rc = -ENOMEM;
		goto fail1;
	}

	mci->dev_name = pci_name(i7core_dev->pdev[0]);
	mci->ctl_page_to_phys = NULL;

	/* Store pci devices at mci for faster access */
	rc = mci_bind_devs(mci, i7core_dev);
	if (unlikely(rc < 0))
		goto fail0;


	/* Get dimm basic config */
	get_dimm_config(mci);
	/* record ptr to the generic device */
	mci->pdev = &i7core_dev->pdev[0]->dev;

	/* Enable scrubrate setting */
	if (pvt->enable_scrub)
		enable_sdram_scrub_setting(mci);

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc_with_groups(mci, i7core_dev_groups))) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */

		rc = -EINVAL;
		goto fail0;
	}
	if (i7core_create_sysfs_devices(mci)) {
		edac_dbg(0, "MC: failed to create sysfs nodes\n");
		/* undo the edac_mc_add_mc_with_groups() just above */
		edac_mc_del_mc(mci->pdev);
		rc = -EINVAL;
		goto fail0;
	}

	/* Default error mask is any memory */
	pvt->inject.channel = 0;
	pvt->inject.dimm = -1;
	pvt->inject.rank = -1;
	pvt->inject.bank = -1;
	pvt->inject.page = -1;
	pvt->inject.col = -1;

	/* allocating generic PCI control info */
	i7core_pci_ctl_create(pvt);

	/* DCLK for scrub rate setting */
	pvt->dclk_freq = get_dclk_freq();

	return 0;

fail0:
	kfree(mci->ctl_name);

fail1:
	edac_mc_free(mci);
	i7core_dev->mci = NULL;
	return rc;
}
2224 
2225 /*
2226  *	i7core_probe	Probe for ONE instance of device to see if it is
2227  *			present.
2228  *	return:
2229  *		0 for FOUND a device
2230  *		< 0 for error code
2231  */
2232 
static int i7core_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc, count = 0;
	struct i7core_dev *i7core_dev;

	/* get the pci devices we want to reserve for our use */
	mutex_lock(&i7core_edac_lock);

	/*
	 * All memory controllers are allocated at the first pass.
	 * NOTE(review): 'probed' is never decremented on the failure paths
	 * below, so a later re-probe also returns -ENODEV — this looks
	 * intentional (single-pass probing), but confirm before changing.
	 */
	if (unlikely(probed >= 1)) {
		mutex_unlock(&i7core_edac_lock);
		return -ENODEV;
	}
	probed++;

	rc = i7core_get_all_devices();
	if (unlikely(rc < 0))
		goto fail0;

	/* Register one EDAC MC instance per socket found on the list */
	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		count++;
		rc = i7core_register_mci(i7core_dev);
		if (unlikely(rc < 0))
			goto fail1;
	}

	/*
	 * Nehalem-EX uses a different memory controller. However, as the
	 * memory controller is not visible on some Nehalem/Nehalem-EP, we
	 * need to indirectly probe via a X58 PCI device. The same devices
	 * are found on (some) Nehalem-EX. So, on those machines, the
	 * probe routine needs to return -ENODEV, as the actual Memory
	 * Controller registers won't be detected.
	 */
	if (!count) {
		rc = -ENODEV;
		goto fail1;
	}

	i7core_printk(KERN_INFO,
		      "Driver loaded, %d memory controller(s) found.\n",
		      count);

	mutex_unlock(&i7core_edac_lock);
	return 0;

fail1:
	/*
	 * Unwind every instance registered so far;
	 * i7core_unregister_mci() tolerates partially set up entries.
	 */
	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);

	i7core_put_all_devices();
fail0:
	mutex_unlock(&i7core_edac_lock);
	return rc;
}
2290 
2291 /*
2292  *	i7core_remove	destructor for one instance of device
2293  *
2294  */
2295 static void i7core_remove(struct pci_dev *pdev)
2296 {
2297 	struct i7core_dev *i7core_dev;
2298 
2299 	edac_dbg(0, "\n");
2300 
2301 	/*
2302 	 * we have a trouble here: pdev value for removal will be wrong, since
2303 	 * it will point to the X58 register used to detect that the machine
2304 	 * is a Nehalem or upper design. However, due to the way several PCI
2305 	 * devices are grouped together to provide MC functionality, we need
2306 	 * to use a different method for releasing the devices
2307 	 */
2308 
2309 	mutex_lock(&i7core_edac_lock);
2310 
2311 	if (unlikely(!probed)) {
2312 		mutex_unlock(&i7core_edac_lock);
2313 		return;
2314 	}
2315 
2316 	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
2317 		i7core_unregister_mci(i7core_dev);
2318 
2319 	/* Release PCI resources */
2320 	i7core_put_all_devices();
2321 
2322 	probed--;
2323 
2324 	mutex_unlock(&i7core_edac_lock);
2325 }
2326 
MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);

/*
 *	i7core_driver	pci_driver structure for this module
 *
 *	Note: probe/remove operate on the whole socket list rather than on
 *	the single matched pdev (see the comments in i7core_remove()).
 */
static struct pci_driver i7core_driver = {
	.name     = "i7core_edac",
	.probe    = i7core_probe,
	.remove   = i7core_remove,
	.id_table = i7core_pci_tbl,
};
2339 
2340 /*
2341  *	i7core_init		Module entry function
2342  *			Try to initialize this module for its devices
2343  */
2344 static int __init i7core_init(void)
2345 {
2346 	int pci_rc;
2347 
2348 	edac_dbg(2, "\n");
2349 
2350 	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
2351 	opstate_init();
2352 
2353 	if (use_pci_fixup)
2354 		i7core_xeon_pci_fixup(pci_dev_table);
2355 
2356 	pci_rc = pci_register_driver(&i7core_driver);
2357 
2358 	if (pci_rc >= 0) {
2359 		mce_register_decode_chain(&i7_mce_dec);
2360 		return 0;
2361 	}
2362 
2363 	i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2364 		      pci_rc);
2365 
2366 	return pci_rc;
2367 }
2368 
2369 /*
2370  *	i7core_exit()	Module exit function
2371  *			Unregister the driver
2372  */
static void __exit i7core_exit(void)
{
	edac_dbg(2, "\n");
	/* Teardown mirrors i7core_init(): driver first, then the MCE hook */
	pci_unregister_driver(&i7core_driver);
	mce_unregister_decode_chain(&i7_mce_dec);
}
2379 
module_init(i7core_init);
module_exit(i7core_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
		   I7CORE_REVISION);

/* edac_op_state is defined by the EDAC core; exposed read-only here */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
2391