xref: /linux/drivers/edac/amd64_edac.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
3 
4 static struct edac_pci_ctl_info *pci_ctl;
5 
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
8 
9 /*
10  * Set by command line parameter. If BIOS has enabled the ECC, this override is
11  * cleared to prevent re-enabling the hardware by this driver.
12  */
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
15 
16 static struct msr __percpu *msrs;
17 
18 /*
19  * count successfully initialized driver instances for setup_pci_device()
20  */
21 static atomic_t drv_instances = ATOMIC_INIT(0);
22 
23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs;
26 
27 /*
28  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
29  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
30  * or higher value'.
31  *
32  *FIXME: Produce a better mapping/linearisation.
33  */
34 static const struct scrubrate {
35        u32 scrubval;           /* bit pattern for scrub rate */
36        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
37 } scrubrates[] = {
38 	{ 0x01, 1600000000UL},
39 	{ 0x02, 800000000UL},
40 	{ 0x03, 400000000UL},
41 	{ 0x04, 200000000UL},
42 	{ 0x05, 100000000UL},
43 	{ 0x06, 50000000UL},
44 	{ 0x07, 25000000UL},
45 	{ 0x08, 12284069UL},
46 	{ 0x09, 6274509UL},
47 	{ 0x0A, 3121951UL},
48 	{ 0x0B, 1560975UL},
49 	{ 0x0C, 781440UL},
50 	{ 0x0D, 390720UL},
51 	{ 0x0E, 195300UL},
52 	{ 0x0F, 97650UL},
53 	{ 0x10, 48854UL},
54 	{ 0x11, 24427UL},
55 	{ 0x12, 12213UL},
56 	{ 0x13, 6101UL},
57 	{ 0x14, 3051UL},
58 	{ 0x15, 1523UL},
59 	{ 0x16, 761UL},
60 	{ 0x00, 0UL},        /* scrubbing off */
61 };
62 
63 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
64 			       u32 *val, const char *func)
65 {
66 	int err = 0;
67 
68 	err = pci_read_config_dword(pdev, offset, val);
69 	if (err)
70 		amd64_warn("%s: error reading F%dx%03x.\n",
71 			   func, PCI_FUNC(pdev->devfn), offset);
72 
73 	return err;
74 }
75 
76 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
77 				u32 val, const char *func)
78 {
79 	int err = 0;
80 
81 	err = pci_write_config_dword(pdev, offset, val);
82 	if (err)
83 		amd64_warn("%s: error writing to F%dx%03x.\n",
84 			   func, PCI_FUNC(pdev->devfn), offset);
85 
86 	return err;
87 }
88 
89 /*
90  *
91  * Depending on the family, F2 DCT reads need special handling:
92  *
93  * K8: has a single DCT only
94  *
95  * F10h: each DCT has its own set of regs
96  *	DCT0 -> F2x040..
97  *	DCT1 -> F2x140..
98  *
99  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
100  *
101  * F16h: has only 1 DCT
102  */
103 static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
104 			       const char *func)
105 {
106 	if (addr >= 0x100)
107 		return -EINVAL;
108 
109 	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
110 }
111 
112 static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
113 				 const char *func)
114 {
115 	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
116 }
117 
118 /*
119  * Select DCT to which PCI cfg accesses are routed
120  */
121 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
122 {
123 	u32 reg = 0;
124 
125 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
126 	reg &= (pvt->model >= 0x30) ? ~3 : ~1;
127 	reg |= dct;
128 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
129 }
130 
131 static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
132 				 const char *func)
133 {
134 	u8 dct  = 0;
135 
136 	/* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */
137 	if (addr >= 0x140 && addr <= 0x1a0) {
138 		dct   = (pvt->model >= 0x30) ? 3 : 1;
139 		addr -= 0x100;
140 	}
141 
142 	f15h_select_dct(pvt, dct);
143 
144 	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
145 }
146 
147 /*
148  * Memory scrubber control interface. For K8, memory scrubbing is handled by
149  * hardware and can involve L2 cache, dcache as well as the main memory. With
150  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
151  * functionality.
152  *
153  * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
154  * (dram) over to cache lines. This is nasty, so we will use bandwidth in
155  * bytes/sec for the setting.
156  *
157  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
158  * other archs, we might not have access to the caches directly.
159  */
160 
161 /*
162  * scan the scrub rate mapping table for a close or matching bandwidth value to
163  * issue. If requested is too big, then use last maximum value found.
164  */
165 static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
166 {
167 	u32 scrubval;
168 	int i;
169 
170 	/*
171 	 * map the configured rate (new_bw) to a value specific to the AMD64
172 	 * memory controller and apply to register. Search for the first
173 	 * bandwidth entry that is greater or equal than the setting requested
174 	 * and program that. If at last entry, turn off DRAM scrubbing.
175 	 *
176 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
177 	 * by falling back to the last element in scrubrates[].
178 	 */
179 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
180 		/*
181 		 * skip scrub rates which aren't recommended
182 		 * (see F10 BKDG, F3x58)
183 		 */
184 		if (scrubrates[i].scrubval < min_rate)
185 			continue;
186 
187 		if (scrubrates[i].bandwidth <= new_bw)
188 			break;
189 	}
190 
191 	scrubval = scrubrates[i].scrubval;
192 
193 	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
194 
195 	if (scrubval)
196 		return scrubrates[i].bandwidth;
197 
198 	return 0;
199 }
200 
201 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
202 {
203 	struct amd64_pvt *pvt = mci->pvt_info;
204 	u32 min_scrubrate = 0x5;
205 
206 	if (pvt->fam == 0xf)
207 		min_scrubrate = 0x0;
208 
209 	/* Erratum #505 */
210 	if (pvt->fam == 0x15 && pvt->model < 0x10)
211 		f15h_select_dct(pvt, 0);
212 
213 	return __set_scrub_rate(pvt->F3, bw, min_scrubrate);
214 }
215 
216 static int get_scrub_rate(struct mem_ctl_info *mci)
217 {
218 	struct amd64_pvt *pvt = mci->pvt_info;
219 	u32 scrubval = 0;
220 	int i, retval = -EINVAL;
221 
222 	/* Erratum #505 */
223 	if (pvt->fam == 0x15 && pvt->model < 0x10)
224 		f15h_select_dct(pvt, 0);
225 
226 	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
227 
228 	scrubval = scrubval & 0x001F;
229 
230 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
231 		if (scrubrates[i].scrubval == scrubval) {
232 			retval = scrubrates[i].bandwidth;
233 			break;
234 		}
235 	}
236 	return retval;
237 }
238 
239 /*
240  * returns true if the SysAddr given by sys_addr matches the
241  * DRAM base/limit associated with node_id
242  */
243 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
244 {
245 	u64 addr;
246 
247 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
248 	 * all ones if the most significant implemented address bit is 1.
249 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
250 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
251 	 * Application Programming.
252 	 */
253 	addr = sys_addr & 0x000000ffffffffffull;
254 
255 	return ((addr >= get_dram_base(pvt, nid)) &&
256 		(addr <= get_dram_limit(pvt, nid)));
257 }
258 
259 /*
260  * Attempt to map a SysAddr to a node. On success, return a pointer to the
261  * mem_ctl_info structure for the node that the SysAddr maps to.
262  *
263  * On failure, return NULL.
264  */
265 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
266 						u64 sys_addr)
267 {
268 	struct amd64_pvt *pvt;
269 	u8 node_id;
270 	u32 intlv_en, bits;
271 
272 	/*
273 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
274 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
275 	 */
276 	pvt = mci->pvt_info;
277 
278 	/*
279 	 * The value of this field should be the same for all DRAM Base
280 	 * registers.  Therefore we arbitrarily choose to read it from the
281 	 * register for node 0.
282 	 */
283 	intlv_en = dram_intlv_en(pvt, 0);
284 
285 	if (intlv_en == 0) {
286 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
287 			if (base_limit_match(pvt, sys_addr, node_id))
288 				goto found;
289 		}
290 		goto err_no_match;
291 	}
292 
293 	if (unlikely((intlv_en != 0x01) &&
294 		     (intlv_en != 0x03) &&
295 		     (intlv_en != 0x07))) {
296 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
297 		return NULL;
298 	}
299 
300 	bits = (((u32) sys_addr) >> 12) & intlv_en;
301 
302 	for (node_id = 0; ; ) {
303 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
304 			break;	/* intlv_sel field matches */
305 
306 		if (++node_id >= DRAM_RANGES)
307 			goto err_no_match;
308 	}
309 
310 	/* sanity test for sys_addr */
311 	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
312 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
313 			   "range for node %d with node interleaving enabled.\n",
314 			   __func__, sys_addr, node_id);
315 		return NULL;
316 	}
317 
318 found:
319 	return edac_mc_find((int)node_id);
320 
321 err_no_match:
322 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
323 		 (unsigned long)sys_addr);
324 
325 	return NULL;
326 }
327 
328 /*
329  * compute the CS base address of the @csrow on the DRAM controller @dct.
330  * For details see F2x[5C:40] in the processor's BKDG
331  */
332 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
333 				 u64 *base, u64 *mask)
334 {
335 	u64 csbase, csmask, base_bits, mask_bits;
336 	u8 addr_shift;
337 
338 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
339 		csbase		= pvt->csels[dct].csbases[csrow];
340 		csmask		= pvt->csels[dct].csmasks[csrow];
341 		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
342 		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
343 		addr_shift	= 4;
344 
345 	/*
346 	 * F16h and F15h, models 30h and later need two addr_shift values:
347 	 * 8 for high and 6 for low (cf. F16h BKDG).
348 	 */
349 	} else if (pvt->fam == 0x16 ||
350 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
351 		csbase          = pvt->csels[dct].csbases[csrow];
352 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
353 
354 		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
355 		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
356 
357 		*mask = ~0ULL;
358 		/* poke holes for the csmask */
359 		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
360 			   (GENMASK_ULL(30, 19) << 8));
361 
362 		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
363 		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
364 
365 		return;
366 	} else {
367 		csbase		= pvt->csels[dct].csbases[csrow];
368 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
369 		addr_shift	= 8;
370 
371 		if (pvt->fam == 0x15)
372 			base_bits = mask_bits =
373 				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
374 		else
375 			base_bits = mask_bits =
376 				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
377 	}
378 
379 	*base  = (csbase & base_bits) << addr_shift;
380 
381 	*mask  = ~0ULL;
382 	/* poke holes for the csmask */
383 	*mask &= ~(mask_bits << addr_shift);
384 	/* OR them in */
385 	*mask |= (csmask & mask_bits) << addr_shift;
386 }
387 
388 #define for_each_chip_select(i, dct, pvt) \
389 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
390 
391 #define chip_select_base(i, dct, pvt) \
392 	pvt->csels[dct].csbases[i]
393 
394 #define for_each_chip_select_mask(i, dct, pvt) \
395 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
396 
397 /*
398  * @input_addr is an InputAddr associated with the node given by mci. Return the
399  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
400  */
401 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
402 {
403 	struct amd64_pvt *pvt;
404 	int csrow;
405 	u64 base, mask;
406 
407 	pvt = mci->pvt_info;
408 
409 	for_each_chip_select(csrow, 0, pvt) {
410 		if (!csrow_enabled(csrow, 0, pvt))
411 			continue;
412 
413 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
414 
415 		mask = ~mask;
416 
417 		if ((input_addr & mask) == (base & mask)) {
418 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
419 				 (unsigned long)input_addr, csrow,
420 				 pvt->mc_node_id);
421 
422 			return csrow;
423 		}
424 	}
425 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
426 		 (unsigned long)input_addr, pvt->mc_node_id);
427 
428 	return -1;
429 }
430 
431 /*
432  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
433  * for the node represented by mci. Info is passed back in *hole_base,
434  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
435  * info is invalid. Info may be invalid for either of the following reasons:
436  *
437  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
438  *   Address Register does not exist.
439  *
440  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
441  *   indicating that its contents are not valid.
442  *
443  * The values passed back in *hole_base, *hole_offset, and *hole_size are
444  * complete 32-bit values despite the fact that the bitfields in the DHAR
445  * only represent bits 31-24 of the base and offset values.
446  */
447 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
448 			     u64 *hole_offset, u64 *hole_size)
449 {
450 	struct amd64_pvt *pvt = mci->pvt_info;
451 
452 	/* only revE and later have the DRAM Hole Address Register */
453 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
454 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
455 			 pvt->ext_model, pvt->mc_node_id);
456 		return 1;
457 	}
458 
459 	/* valid for Fam10h and above */
460 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
461 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
462 		return 1;
463 	}
464 
465 	if (!dhar_valid(pvt)) {
466 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
467 			 pvt->mc_node_id);
468 		return 1;
469 	}
470 
471 	/* This node has Memory Hoisting */
472 
473 	/* +------------------+--------------------+--------------------+-----
474 	 * | memory           | DRAM hole          | relocated          |
475 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
476 	 * |                  |                    | DRAM hole          |
477 	 * |                  |                    | [0x100000000,      |
478 	 * |                  |                    |  (0x100000000+     |
479 	 * |                  |                    |   (0xffffffff-x))] |
480 	 * +------------------+--------------------+--------------------+-----
481 	 *
482 	 * Above is a diagram of physical memory showing the DRAM hole and the
483 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
484 	 * starts at address x (the base address) and extends through address
485 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
486 	 * addresses in the hole so that they start at 0x100000000.
487 	 */
488 
489 	*hole_base = dhar_base(pvt);
490 	*hole_size = (1ULL << 32) - *hole_base;
491 
492 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
493 					: k8_dhar_offset(pvt);
494 
495 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
496 		 pvt->mc_node_id, (unsigned long)*hole_base,
497 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
498 
499 	return 0;
500 }
501 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
502 
503 /*
504  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
505  * assumed that sys_addr maps to the node given by mci.
506  *
507  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
508  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
509  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
510  * then it is also involved in translating a SysAddr to a DramAddr. Sections
511  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
512  * These parts of the documentation are unclear. I interpret them as follows:
513  *
514  * When node n receives a SysAddr, it processes the SysAddr as follows:
515  *
516  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
517  *    Limit registers for node n. If the SysAddr is not within the range
518  *    specified by the base and limit values, then node n ignores the Sysaddr
519  *    (since it does not map to node n). Otherwise continue to step 2 below.
520  *
521  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
522  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
523  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
524  *    hole. If not, skip to step 3 below. Else get the value of the
525  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
526  *    offset defined by this value from the SysAddr.
527  *
528  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
529  *    Base register for node n. To obtain the DramAddr, subtract the base
530  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
531  */
532 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
533 {
534 	struct amd64_pvt *pvt = mci->pvt_info;
535 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
536 	int ret;
537 
538 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
539 
540 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
541 				      &hole_size);
542 	if (!ret) {
543 		if ((sys_addr >= (1ULL << 32)) &&
544 		    (sys_addr < ((1ULL << 32) + hole_size))) {
545 			/* use DHAR to translate SysAddr to DramAddr */
546 			dram_addr = sys_addr - hole_offset;
547 
548 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
549 				 (unsigned long)sys_addr,
550 				 (unsigned long)dram_addr);
551 
552 			return dram_addr;
553 		}
554 	}
555 
556 	/*
557 	 * Translate the SysAddr to a DramAddr as shown near the start of
558 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
559 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
560 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
561 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
562 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
563 	 * Programmer's Manual Volume 1 Application Programming.
564 	 */
565 	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
566 
567 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
568 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
569 	return dram_addr;
570 }
571 
572 /*
573  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
574  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
575  * for node interleaving.
576  */
577 static int num_node_interleave_bits(unsigned intlv_en)
578 {
579 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
580 	int n;
581 
582 	BUG_ON(intlv_en > 7);
583 	n = intlv_shift_table[intlv_en];
584 	return n;
585 }
586 
587 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
588 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
589 {
590 	struct amd64_pvt *pvt;
591 	int intlv_shift;
592 	u64 input_addr;
593 
594 	pvt = mci->pvt_info;
595 
596 	/*
597 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
598 	 * concerning translating a DramAddr to an InputAddr.
599 	 */
600 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
601 	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
602 		      (dram_addr & 0xfff);
603 
604 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
605 		 intlv_shift, (unsigned long)dram_addr,
606 		 (unsigned long)input_addr);
607 
608 	return input_addr;
609 }
610 
611 /*
612  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
613  * assumed that @sys_addr maps to the node given by mci.
614  */
615 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
616 {
617 	u64 input_addr;
618 
619 	input_addr =
620 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
621 
622 	edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
623 		 (unsigned long)sys_addr, (unsigned long)input_addr);
624 
625 	return input_addr;
626 }
627 
628 /* Map the Error address to a PAGE and PAGE OFFSET. */
629 static inline void error_address_to_page_and_offset(u64 error_address,
630 						    struct err_info *err)
631 {
632 	err->page = (u32) (error_address >> PAGE_SHIFT);
633 	err->offset = ((u32) error_address) & ~PAGE_MASK;
634 }
635 
636 /*
637  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
638  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
639  * of a node that detected an ECC memory error.  mci represents the node that
640  * the error address maps to (possibly different from the node that detected
641  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
642  * error.
643  */
644 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
645 {
646 	int csrow;
647 
648 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
649 
650 	if (csrow == -1)
651 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
652 				  "address 0x%lx\n", (unsigned long)sys_addr);
653 	return csrow;
654 }
655 
656 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
657 
658 /*
659  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
660  * are ECC capable.
661  */
662 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
663 {
664 	u8 bit;
665 	unsigned long edac_cap = EDAC_FLAG_NONE;
666 
667 	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
668 		? 19
669 		: 17;
670 
671 	if (pvt->dclr0 & BIT(bit))
672 		edac_cap = EDAC_FLAG_SECDED;
673 
674 	return edac_cap;
675 }
676 
677 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
678 
679 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
680 {
681 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
682 
683 	edac_dbg(1, "  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
684 		 (dclr & BIT(16)) ?  "un" : "",
685 		 (dclr & BIT(19)) ? "yes" : "no");
686 
687 	edac_dbg(1, "  PAR/ERR parity: %s\n",
688 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
689 
690 	if (pvt->fam == 0x10)
691 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
692 			 (dclr & BIT(11)) ?  "128b" : "64b");
693 
694 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
695 		 (dclr & BIT(12)) ?  "yes" : "no",
696 		 (dclr & BIT(13)) ?  "yes" : "no",
697 		 (dclr & BIT(14)) ?  "yes" : "no",
698 		 (dclr & BIT(15)) ?  "yes" : "no");
699 }
700 
701 /* Display and decode various NB registers for debug purposes. */
702 static void dump_misc_regs(struct amd64_pvt *pvt)
703 {
704 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
705 
706 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
707 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
708 
709 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
710 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
711 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
712 
713 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
714 
715 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
716 
717 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
718 		 pvt->dhar, dhar_base(pvt),
719 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
720 				   : f10_dhar_offset(pvt));
721 
722 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
723 
724 	debug_display_dimm_sizes(pvt, 0);
725 
726 	/* everything below this point is Fam10h and above */
727 	if (pvt->fam == 0xf)
728 		return;
729 
730 	debug_display_dimm_sizes(pvt, 1);
731 
732 	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
733 
734 	/* Only if NOT ganged does dclr1 have valid info */
735 	if (!dct_ganging_enabled(pvt))
736 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
737 }
738 
739 /*
740  * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
741  */
742 static void prep_chip_selects(struct amd64_pvt *pvt)
743 {
744 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
745 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
746 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
747 	} else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
748 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
749 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
750 	} else {
751 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
752 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
753 	}
754 }
755 
756 /*
757  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
758  */
759 static void read_dct_base_mask(struct amd64_pvt *pvt)
760 {
761 	int cs;
762 
763 	prep_chip_selects(pvt);
764 
765 	for_each_chip_select(cs, 0, pvt) {
766 		int reg0   = DCSB0 + (cs * 4);
767 		int reg1   = DCSB1 + (cs * 4);
768 		u32 *base0 = &pvt->csels[0].csbases[cs];
769 		u32 *base1 = &pvt->csels[1].csbases[cs];
770 
771 		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
772 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
773 				 cs, *base0, reg0);
774 
775 		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
776 			continue;
777 
778 		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
779 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
780 				 cs, *base1, reg1);
781 	}
782 
783 	for_each_chip_select_mask(cs, 0, pvt) {
784 		int reg0   = DCSM0 + (cs * 4);
785 		int reg1   = DCSM1 + (cs * 4);
786 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
787 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
788 
789 		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
790 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
791 				 cs, *mask0, reg0);
792 
793 		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
794 			continue;
795 
796 		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
797 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
798 				 cs, *mask1, reg1);
799 	}
800 }
801 
802 static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs)
803 {
804 	enum mem_type type;
805 
806 	/* F15h supports only DDR3 */
807 	if (pvt->fam >= 0x15)
808 		type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
809 	else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
810 		if (pvt->dchr0 & DDR3_MODE)
811 			type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
812 		else
813 			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
814 	} else {
815 		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
816 	}
817 
818 	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
819 
820 	return type;
821 }
822 
823 /* Get the number of DCT channels the memory controller is using. */
824 static int k8_early_channel_count(struct amd64_pvt *pvt)
825 {
826 	int flag;
827 
828 	if (pvt->ext_model >= K8_REV_F)
829 		/* RevF (NPT) and later */
830 		flag = pvt->dclr0 & WIDTH_128;
831 	else
832 		/* RevE and earlier */
833 		flag = pvt->dclr0 & REVE_WIDTH_128;
834 
835 	/* not used */
836 	pvt->dclr1 = 0;
837 
838 	return (flag) ? 2 : 1;
839 }
840 
841 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
842 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
843 {
844 	u64 addr;
845 	u8 start_bit = 1;
846 	u8 end_bit   = 47;
847 
848 	if (pvt->fam == 0xf) {
849 		start_bit = 3;
850 		end_bit   = 39;
851 	}
852 
853 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
854 
855 	/*
856 	 * Erratum 637 workaround
857 	 */
858 	if (pvt->fam == 0x15) {
859 		struct amd64_pvt *pvt;
860 		u64 cc6_base, tmp_addr;
861 		u32 tmp;
862 		u16 mce_nid;
863 		u8 intlv_en;
864 
865 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
866 			return addr;
867 
868 		mce_nid	= amd_get_nb_id(m->extcpu);
869 		pvt	= mcis[mce_nid]->pvt_info;
870 
871 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
872 		intlv_en = tmp >> 21 & 0x7;
873 
874 		/* add [47:27] + 3 trailing bits */
875 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
876 
877 		/* reverse and add DramIntlvEn */
878 		cc6_base |= intlv_en ^ 0x7;
879 
880 		/* pin at [47:24] */
881 		cc6_base <<= 24;
882 
883 		if (!intlv_en)
884 			return cc6_base | (addr & GENMASK_ULL(23, 0));
885 
886 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
887 
888 							/* faster log2 */
889 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
890 
891 		/* OR DramIntlvSel into bits [14:12] */
892 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
893 
894 		/* add remaining [11:0] bits from original MC4_ADDR */
895 		tmp_addr |= addr & GENMASK_ULL(11, 0);
896 
897 		return cc6_base | tmp_addr;
898 	}
899 
900 	return addr;
901 }
902 
903 static struct pci_dev *pci_get_related_function(unsigned int vendor,
904 						unsigned int device,
905 						struct pci_dev *related)
906 {
907 	struct pci_dev *dev = NULL;
908 
909 	while ((dev = pci_get_device(vendor, device, dev))) {
910 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
911 		    (dev->bus->number == related->bus->number) &&
912 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
913 			break;
914 	}
915 
916 	return dev;
917 }
918 
919 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
920 {
921 	struct amd_northbridge *nb;
922 	struct pci_dev *f1 = NULL;
923 	unsigned int pci_func;
924 	int off = range << 3;
925 	u32 llim;
926 
927 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
928 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
929 
930 	if (pvt->fam == 0xf)
931 		return;
932 
933 	if (!dram_rw(pvt, range))
934 		return;
935 
936 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
937 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
938 
939 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
940 	if (pvt->fam != 0x15)
941 		return;
942 
943 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
944 	if (WARN_ON(!nb))
945 		return;
946 
947 	pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
948 					: PCI_DEVICE_ID_AMD_15H_NB_F1;
949 
950 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
951 	if (WARN_ON(!f1))
952 		return;
953 
954 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
955 
956 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
957 
958 				    /* {[39:27],111b} */
959 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
960 
961 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
962 
963 				    /* [47:40] */
964 	pvt->ranges[range].lim.hi |= llim >> 13;
965 
966 	pci_dev_put(f1);
967 }
968 
969 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
970 				    struct err_info *err)
971 {
972 	struct amd64_pvt *pvt = mci->pvt_info;
973 
974 	error_address_to_page_and_offset(sys_addr, err);
975 
976 	/*
977 	 * Find out which node the error address belongs to. This may be
978 	 * different from the node that detected the error.
979 	 */
980 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
981 	if (!err->src_mci) {
982 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
983 			     (unsigned long)sys_addr);
984 		err->err_code = ERR_NODE;
985 		return;
986 	}
987 
988 	/* Now map the sys_addr to a CSROW */
989 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
990 	if (err->csrow < 0) {
991 		err->err_code = ERR_CSROW;
992 		return;
993 	}
994 
995 	/* CHIPKILL enabled */
996 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
997 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
998 		if (err->channel < 0) {
999 			/*
1000 			 * Syndrome didn't map, so we don't know which of the
1001 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1002 			 * as suspect.
1003 			 */
1004 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1005 				      "possible error reporting race\n",
1006 				      err->syndrome);
1007 			err->err_code = ERR_CHANNEL;
1008 			return;
1009 		}
1010 	} else {
1011 		/*
1012 		 * non-chipkill ecc mode
1013 		 *
1014 		 * The k8 documentation is unclear about how to determine the
1015 		 * channel number when using non-chipkill memory.  This method
1016 		 * was obtained from email communication with someone at AMD.
1017 		 * (Wish the email was placed in this comment - norsk)
1018 		 */
1019 		err->channel = ((sys_addr & BIT(3)) != 0);
1020 	}
1021 }
1022 
1023 static int ddr2_cs_size(unsigned i, bool dct_width)
1024 {
1025 	unsigned shift = 0;
1026 
1027 	if (i <= 2)
1028 		shift = i;
1029 	else if (!(i & 0x1))
1030 		shift = i >> 1;
1031 	else
1032 		shift = (i + 1) >> 1;
1033 
1034 	return 128 << (shift + !!dct_width);
1035 }
1036 
1037 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1038 				  unsigned cs_mode)
1039 {
1040 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1041 
1042 	if (pvt->ext_model >= K8_REV_F) {
1043 		WARN_ON(cs_mode > 11);
1044 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1045 	}
1046 	else if (pvt->ext_model >= K8_REV_D) {
1047 		unsigned diff;
1048 		WARN_ON(cs_mode > 10);
1049 
1050 		/*
1051 		 * the below calculation, besides trying to win an obfuscated C
1052 		 * contest, maps cs_mode values to DIMM chip select sizes. The
1053 		 * mappings are:
1054 		 *
1055 		 * cs_mode	CS size (mb)
1056 		 * =======	============
1057 		 * 0		32
1058 		 * 1		64
1059 		 * 2		128
1060 		 * 3		128
1061 		 * 4		256
1062 		 * 5		512
1063 		 * 6		256
1064 		 * 7		512
1065 		 * 8		1024
1066 		 * 9		1024
1067 		 * 10		2048
1068 		 *
1069 		 * Basically, it calculates a value with which to shift the
1070 		 * smallest CS size of 32MB.
1071 		 *
1072 		 * ddr[23]_cs_size have a similar purpose.
1073 		 */
1074 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1075 
1076 		return 32 << (cs_mode - diff);
1077 	}
1078 	else {
1079 		WARN_ON(cs_mode > 6);
1080 		return 32 << cs_mode;
1081 	}
1082 }
1083 
1084 /*
1085  * Get the number of DCT channels in use.
1086  *
1087  * Return:
1088  *	number of Memory Channels in operation
1089  * Pass back:
1090  *	contents of the DCL0_LOW register
1091  */
1092 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1093 {
1094 	int i, j, channels = 0;
1095 
1096 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1097 	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1098 		return 2;
1099 
1100 	/*
1101 	 * Need to check if in unganged mode: In such, there are 2 channels,
1102 	 * but they are not in 128 bit mode and thus the above 'dclr0' status
1103 	 * bit will be OFF.
1104 	 *
1105 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1106 	 * their CSEnable bit on. If so, then SINGLE DIMM case.
1107 	 */
1108 	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1109 
1110 	/*
1111 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1112 	 * is more than just one DIMM present in unganged mode. Need to check
1113 	 * both controllers since DIMMs can be placed in either one.
1114 	 */
1115 	for (i = 0; i < 2; i++) {
1116 		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1117 
1118 		for (j = 0; j < 4; j++) {
1119 			if (DBAM_DIMM(j, dbam) > 0) {
1120 				channels++;
1121 				break;
1122 			}
1123 		}
1124 	}
1125 
1126 	if (channels > 2)
1127 		channels = 2;
1128 
1129 	amd64_info("MCT channel count: %d\n", channels);
1130 
1131 	return channels;
1132 }
1133 
1134 static int ddr3_cs_size(unsigned i, bool dct_width)
1135 {
1136 	unsigned shift = 0;
1137 	int cs_size = 0;
1138 
1139 	if (i == 0 || i == 3 || i == 4)
1140 		cs_size = -1;
1141 	else if (i <= 2)
1142 		shift = i;
1143 	else if (i == 12)
1144 		shift = 7;
1145 	else if (!(i & 0x1))
1146 		shift = i >> 1;
1147 	else
1148 		shift = (i + 1) >> 1;
1149 
1150 	if (cs_size != -1)
1151 		cs_size = (128 * (1 << !!dct_width)) << shift;
1152 
1153 	return cs_size;
1154 }
1155 
1156 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1157 				   unsigned cs_mode)
1158 {
1159 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1160 
1161 	WARN_ON(cs_mode > 11);
1162 
1163 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1164 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1165 	else
1166 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1167 }
1168 
1169 /*
1170  * F15h supports only 64bit DCT interfaces
1171  */
1172 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1173 				   unsigned cs_mode)
1174 {
1175 	WARN_ON(cs_mode > 12);
1176 
1177 	return ddr3_cs_size(cs_mode, false);
1178 }
1179 
1180 /*
1181  * F16h and F15h model 30h have only limited cs_modes.
1182  */
1183 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1184 				unsigned cs_mode)
1185 {
1186 	WARN_ON(cs_mode > 12);
1187 
1188 	if (cs_mode == 6 || cs_mode == 8 ||
1189 	    cs_mode == 9 || cs_mode == 12)
1190 		return -1;
1191 	else
1192 		return ddr3_cs_size(cs_mode, false);
1193 }
1194 
1195 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1196 {
1197 
1198 	if (pvt->fam == 0xf)
1199 		return;
1200 
1201 	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1202 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1203 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1204 
1205 		edac_dbg(0, "  DCTs operate in %s mode\n",
1206 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1207 
1208 		if (!dct_ganging_enabled(pvt))
1209 			edac_dbg(0, "  Address range split per DCT: %s\n",
1210 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1211 
1212 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1213 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1214 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
1215 
1216 		edac_dbg(0, "  channel interleave: %s, "
1217 			 "interleave bits selector: 0x%x\n",
1218 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1219 			 dct_sel_interleave_addr(pvt));
1220 	}
1221 
1222 	amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
1223 }
1224 
1225 /*
1226  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1227  * 2.10.12 Memory Interleaving Modes).
1228  */
1229 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1230 				     u8 intlv_en, int num_dcts_intlv,
1231 				     u32 dct_sel)
1232 {
1233 	u8 channel = 0;
1234 	u8 select;
1235 
1236 	if (!(intlv_en))
1237 		return (u8)(dct_sel);
1238 
1239 	if (num_dcts_intlv == 2) {
1240 		select = (sys_addr >> 8) & 0x3;
1241 		channel = select ? 0x3 : 0;
1242 	} else if (num_dcts_intlv == 4) {
1243 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1244 		switch (intlv_addr) {
1245 		case 0x4:
1246 			channel = (sys_addr >> 8) & 0x3;
1247 			break;
1248 		case 0x5:
1249 			channel = (sys_addr >> 9) & 0x3;
1250 			break;
1251 		}
1252 	}
1253 	return channel;
1254 }
1255 
1256 /*
1257  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1258  * Interleaving Modes.
1259  */
1260 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1261 				bool hi_range_sel, u8 intlv_en)
1262 {
1263 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1264 
1265 	if (dct_ganging_enabled(pvt))
1266 		return 0;
1267 
1268 	if (hi_range_sel)
1269 		return dct_sel_high;
1270 
1271 	/*
1272 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1273 	 */
1274 	if (dct_interleave_enabled(pvt)) {
1275 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1276 
1277 		/* return DCT select function: 0=DCT0, 1=DCT1 */
1278 		if (!intlv_addr)
1279 			return sys_addr >> 6 & 1;
1280 
1281 		if (intlv_addr & 0x2) {
1282 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
1283 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1284 
1285 			return ((sys_addr >> shift) & 1) ^ temp;
1286 		}
1287 
1288 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1289 	}
1290 
1291 	if (dct_high_range_enabled(pvt))
1292 		return ~dct_sel_high & 1;
1293 
1294 	return 0;
1295 }
1296 
1297 /* Convert the sys_addr to the normalized DCT address */
1298 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1299 				 u64 sys_addr, bool hi_rng,
1300 				 u32 dct_sel_base_addr)
1301 {
1302 	u64 chan_off;
1303 	u64 dram_base		= get_dram_base(pvt, range);
1304 	u64 hole_off		= f10_dhar_offset(pvt);
1305 	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1306 
1307 	if (hi_rng) {
1308 		/*
1309 		 * if
1310 		 * base address of high range is below 4Gb
1311 		 * (bits [47:27] at [31:11])
1312 		 * DRAM address space on this DCT is hoisted above 4Gb	&&
1313 		 * sys_addr > 4Gb
1314 		 *
1315 		 *	remove hole offset from sys_addr
1316 		 * else
1317 		 *	remove high range offset from sys_addr
1318 		 */
1319 		if ((!(dct_sel_base_addr >> 16) ||
1320 		     dct_sel_base_addr < dhar_base(pvt)) &&
1321 		    dhar_valid(pvt) &&
1322 		    (sys_addr >= BIT_64(32)))
1323 			chan_off = hole_off;
1324 		else
1325 			chan_off = dct_sel_base_off;
1326 	} else {
1327 		/*
1328 		 * if
1329 		 * we have a valid hole		&&
1330 		 * sys_addr > 4Gb
1331 		 *
1332 		 *	remove hole
1333 		 * else
1334 		 *	remove dram base to normalize to DCT address
1335 		 */
1336 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1337 			chan_off = hole_off;
1338 		else
1339 			chan_off = dram_base;
1340 	}
1341 
1342 	return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
1343 }
1344 
1345 /*
1346  * checks if the csrow passed in is marked as SPARED, if so returns the new
1347  * spare row
1348  */
1349 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1350 {
1351 	int tmp_cs;
1352 
1353 	if (online_spare_swap_done(pvt, dct) &&
1354 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
1355 
1356 		for_each_chip_select(tmp_cs, dct, pvt) {
1357 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1358 				csrow = tmp_cs;
1359 				break;
1360 			}
1361 		}
1362 	}
1363 	return csrow;
1364 }
1365 
1366 /*
1367  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1368  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1369  *
1370  * Return:
1371  *	-EINVAL:  NOT FOUND
1372  *	0..csrow = Chip-Select Row
1373  */
1374 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1375 {
1376 	struct mem_ctl_info *mci;
1377 	struct amd64_pvt *pvt;
1378 	u64 cs_base, cs_mask;
1379 	int cs_found = -EINVAL;
1380 	int csrow;
1381 
1382 	mci = mcis[nid];
1383 	if (!mci)
1384 		return cs_found;
1385 
1386 	pvt = mci->pvt_info;
1387 
1388 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1389 
1390 	for_each_chip_select(csrow, dct, pvt) {
1391 		if (!csrow_enabled(csrow, dct, pvt))
1392 			continue;
1393 
1394 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1395 
1396 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1397 			 csrow, cs_base, cs_mask);
1398 
1399 		cs_mask = ~cs_mask;
1400 
1401 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1402 			 (in_addr & cs_mask), (cs_base & cs_mask));
1403 
1404 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1405 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1406 				cs_found =  csrow;
1407 				break;
1408 			}
1409 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
1410 
1411 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1412 			break;
1413 		}
1414 	}
1415 	return cs_found;
1416 }
1417 
1418 /*
1419  * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1420  * swapped with a region located at the bottom of memory so that the GPU can use
1421  * the interleaved region and thus two channels.
1422  */
1423 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1424 {
1425 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1426 
1427 	if (pvt->fam == 0x10) {
1428 		/* only revC3 and revE have that feature */
1429 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1430 			return sys_addr;
1431 	}
1432 
1433 	amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
1434 
1435 	if (!(swap_reg & 0x1))
1436 		return sys_addr;
1437 
1438 	swap_base	= (swap_reg >> 3) & 0x7f;
1439 	swap_limit	= (swap_reg >> 11) & 0x7f;
1440 	rgn_size	= (swap_reg >> 20) & 0x7f;
1441 	tmp_addr	= sys_addr >> 27;
1442 
1443 	if (!(sys_addr >> 34) &&
1444 	    (((tmp_addr >= swap_base) &&
1445 	     (tmp_addr <= swap_limit)) ||
1446 	     (tmp_addr < rgn_size)))
1447 		return sys_addr ^ (u64)swap_base << 27;
1448 
1449 	return sys_addr;
1450 }
1451 
1452 /* For a given @dram_range, check if @sys_addr falls within it. */
1453 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1454 				  u64 sys_addr, int *chan_sel)
1455 {
1456 	int cs_found = -EINVAL;
1457 	u64 chan_addr;
1458 	u32 dct_sel_base;
1459 	u8 channel;
1460 	bool high_range = false;
1461 
1462 	u8 node_id    = dram_dst_node(pvt, range);
1463 	u8 intlv_en   = dram_intlv_en(pvt, range);
1464 	u32 intlv_sel = dram_intlv_sel(pvt, range);
1465 
1466 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1467 		 range, sys_addr, get_dram_limit(pvt, range));
1468 
1469 	if (dhar_valid(pvt) &&
1470 	    dhar_base(pvt) <= sys_addr &&
1471 	    sys_addr < BIT_64(32)) {
1472 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1473 			    sys_addr);
1474 		return -EINVAL;
1475 	}
1476 
1477 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1478 		return -EINVAL;
1479 
1480 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1481 
1482 	dct_sel_base = dct_sel_baseaddr(pvt);
1483 
1484 	/*
1485 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1486 	 * select between DCT0 and DCT1.
1487 	 */
1488 	if (dct_high_range_enabled(pvt) &&
1489 	   !dct_ganging_enabled(pvt) &&
1490 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1491 		high_range = true;
1492 
1493 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1494 
1495 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1496 					  high_range, dct_sel_base);
1497 
1498 	/* Remove node interleaving, see F1x120 */
1499 	if (intlv_en)
1500 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1501 			    (chan_addr & 0xfff);
1502 
1503 	/* remove channel interleave */
1504 	if (dct_interleave_enabled(pvt) &&
1505 	   !dct_high_range_enabled(pvt) &&
1506 	   !dct_ganging_enabled(pvt)) {
1507 
1508 		if (dct_sel_interleave_addr(pvt) != 1) {
1509 			if (dct_sel_interleave_addr(pvt) == 0x3)
1510 				/* hash 9 */
1511 				chan_addr = ((chan_addr >> 10) << 9) |
1512 					     (chan_addr & 0x1ff);
1513 			else
1514 				/* A[6] or hash 6 */
1515 				chan_addr = ((chan_addr >> 7) << 6) |
1516 					     (chan_addr & 0x3f);
1517 		} else
1518 			/* A[12] */
1519 			chan_addr = ((chan_addr >> 13) << 12) |
1520 				     (chan_addr & 0xfff);
1521 	}
1522 
1523 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1524 
1525 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1526 
1527 	if (cs_found >= 0)
1528 		*chan_sel = channel;
1529 
1530 	return cs_found;
1531 }
1532 
1533 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1534 					u64 sys_addr, int *chan_sel)
1535 {
1536 	int cs_found = -EINVAL;
1537 	int num_dcts_intlv = 0;
1538 	u64 chan_addr, chan_offset;
1539 	u64 dct_base, dct_limit;
1540 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1541 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1542 
1543 	u64 dhar_offset		= f10_dhar_offset(pvt);
1544 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
1545 	u8 node_id		= dram_dst_node(pvt, range);
1546 	u8 intlv_en		= dram_intlv_en(pvt, range);
1547 
1548 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1549 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1550 
1551 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1552 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
1553 
1554 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1555 		 range, sys_addr, get_dram_limit(pvt, range));
1556 
1557 	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
1558 	    !(get_dram_limit(pvt, range) >= sys_addr))
1559 		return -EINVAL;
1560 
1561 	if (dhar_valid(pvt) &&
1562 	    dhar_base(pvt) <= sys_addr &&
1563 	    sys_addr < BIT_64(32)) {
1564 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1565 			    sys_addr);
1566 		return -EINVAL;
1567 	}
1568 
1569 	/* Verify sys_addr is within DCT Range. */
1570 	dct_base = (u64) dct_sel_baseaddr(pvt);
1571 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
1572 
1573 	if (!(dct_cont_base_reg & BIT(0)) &&
1574 	    !(dct_base <= (sys_addr >> 27) &&
1575 	      dct_limit >= (sys_addr >> 27)))
1576 		return -EINVAL;
1577 
1578 	/* Verify number of dct's that participate in channel interleaving. */
1579 	num_dcts_intlv = (int) hweight8(intlv_en);
1580 
1581 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
1582 		return -EINVAL;
1583 
1584 	channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
1585 					     num_dcts_intlv, dct_sel);
1586 
1587 	/* Verify we stay within the MAX number of channels allowed */
1588 	if (channel > 3)
1589 		return -EINVAL;
1590 
1591 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
1592 
1593 	/* Get normalized DCT addr */
1594 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
1595 		chan_offset = dhar_offset;
1596 	else
1597 		chan_offset = dct_base << 27;
1598 
1599 	chan_addr = sys_addr - chan_offset;
1600 
1601 	/* remove channel interleave */
1602 	if (num_dcts_intlv == 2) {
1603 		if (intlv_addr == 0x4)
1604 			chan_addr = ((chan_addr >> 9) << 8) |
1605 						(chan_addr & 0xff);
1606 		else if (intlv_addr == 0x5)
1607 			chan_addr = ((chan_addr >> 10) << 9) |
1608 						(chan_addr & 0x1ff);
1609 		else
1610 			return -EINVAL;
1611 
1612 	} else if (num_dcts_intlv == 4) {
1613 		if (intlv_addr == 0x4)
1614 			chan_addr = ((chan_addr >> 10) << 8) |
1615 							(chan_addr & 0xff);
1616 		else if (intlv_addr == 0x5)
1617 			chan_addr = ((chan_addr >> 11) << 9) |
1618 							(chan_addr & 0x1ff);
1619 		else
1620 			return -EINVAL;
1621 	}
1622 
1623 	if (dct_offset_en) {
1624 		amd64_read_pci_cfg(pvt->F1,
1625 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
1626 				   &tmp);
1627 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
1628 	}
1629 
1630 	f15h_select_dct(pvt, channel);
1631 
1632 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1633 
1634 	/*
1635 	 * Find Chip select:
1636 	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
1637 	 * there is support for 4 DCT's, but only 2 are currently functional.
1638 	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
1639 	 * pvt->csels[1]. So we need to use '1' here to get correct info.
1640 	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
1641 	 */
1642 	alias_channel =  (channel == 3) ? 1 : channel;
1643 
1644 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
1645 
1646 	if (cs_found >= 0)
1647 		*chan_sel = alias_channel;
1648 
1649 	return cs_found;
1650 }
1651 
1652 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
1653 					u64 sys_addr,
1654 					int *chan_sel)
1655 {
1656 	int cs_found = -EINVAL;
1657 	unsigned range;
1658 
1659 	for (range = 0; range < DRAM_RANGES; range++) {
1660 		if (!dram_rw(pvt, range))
1661 			continue;
1662 
1663 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
1664 			cs_found = f15_m30h_match_to_this_node(pvt, range,
1665 							       sys_addr,
1666 							       chan_sel);
1667 
1668 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
1669 			 (get_dram_limit(pvt, range) >= sys_addr)) {
1670 			cs_found = f1x_match_to_this_node(pvt, range,
1671 							  sys_addr, chan_sel);
1672 			if (cs_found >= 0)
1673 				break;
1674 		}
1675 	}
1676 	return cs_found;
1677 }
1678 
1679 /*
1680  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1681  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1682  *
1683  * The @sys_addr is usually an error address received from the hardware
1684  * (MCX_ADDR).
1685  */
1686 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1687 				     struct err_info *err)
1688 {
1689 	struct amd64_pvt *pvt = mci->pvt_info;
1690 
1691 	error_address_to_page_and_offset(sys_addr, err);
1692 
1693 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
1694 	if (err->csrow < 0) {
1695 		err->err_code = ERR_CSROW;
1696 		return;
1697 	}
1698 
1699 	/*
1700 	 * We need the syndromes for channel detection only when we're
1701 	 * ganged. Otherwise @chan should already contain the channel at
1702 	 * this point.
1703 	 */
1704 	if (dct_ganging_enabled(pvt))
1705 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1706 }
1707 
1708 /*
1709  * debug routine to display the memory sizes of all logical DIMMs and its
1710  * CSROWs
1711  */
1712 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1713 {
1714 	int dimm, size0, size1;
1715 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1716 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
1717 
1718 	if (pvt->fam == 0xf) {
1719 		/* K8 families < revF not supported yet */
1720 	       if (pvt->ext_model < K8_REV_F)
1721 			return;
1722 	       else
1723 		       WARN_ON(ctrl != 0);
1724 	}
1725 
1726 	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1727 	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1728 						   : pvt->csels[0].csbases;
1729 
1730 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1731 		 ctrl, dbam);
1732 
1733 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1734 
1735 	/* Dump memory sizes for DIMM and its CSROWs */
1736 	for (dimm = 0; dimm < 4; dimm++) {
1737 
1738 		size0 = 0;
1739 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1740 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1741 						     DBAM_DIMM(dimm, dbam));
1742 
1743 		size1 = 0;
1744 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1745 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1746 						     DBAM_DIMM(dimm, dbam));
1747 
1748 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1749 				dimm * 2,     size0,
1750 				dimm * 2 + 1, size1);
1751 	}
1752 }
1753 
1754 static struct amd64_family_type family_types[] = {
1755 	[K8_CPUS] = {
1756 		.ctl_name = "K8",
1757 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1758 		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1759 		.ops = {
1760 			.early_channel_count	= k8_early_channel_count,
1761 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
1762 			.dbam_to_cs		= k8_dbam_to_chip_select,
1763 			.read_dct_pci_cfg	= k8_read_dct_pci_cfg,
1764 		}
1765 	},
1766 	[F10_CPUS] = {
1767 		.ctl_name = "F10h",
1768 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1769 		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1770 		.ops = {
1771 			.early_channel_count	= f1x_early_channel_count,
1772 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1773 			.dbam_to_cs		= f10_dbam_to_chip_select,
1774 			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
1775 		}
1776 	},
1777 	[F15_CPUS] = {
1778 		.ctl_name = "F15h",
1779 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1780 		.f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
1781 		.ops = {
1782 			.early_channel_count	= f1x_early_channel_count,
1783 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1784 			.dbam_to_cs		= f15_dbam_to_chip_select,
1785 			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
1786 		}
1787 	},
1788 	[F15_M30H_CPUS] = {
1789 		.ctl_name = "F15h_M30h",
1790 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
1791 		.f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
1792 		.ops = {
1793 			.early_channel_count	= f1x_early_channel_count,
1794 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1795 			.dbam_to_cs		= f16_dbam_to_chip_select,
1796 			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
1797 		}
1798 	},
1799 	[F16_CPUS] = {
1800 		.ctl_name = "F16h",
1801 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
1802 		.f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
1803 		.ops = {
1804 			.early_channel_count	= f1x_early_channel_count,
1805 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1806 			.dbam_to_cs		= f16_dbam_to_chip_select,
1807 			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
1808 		}
1809 	},
1810 	[F16_M30H_CPUS] = {
1811 		.ctl_name = "F16h_M30h",
1812 		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
1813 		.f3_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F3,
1814 		.ops = {
1815 			.early_channel_count	= f1x_early_channel_count,
1816 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1817 			.dbam_to_cs		= f16_dbam_to_chip_select,
1818 			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
1819 		}
1820 	},
1821 };
1822 
1823 /*
1824  * These are tables of eigenvectors (one per line) which can be used for the
1825  * construction of the syndrome tables. The modified syndrome search algorithm
1826  * uses those to find the symbol in error and thus the DIMM.
1827  *
1828  * Algorithm courtesy of Ross LaFetra from AMD.
1829  */
1830 static const u16 x4_vectors[] = {
1831 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
1832 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
1833 	0x0001, 0x0002, 0x0004, 0x0008,
1834 	0x1013, 0x3032, 0x4044, 0x8088,
1835 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
1836 	0x4857, 0xc4fe, 0x13cc, 0x3288,
1837 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1838 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1839 	0x15c1, 0x2a42, 0x89ac, 0x4758,
1840 	0x2b03, 0x1602, 0x4f0c, 0xca08,
1841 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1842 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
1843 	0x2b87, 0x164e, 0x642c, 0xdc18,
1844 	0x40b9, 0x80de, 0x1094, 0x20e8,
1845 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
1846 	0x11c1, 0x2242, 0x84ac, 0x4c58,
1847 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
1848 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1849 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
1850 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1851 	0x16b3, 0x3d62, 0x4f34, 0x8518,
1852 	0x1e2f, 0x391a, 0x5cac, 0xf858,
1853 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1854 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1855 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1856 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
1857 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
1858 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
1859 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
1860 	0x185d, 0x2ca6, 0x7914, 0x9e28,
1861 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
1862 	0x4199, 0x82ee, 0x19f4, 0x2e58,
1863 	0x4807, 0xc40e, 0x130c, 0x3208,
1864 	0x1905, 0x2e0a, 0x5804, 0xac08,
1865 	0x213f, 0x132a, 0xadfc, 0x5ba8,
1866 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1867 };
1868 
1869 static const u16 x8_vectors[] = {
1870 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1871 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1872 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1873 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1874 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1875 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1876 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1877 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1878 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1879 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1880 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1881 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1882 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1883 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1884 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1885 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1886 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1887 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1888 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1889 };
1890 
1891 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
1892 			   unsigned v_dim)
1893 {
1894 	unsigned int i, err_sym;
1895 
1896 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1897 		u16 s = syndrome;
1898 		unsigned v_idx =  err_sym * v_dim;
1899 		unsigned v_end = (err_sym + 1) * v_dim;
1900 
1901 		/* walk over all 16 bits of the syndrome */
1902 		for (i = 1; i < (1U << 16); i <<= 1) {
1903 
1904 			/* if bit is set in that eigenvector... */
1905 			if (v_idx < v_end && vectors[v_idx] & i) {
1906 				u16 ev_comp = vectors[v_idx++];
1907 
1908 				/* ... and bit set in the modified syndrome, */
1909 				if (s & i) {
1910 					/* remove it. */
1911 					s ^= ev_comp;
1912 
1913 					if (!s)
1914 						return err_sym;
1915 				}
1916 
1917 			} else if (s & i)
1918 				/* can't get to zero, move to next symbol */
1919 				break;
1920 		}
1921 	}
1922 
1923 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
1924 	return -1;
1925 }
1926 
1927 static int map_err_sym_to_channel(int err_sym, int sym_size)
1928 {
1929 	if (sym_size == 4)
1930 		switch (err_sym) {
1931 		case 0x20:
1932 		case 0x21:
1933 			return 0;
1934 			break;
1935 		case 0x22:
1936 		case 0x23:
1937 			return 1;
1938 			break;
1939 		default:
1940 			return err_sym >> 4;
1941 			break;
1942 		}
1943 	/* x8 symbols */
1944 	else
1945 		switch (err_sym) {
1946 		/* imaginary bits not in a DIMM */
1947 		case 0x10:
1948 			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1949 					  err_sym);
1950 			return -1;
1951 			break;
1952 
1953 		case 0x11:
1954 			return 0;
1955 			break;
1956 		case 0x12:
1957 			return 1;
1958 			break;
1959 		default:
1960 			return err_sym >> 3;
1961 			break;
1962 		}
1963 	return -1;
1964 }
1965 
1966 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1967 {
1968 	struct amd64_pvt *pvt = mci->pvt_info;
1969 	int err_sym = -1;
1970 
1971 	if (pvt->ecc_sym_sz == 8)
1972 		err_sym = decode_syndrome(syndrome, x8_vectors,
1973 					  ARRAY_SIZE(x8_vectors),
1974 					  pvt->ecc_sym_sz);
1975 	else if (pvt->ecc_sym_sz == 4)
1976 		err_sym = decode_syndrome(syndrome, x4_vectors,
1977 					  ARRAY_SIZE(x4_vectors),
1978 					  pvt->ecc_sym_sz);
1979 	else {
1980 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
1981 		return err_sym;
1982 	}
1983 
1984 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
1985 }
1986 
1987 static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
1988 			    u8 ecc_type)
1989 {
1990 	enum hw_event_mc_err_type err_type;
1991 	const char *string;
1992 
1993 	if (ecc_type == 2)
1994 		err_type = HW_EVENT_ERR_CORRECTED;
1995 	else if (ecc_type == 1)
1996 		err_type = HW_EVENT_ERR_UNCORRECTED;
1997 	else {
1998 		WARN(1, "Something is rotten in the state of Denmark.\n");
1999 		return;
2000 	}
2001 
2002 	switch (err->err_code) {
2003 	case DECODE_OK:
2004 		string = "";
2005 		break;
2006 	case ERR_NODE:
2007 		string = "Failed to map error addr to a node";
2008 		break;
2009 	case ERR_CSROW:
2010 		string = "Failed to map error addr to a csrow";
2011 		break;
2012 	case ERR_CHANNEL:
2013 		string = "unknown syndrome - possible error reporting race";
2014 		break;
2015 	default:
2016 		string = "WTF error";
2017 		break;
2018 	}
2019 
2020 	edac_mc_handle_error(err_type, mci, 1,
2021 			     err->page, err->offset, err->syndrome,
2022 			     err->csrow, err->channel, -1,
2023 			     string, "");
2024 }
2025 
2026 static inline void decode_bus_error(int node_id, struct mce *m)
2027 {
2028 	struct mem_ctl_info *mci = mcis[node_id];
2029 	struct amd64_pvt *pvt = mci->pvt_info;
2030 	u8 ecc_type = (m->status >> 45) & 0x3;
2031 	u8 xec = XEC(m->status, 0x1f);
2032 	u16 ec = EC(m->status);
2033 	u64 sys_addr;
2034 	struct err_info err;
2035 
2036 	/* Bail out early if this was an 'observed' error */
2037 	if (PP(ec) == NBSL_PP_OBS)
2038 		return;
2039 
2040 	/* Do only ECC errors */
2041 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2042 		return;
2043 
2044 	memset(&err, 0, sizeof(err));
2045 
2046 	sys_addr = get_error_address(pvt, m);
2047 
2048 	if (ecc_type == 2)
2049 		err.syndrome = extract_syndrome(m->status);
2050 
2051 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2052 
2053 	__log_bus_error(mci, &err, ecc_type);
2054 }
2055 
2056 /*
2057  * Use pvt->F2 which contains the F2 CPU PCI device to get the related
2058  * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
2059  */
2060 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2061 {
2062 	/* Reserve the ADDRESS MAP Device */
2063 	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2064 	if (!pvt->F1) {
2065 		amd64_err("error address map device not found: "
2066 			  "vendor %x device 0x%x (broken BIOS?)\n",
2067 			  PCI_VENDOR_ID_AMD, f1_id);
2068 		return -ENODEV;
2069 	}
2070 
2071 	/* Reserve the MISC Device */
2072 	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
2073 	if (!pvt->F3) {
2074 		pci_dev_put(pvt->F1);
2075 		pvt->F1 = NULL;
2076 
2077 		amd64_err("error F3 device not found: "
2078 			  "vendor %x device 0x%x (broken BIOS?)\n",
2079 			  PCI_VENDOR_ID_AMD, f3_id);
2080 
2081 		return -ENODEV;
2082 	}
2083 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2084 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2085 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2086 
2087 	return 0;
2088 }
2089 
2090 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2091 {
2092 	pci_dev_put(pvt->F1);
2093 	pci_dev_put(pvt->F3);
2094 }
2095 
2096 /*
2097  * Retrieve the hardware registers of the memory controller (this includes the
2098  * 'Address Map' and 'Misc' device regs)
2099  */
2100 static void read_mc_regs(struct amd64_pvt *pvt)
2101 {
2102 	unsigned range;
2103 	u64 msr_val;
2104 	u32 tmp;
2105 
2106 	/*
2107 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2108 	 * those are Read-As-Zero
2109 	 */
2110 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2111 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
2112 
2113 	/* check first whether TOP_MEM2 is enabled */
2114 	rdmsrl(MSR_K8_SYSCFG, msr_val);
2115 	if (msr_val & (1U << 21)) {
2116 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2117 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2118 	} else
2119 		edac_dbg(0, "  TOP_MEM2 disabled\n");
2120 
2121 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2122 
2123 	read_dram_ctl_register(pvt);
2124 
2125 	for (range = 0; range < DRAM_RANGES; range++) {
2126 		u8 rw;
2127 
2128 		/* read settings for this DRAM range */
2129 		read_dram_base_limit_regs(pvt, range);
2130 
2131 		rw = dram_rw(pvt, range);
2132 		if (!rw)
2133 			continue;
2134 
2135 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2136 			 range,
2137 			 get_dram_base(pvt, range),
2138 			 get_dram_limit(pvt, range));
2139 
2140 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2141 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2142 			 (rw & 0x1) ? "R" : "-",
2143 			 (rw & 0x2) ? "W" : "-",
2144 			 dram_intlv_sel(pvt, range),
2145 			 dram_dst_node(pvt, range));
2146 	}
2147 
2148 	read_dct_base_mask(pvt);
2149 
2150 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2151 	amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
2152 
2153 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2154 
2155 	amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
2156 	amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
2157 
2158 	if (!dct_ganging_enabled(pvt)) {
2159 		amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2160 		amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2161 	}
2162 
2163 	pvt->ecc_sym_sz = 4;
2164 
2165 	if (pvt->fam >= 0x10) {
2166 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2167 		if (pvt->fam != 0x16)
2168 			/* F16h has only DCT0 */
2169 			amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2170 
2171 		/* F10h, revD and later can do x8 ECC too */
2172 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2173 			pvt->ecc_sym_sz = 8;
2174 	}
2175 	dump_misc_regs(pvt);
2176 }
2177 
2178 /*
2179  * NOTE: CPU Revision Dependent code
2180  *
2181  * Input:
2182  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2183  *	k8 private pointer to -->
2184  *			DRAM Bank Address mapping register
2185  *			node_id
2186  *			DCL register where dual_channel_active is
2187  *
2188  * The DBAM register consists of 4 sets of 4 bits each definitions:
2189  *
2190  * Bits:	CSROWs
2191  * 0-3		CSROWs 0 and 1
2192  * 4-7		CSROWs 2 and 3
2193  * 8-11		CSROWs 4 and 5
2194  * 12-15	CSROWs 6 and 7
2195  *
2196  * Values range from: 0 to 15
2197  * The meaning of the values depends on CPU revision and dual-channel state,
2198  * see relevant BKDG more info.
2199  *
2200  * The memory controller provides for total of only 8 CSROWs in its current
2201  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2202  * single channel or two (2) DIMMs in dual channel mode.
2203  *
2204  * The following code logic collapses the various tables for CSROW based on CPU
2205  * revision.
2206  *
2207  * Returns:
2208  *	The number of PAGE_SIZE pages on the specified CSROW number it
2209  *	encompasses
2210  *
2211  */
2212 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2213 {
2214 	u32 cs_mode, nr_pages;
2215 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2216 
2217 
2218 	/*
2219 	 * The math on this doesn't look right on the surface because x/2*4 can
2220 	 * be simplified to x*2 but this expression makes use of the fact that
2221 	 * it is integral math where 1/2=0. This intermediate value becomes the
2222 	 * number of bits to shift the DBAM register to extract the proper CSROW
2223 	 * field.
2224 	 */
2225 	cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
2226 
2227 	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2228 
2229 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2230 		    csrow_nr, dct,  cs_mode);
2231 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2232 
2233 	return nr_pages;
2234 }
2235 
2236 /*
2237  * Initialize the array of csrow attribute instances, based on the values
2238  * from pci config hardware registers.
2239  */
2240 static int init_csrows(struct mem_ctl_info *mci)
2241 {
2242 	struct amd64_pvt *pvt = mci->pvt_info;
2243 	struct csrow_info *csrow;
2244 	struct dimm_info *dimm;
2245 	enum edac_type edac_mode;
2246 	enum mem_type mtype;
2247 	int i, j, empty = 1;
2248 	int nr_pages = 0;
2249 	u32 val;
2250 
2251 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2252 
2253 	pvt->nbcfg = val;
2254 
2255 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2256 		 pvt->mc_node_id, val,
2257 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2258 
2259 	/*
2260 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2261 	 */
2262 	for_each_chip_select(i, 0, pvt) {
2263 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2264 		bool row_dct1 = false;
2265 
2266 		if (pvt->fam != 0xf)
2267 			row_dct1 = !!csrow_enabled(i, 1, pvt);
2268 
2269 		if (!row_dct0 && !row_dct1)
2270 			continue;
2271 
2272 		csrow = mci->csrows[i];
2273 		empty = 0;
2274 
2275 		edac_dbg(1, "MC node: %d, csrow: %d\n",
2276 			    pvt->mc_node_id, i);
2277 
2278 		if (row_dct0) {
2279 			nr_pages = get_csrow_nr_pages(pvt, 0, i);
2280 			csrow->channels[0]->dimm->nr_pages = nr_pages;
2281 		}
2282 
2283 		/* K8 has only one DCT */
2284 		if (pvt->fam != 0xf && row_dct1) {
2285 			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
2286 
2287 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2288 			nr_pages += row_dct1_pages;
2289 		}
2290 
2291 		mtype = determine_memory_type(pvt, i);
2292 
2293 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
2294 
2295 		/*
2296 		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2297 		 */
2298 		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2299 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2300 				    EDAC_S4ECD4ED : EDAC_SECDED;
2301 		else
2302 			edac_mode = EDAC_NONE;
2303 
2304 		for (j = 0; j < pvt->channel_count; j++) {
2305 			dimm = csrow->channels[j]->dimm;
2306 			dimm->mtype = mtype;
2307 			dimm->edac_mode = edac_mode;
2308 		}
2309 	}
2310 
2311 	return empty;
2312 }
2313 
2314 /* get all cores on this DCT */
2315 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2316 {
2317 	int cpu;
2318 
2319 	for_each_online_cpu(cpu)
2320 		if (amd_get_nb_id(cpu) == nid)
2321 			cpumask_set_cpu(cpu, mask);
2322 }
2323 
2324 /* check MCG_CTL on all the cpus on this node */
2325 static bool nb_mce_bank_enabled_on_node(u16 nid)
2326 {
2327 	cpumask_var_t mask;
2328 	int cpu, nbe;
2329 	bool ret = false;
2330 
2331 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2332 		amd64_warn("%s: Error allocating mask\n", __func__);
2333 		return false;
2334 	}
2335 
2336 	get_cpus_on_this_dct_cpumask(mask, nid);
2337 
2338 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2339 
2340 	for_each_cpu(cpu, mask) {
2341 		struct msr *reg = per_cpu_ptr(msrs, cpu);
2342 		nbe = reg->l & MSR_MCGCTL_NBE;
2343 
2344 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2345 			 cpu, reg->q,
2346 			 (nbe ? "enabled" : "disabled"));
2347 
2348 		if (!nbe)
2349 			goto out;
2350 	}
2351 	ret = true;
2352 
2353 out:
2354 	free_cpumask_var(mask);
2355 	return ret;
2356 }
2357 
2358 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
2359 {
2360 	cpumask_var_t cmask;
2361 	int cpu;
2362 
2363 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2364 		amd64_warn("%s: error allocating mask\n", __func__);
2365 		return false;
2366 	}
2367 
2368 	get_cpus_on_this_dct_cpumask(cmask, nid);
2369 
2370 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2371 
2372 	for_each_cpu(cpu, cmask) {
2373 
2374 		struct msr *reg = per_cpu_ptr(msrs, cpu);
2375 
2376 		if (on) {
2377 			if (reg->l & MSR_MCGCTL_NBE)
2378 				s->flags.nb_mce_enable = 1;
2379 
2380 			reg->l |= MSR_MCGCTL_NBE;
2381 		} else {
2382 			/*
2383 			 * Turn off NB MCE reporting only when it was off before
2384 			 */
2385 			if (!s->flags.nb_mce_enable)
2386 				reg->l &= ~MSR_MCGCTL_NBE;
2387 		}
2388 	}
2389 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2390 
2391 	free_cpumask_var(cmask);
2392 
2393 	return 0;
2394 }
2395 
2396 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2397 				       struct pci_dev *F3)
2398 {
2399 	bool ret = true;
2400 	u32 value, mask = 0x3;		/* UECC/CECC enable */
2401 
2402 	if (toggle_ecc_err_reporting(s, nid, ON)) {
2403 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2404 		return false;
2405 	}
2406 
2407 	amd64_read_pci_cfg(F3, NBCTL, &value);
2408 
2409 	s->old_nbctl   = value & mask;
2410 	s->nbctl_valid = true;
2411 
2412 	value |= mask;
2413 	amd64_write_pci_cfg(F3, NBCTL, value);
2414 
2415 	amd64_read_pci_cfg(F3, NBCFG, &value);
2416 
2417 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2418 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
2419 
2420 	if (!(value & NBCFG_ECC_ENABLE)) {
2421 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2422 
2423 		s->flags.nb_ecc_prev = 0;
2424 
2425 		/* Attempt to turn on DRAM ECC Enable */
2426 		value |= NBCFG_ECC_ENABLE;
2427 		amd64_write_pci_cfg(F3, NBCFG, value);
2428 
2429 		amd64_read_pci_cfg(F3, NBCFG, &value);
2430 
2431 		if (!(value & NBCFG_ECC_ENABLE)) {
2432 			amd64_warn("Hardware rejected DRAM ECC enable,"
2433 				   "check memory DIMM configuration.\n");
2434 			ret = false;
2435 		} else {
2436 			amd64_info("Hardware accepted DRAM ECC Enable\n");
2437 		}
2438 	} else {
2439 		s->flags.nb_ecc_prev = 1;
2440 	}
2441 
2442 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2443 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
2444 
2445 	return ret;
2446 }
2447 
2448 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2449 					struct pci_dev *F3)
2450 {
2451 	u32 value, mask = 0x3;		/* UECC/CECC enable */
2452 
2453 
2454 	if (!s->nbctl_valid)
2455 		return;
2456 
2457 	amd64_read_pci_cfg(F3, NBCTL, &value);
2458 	value &= ~mask;
2459 	value |= s->old_nbctl;
2460 
2461 	amd64_write_pci_cfg(F3, NBCTL, value);
2462 
2463 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2464 	if (!s->flags.nb_ecc_prev) {
2465 		amd64_read_pci_cfg(F3, NBCFG, &value);
2466 		value &= ~NBCFG_ECC_ENABLE;
2467 		amd64_write_pci_cfg(F3, NBCFG, value);
2468 	}
2469 
2470 	/* restore the NB Enable MCGCTL bit */
2471 	if (toggle_ecc_err_reporting(s, nid, OFF))
2472 		amd64_warn("Error restoring NB MCGCTL settings!\n");
2473 }
2474 
2475 /*
2476  * EDAC requires that the BIOS have ECC enabled before
2477  * taking over the processing of ECC errors. A command line
2478  * option allows to force-enable hardware ECC later in
2479  * enable_ecc_error_reporting().
2480  */
2481 static const char *ecc_msg =
2482 	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2483 	" Either enable ECC checking or force module loading by setting "
2484 	"'ecc_enable_override'.\n"
2485 	" (Note that use of the override may cause unknown side effects.)\n";
2486 
2487 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
2488 {
2489 	u32 value;
2490 	u8 ecc_en = 0;
2491 	bool nb_mce_en = false;
2492 
2493 	amd64_read_pci_cfg(F3, NBCFG, &value);
2494 
2495 	ecc_en = !!(value & NBCFG_ECC_ENABLE);
2496 	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2497 
2498 	nb_mce_en = nb_mce_bank_enabled_on_node(nid);
2499 	if (!nb_mce_en)
2500 		amd64_notice("NB MCE bank disabled, set MSR "
2501 			     "0x%08x[4] on node %d to enable.\n",
2502 			     MSR_IA32_MCG_CTL, nid);
2503 
2504 	if (!ecc_en || !nb_mce_en) {
2505 		amd64_notice("%s", ecc_msg);
2506 		return false;
2507 	}
2508 	return true;
2509 }
2510 
2511 static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2512 {
2513 	struct amd64_pvt *pvt = mci->pvt_info;
2514 	int rc;
2515 
2516 	rc = amd64_create_sysfs_dbg_files(mci);
2517 	if (rc < 0)
2518 		return rc;
2519 
2520 	if (pvt->fam >= 0x10) {
2521 		rc = amd64_create_sysfs_inject_files(mci);
2522 		if (rc < 0)
2523 			return rc;
2524 	}
2525 
2526 	return 0;
2527 }
2528 
2529 static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
2530 {
2531 	struct amd64_pvt *pvt = mci->pvt_info;
2532 
2533 	amd64_remove_sysfs_dbg_files(mci);
2534 
2535 	if (pvt->fam >= 0x10)
2536 		amd64_remove_sysfs_inject_files(mci);
2537 }
2538 
2539 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2540 				 struct amd64_family_type *fam)
2541 {
2542 	struct amd64_pvt *pvt = mci->pvt_info;
2543 
2544 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2545 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
2546 
2547 	if (pvt->nbcap & NBCAP_SECDED)
2548 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2549 
2550 	if (pvt->nbcap & NBCAP_CHIPKILL)
2551 		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2552 
2553 	mci->edac_cap		= determine_edac_cap(pvt);
2554 	mci->mod_name		= EDAC_MOD_STR;
2555 	mci->mod_ver		= EDAC_AMD64_VERSION;
2556 	mci->ctl_name		= fam->ctl_name;
2557 	mci->dev_name		= pci_name(pvt->F2);
2558 	mci->ctl_page_to_phys	= NULL;
2559 
2560 	/* memory scrubber interface */
2561 	mci->set_sdram_scrub_rate = set_scrub_rate;
2562 	mci->get_sdram_scrub_rate = get_scrub_rate;
2563 }
2564 
2565 /*
2566  * returns a pointer to the family descriptor on success, NULL otherwise.
2567  */
2568 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
2569 {
2570 	struct amd64_family_type *fam_type = NULL;
2571 
2572 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
2573 	pvt->stepping	= boot_cpu_data.x86_mask;
2574 	pvt->model	= boot_cpu_data.x86_model;
2575 	pvt->fam	= boot_cpu_data.x86;
2576 
2577 	switch (pvt->fam) {
2578 	case 0xf:
2579 		fam_type	= &family_types[K8_CPUS];
2580 		pvt->ops	= &family_types[K8_CPUS].ops;
2581 		break;
2582 
2583 	case 0x10:
2584 		fam_type	= &family_types[F10_CPUS];
2585 		pvt->ops	= &family_types[F10_CPUS].ops;
2586 		break;
2587 
2588 	case 0x15:
2589 		if (pvt->model == 0x30) {
2590 			fam_type = &family_types[F15_M30H_CPUS];
2591 			pvt->ops = &family_types[F15_M30H_CPUS].ops;
2592 			break;
2593 		}
2594 
2595 		fam_type	= &family_types[F15_CPUS];
2596 		pvt->ops	= &family_types[F15_CPUS].ops;
2597 		break;
2598 
2599 	case 0x16:
2600 		if (pvt->model == 0x30) {
2601 			fam_type = &family_types[F16_M30H_CPUS];
2602 			pvt->ops = &family_types[F16_M30H_CPUS].ops;
2603 			break;
2604 		}
2605 		fam_type	= &family_types[F16_CPUS];
2606 		pvt->ops	= &family_types[F16_CPUS].ops;
2607 		break;
2608 
2609 	default:
2610 		amd64_err("Unsupported family!\n");
2611 		return NULL;
2612 	}
2613 
2614 	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
2615 		     (pvt->fam == 0xf ?
2616 				(pvt->ext_model >= K8_REV_F  ? "revF or later "
2617 							     : "revE or earlier ")
2618 				 : ""), pvt->mc_node_id);
2619 	return fam_type;
2620 }
2621 
2622 static int init_one_instance(struct pci_dev *F2)
2623 {
2624 	struct amd64_pvt *pvt = NULL;
2625 	struct amd64_family_type *fam_type = NULL;
2626 	struct mem_ctl_info *mci = NULL;
2627 	struct edac_mc_layer layers[2];
2628 	int err = 0, ret;
2629 	u16 nid = amd_get_node_id(F2);
2630 
2631 	ret = -ENOMEM;
2632 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2633 	if (!pvt)
2634 		goto err_ret;
2635 
2636 	pvt->mc_node_id	= nid;
2637 	pvt->F2 = F2;
2638 
2639 	ret = -EINVAL;
2640 	fam_type = per_family_init(pvt);
2641 	if (!fam_type)
2642 		goto err_free;
2643 
2644 	ret = -ENODEV;
2645 	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
2646 	if (err)
2647 		goto err_free;
2648 
2649 	read_mc_regs(pvt);
2650 
2651 	/*
2652 	 * We need to determine how many memory channels there are. Then use
2653 	 * that information for calculating the size of the dynamic instance
2654 	 * tables in the 'mci' structure.
2655 	 */
2656 	ret = -EINVAL;
2657 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
2658 	if (pvt->channel_count < 0)
2659 		goto err_siblings;
2660 
2661 	ret = -ENOMEM;
2662 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2663 	layers[0].size = pvt->csels[0].b_cnt;
2664 	layers[0].is_virt_csrow = true;
2665 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
2666 
2667 	/*
2668 	 * Always allocate two channels since we can have setups with DIMMs on
2669 	 * only one channel. Also, this simplifies handling later for the price
2670 	 * of a couple of KBs tops.
2671 	 */
2672 	layers[1].size = 2;
2673 	layers[1].is_virt_csrow = false;
2674 
2675 	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
2676 	if (!mci)
2677 		goto err_siblings;
2678 
2679 	mci->pvt_info = pvt;
2680 	mci->pdev = &pvt->F2->dev;
2681 
2682 	setup_mci_misc_attrs(mci, fam_type);
2683 
2684 	if (init_csrows(mci))
2685 		mci->edac_cap = EDAC_FLAG_NONE;
2686 
2687 	ret = -ENODEV;
2688 	if (edac_mc_add_mc(mci)) {
2689 		edac_dbg(1, "failed edac_mc_add_mc()\n");
2690 		goto err_add_mc;
2691 	}
2692 	if (set_mc_sysfs_attrs(mci)) {
2693 		edac_dbg(1, "failed edac_mc_add_mc()\n");
2694 		goto err_add_sysfs;
2695 	}
2696 
2697 	/* register stuff with EDAC MCE */
2698 	if (report_gart_errors)
2699 		amd_report_gart_errors(true);
2700 
2701 	amd_register_ecc_decoder(decode_bus_error);
2702 
2703 	mcis[nid] = mci;
2704 
2705 	atomic_inc(&drv_instances);
2706 
2707 	return 0;
2708 
2709 err_add_sysfs:
2710 	edac_mc_del_mc(mci->pdev);
2711 err_add_mc:
2712 	edac_mc_free(mci);
2713 
2714 err_siblings:
2715 	free_mc_sibling_devs(pvt);
2716 
2717 err_free:
2718 	kfree(pvt);
2719 
2720 err_ret:
2721 	return ret;
2722 }
2723 
2724 static int probe_one_instance(struct pci_dev *pdev,
2725 			      const struct pci_device_id *mc_type)
2726 {
2727 	u16 nid = amd_get_node_id(pdev);
2728 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2729 	struct ecc_settings *s;
2730 	int ret = 0;
2731 
2732 	ret = pci_enable_device(pdev);
2733 	if (ret < 0) {
2734 		edac_dbg(0, "ret=%d\n", ret);
2735 		return -EIO;
2736 	}
2737 
2738 	ret = -ENOMEM;
2739 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2740 	if (!s)
2741 		goto err_out;
2742 
2743 	ecc_stngs[nid] = s;
2744 
2745 	if (!ecc_enabled(F3, nid)) {
2746 		ret = -ENODEV;
2747 
2748 		if (!ecc_enable_override)
2749 			goto err_enable;
2750 
2751 		amd64_warn("Forcing ECC on!\n");
2752 
2753 		if (!enable_ecc_error_reporting(s, nid, F3))
2754 			goto err_enable;
2755 	}
2756 
2757 	ret = init_one_instance(pdev);
2758 	if (ret < 0) {
2759 		amd64_err("Error probing instance: %d\n", nid);
2760 		restore_ecc_error_reporting(s, nid, F3);
2761 	}
2762 
2763 	return ret;
2764 
2765 err_enable:
2766 	kfree(s);
2767 	ecc_stngs[nid] = NULL;
2768 
2769 err_out:
2770 	return ret;
2771 }
2772 
2773 static void remove_one_instance(struct pci_dev *pdev)
2774 {
2775 	struct mem_ctl_info *mci;
2776 	struct amd64_pvt *pvt;
2777 	u16 nid = amd_get_node_id(pdev);
2778 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2779 	struct ecc_settings *s = ecc_stngs[nid];
2780 
2781 	mci = find_mci_by_dev(&pdev->dev);
2782 	WARN_ON(!mci);
2783 
2784 	del_mc_sysfs_attrs(mci);
2785 	/* Remove from EDAC CORE tracking list */
2786 	mci = edac_mc_del_mc(&pdev->dev);
2787 	if (!mci)
2788 		return;
2789 
2790 	pvt = mci->pvt_info;
2791 
2792 	restore_ecc_error_reporting(s, nid, F3);
2793 
2794 	free_mc_sibling_devs(pvt);
2795 
2796 	/* unregister from EDAC MCE */
2797 	amd_report_gart_errors(false);
2798 	amd_unregister_ecc_decoder(decode_bus_error);
2799 
2800 	kfree(ecc_stngs[nid]);
2801 	ecc_stngs[nid] = NULL;
2802 
2803 	/* Free the EDAC CORE resources */
2804 	mci->pvt_info = NULL;
2805 	mcis[nid] = NULL;
2806 
2807 	kfree(pvt);
2808 	edac_mc_free(mci);
2809 }
2810 
2811 /*
2812  * This table is part of the interface for loading drivers for PCI devices. The
2813  * PCI core identifies what devices are on a system during boot, and then
2814  * inquiry this table to see if this driver is for a given device found.
2815  */
2816 static const struct pci_device_id amd64_pci_table[] = {
2817 	{
2818 		.vendor		= PCI_VENDOR_ID_AMD,
2819 		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2820 		.subvendor	= PCI_ANY_ID,
2821 		.subdevice	= PCI_ANY_ID,
2822 		.class		= 0,
2823 		.class_mask	= 0,
2824 	},
2825 	{
2826 		.vendor		= PCI_VENDOR_ID_AMD,
2827 		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2828 		.subvendor	= PCI_ANY_ID,
2829 		.subdevice	= PCI_ANY_ID,
2830 		.class		= 0,
2831 		.class_mask	= 0,
2832 	},
2833 	{
2834 		.vendor		= PCI_VENDOR_ID_AMD,
2835 		.device		= PCI_DEVICE_ID_AMD_15H_NB_F2,
2836 		.subvendor	= PCI_ANY_ID,
2837 		.subdevice	= PCI_ANY_ID,
2838 		.class		= 0,
2839 		.class_mask	= 0,
2840 	},
2841 	{
2842 		.vendor		= PCI_VENDOR_ID_AMD,
2843 		.device		= PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2844 		.subvendor	= PCI_ANY_ID,
2845 		.subdevice	= PCI_ANY_ID,
2846 		.class		= 0,
2847 		.class_mask	= 0,
2848 	},
2849 	{
2850 		.vendor		= PCI_VENDOR_ID_AMD,
2851 		.device		= PCI_DEVICE_ID_AMD_16H_NB_F2,
2852 		.subvendor	= PCI_ANY_ID,
2853 		.subdevice	= PCI_ANY_ID,
2854 		.class		= 0,
2855 		.class_mask	= 0,
2856 	},
2857 	{
2858 		.vendor		= PCI_VENDOR_ID_AMD,
2859 		.device		= PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2860 		.subvendor	= PCI_ANY_ID,
2861 		.subdevice	= PCI_ANY_ID,
2862 		.class		= 0,
2863 		.class_mask	= 0,
2864 	},
2865 
2866 	{0, }
2867 };
2868 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2869 
2870 static struct pci_driver amd64_pci_driver = {
2871 	.name		= EDAC_MOD_STR,
2872 	.probe		= probe_one_instance,
2873 	.remove		= remove_one_instance,
2874 	.id_table	= amd64_pci_table,
2875 };
2876 
2877 static void setup_pci_device(void)
2878 {
2879 	struct mem_ctl_info *mci;
2880 	struct amd64_pvt *pvt;
2881 
2882 	if (pci_ctl)
2883 		return;
2884 
2885 	mci = mcis[0];
2886 	if (!mci)
2887 		return;
2888 
2889 	pvt = mci->pvt_info;
2890 	pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2891 	if (!pci_ctl) {
2892 		pr_warn("%s(): Unable to create PCI control\n", __func__);
2893 		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
2894 	}
2895 }
2896 
2897 static int __init amd64_edac_init(void)
2898 {
2899 	int err = -ENODEV;
2900 
2901 	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
2902 
2903 	opstate_init();
2904 
2905 	if (amd_cache_northbridges() < 0)
2906 		goto err_ret;
2907 
2908 	err = -ENOMEM;
2909 	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
2910 	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
2911 	if (!(mcis && ecc_stngs))
2912 		goto err_free;
2913 
2914 	msrs = msrs_alloc();
2915 	if (!msrs)
2916 		goto err_free;
2917 
2918 	err = pci_register_driver(&amd64_pci_driver);
2919 	if (err)
2920 		goto err_pci;
2921 
2922 	err = -ENODEV;
2923 	if (!atomic_read(&drv_instances))
2924 		goto err_no_instances;
2925 
2926 	setup_pci_device();
2927 	return 0;
2928 
2929 err_no_instances:
2930 	pci_unregister_driver(&amd64_pci_driver);
2931 
2932 err_pci:
2933 	msrs_free(msrs);
2934 	msrs = NULL;
2935 
2936 err_free:
2937 	kfree(mcis);
2938 	mcis = NULL;
2939 
2940 	kfree(ecc_stngs);
2941 	ecc_stngs = NULL;
2942 
2943 err_ret:
2944 	return err;
2945 }
2946 
2947 static void __exit amd64_edac_exit(void)
2948 {
2949 	if (pci_ctl)
2950 		edac_pci_release_generic_ctl(pci_ctl);
2951 
2952 	pci_unregister_driver(&amd64_pci_driver);
2953 
2954 	kfree(ecc_stngs);
2955 	ecc_stngs = NULL;
2956 
2957 	kfree(mcis);
2958 	mcis = NULL;
2959 
2960 	msrs_free(msrs);
2961 	msrs = NULL;
2962 }
2963 
2964 module_init(amd64_edac_init);
2965 module_exit(amd64_edac_exit);
2966 
2967 MODULE_LICENSE("GPL");
2968 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
2969 		"Dave Peterson, Thayne Harbaugh");
2970 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
2971 		EDAC_AMD64_VERSION);
2972 
2973 module_param(edac_op_state, int, 0444);
2974 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
2975