xref: /linux/drivers/edac/amd64_edac.c (revision f3a8b6645dc2e60d11f20c1c23afd964ff4e55ae)
1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
3 
4 static struct edac_pci_ctl_info *pci_ctl;
5 
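/*
 * Set by command line parameter. When non-zero, GART TLB walk errors -
 * normally considered harmless and therefore ignored - are reported as well.
 */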
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
8 
9 /*
10  * Set by command line parameter. If BIOS has enabled the ECC, this override is
11  * cleared to prevent re-enabling the hardware by this driver.
12  */
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
15 
16 static struct msr __percpu *msrs;
17 
18 /* Per-node stuff */
19 static struct ecc_settings **ecc_stngs;
20 
21 /*
22  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
23  * bandwidth to a valid bit pattern. The 'set' operation finds the matching
24  * bandwidth or the next lower one (i.e. the next higher scrubval encoding).
25  *
26  * FIXME: Produce a better mapping/linearisation.
27  */
28 static const struct scrubrate {
29        u32 scrubval;           /* bit pattern for scrub rate */
30        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
31 } scrubrates[] = {
32 	{ 0x01, 1600000000UL},
33 	{ 0x02, 800000000UL},
34 	{ 0x03, 400000000UL},
35 	{ 0x04, 200000000UL},
36 	{ 0x05, 100000000UL},
37 	{ 0x06, 50000000UL},
38 	{ 0x07, 25000000UL},
39 	{ 0x08, 12284069UL},
40 	{ 0x09, 6274509UL},
41 	{ 0x0A, 3121951UL},
42 	{ 0x0B, 1560975UL},
43 	{ 0x0C, 781440UL},
44 	{ 0x0D, 390720UL},
45 	{ 0x0E, 195300UL},
46 	{ 0x0F, 97650UL},
47 	{ 0x10, 48854UL},
48 	{ 0x11, 24427UL},
49 	{ 0x12, 12213UL},
50 	{ 0x13, 6101UL},
51 	{ 0x14, 3051UL},
52 	{ 0x15, 1523UL},
53 	{ 0x16, 761UL},
54 	{ 0x00, 0UL},        /* scrubbing off */
55 };
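/*
 * Illustrative example of the lookup done in __set_scrub_rate() below: a
 * request of 5000000 bytes/sec walks the table to the first entry whose
 * bandwidth does not exceed the request, i.e. scrubval 0x0A (~3.1 MB/s),
 * assuming the minimum rate check permits it.
 */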
56 
57 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
58 			       u32 *val, const char *func)
59 {
60 	int err = 0;
61 
62 	err = pci_read_config_dword(pdev, offset, val);
63 	if (err)
64 		amd64_warn("%s: error reading F%dx%03x.\n",
65 			   func, PCI_FUNC(pdev->devfn), offset);
66 
67 	return err;
68 }
69 
70 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
71 				u32 val, const char *func)
72 {
73 	int err = 0;
74 
75 	err = pci_write_config_dword(pdev, offset, val);
76 	if (err)
77 		amd64_warn("%s: error writing to F%dx%03x.\n",
78 			   func, PCI_FUNC(pdev->devfn), offset);
79 
80 	return err;
81 }
82 
83 /*
84  * Select DCT to which PCI cfg accesses are routed
85  */
86 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
87 {
88 	u32 reg = 0;
89 
90 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
91 	reg &= (pvt->model == 0x30) ? ~3 : ~1;
92 	reg |= dct;
93 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
94 }
95 
96 /*
97  *
98  * Depending on the family, F2 DCT reads need special handling:
99  *
100  * K8: has a single DCT only and no address offsets >= 0x100
101  *
102  * F10h: each DCT has its own set of regs
103  *	DCT0 -> F2x040..
104  *	DCT1 -> F2x140..
105  *
106  * F16h: has only 1 DCT
107  *
108  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
109  */
110 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
111 					 int offset, u32 *val)
112 {
113 	switch (pvt->fam) {
114 	case 0xf:
115 		if (dct || offset >= 0x100)
116 			return -EINVAL;
117 		break;
118 
119 	case 0x10:
120 		if (dct) {
121 			/*
122 			 * Note: If ganging is enabled, barring the regs
123 			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
124 			 * return 0. (cf. Section 2.8.1, F10h BKDG)
125 			 */
126 			if (dct_ganging_enabled(pvt))
127 				return 0;
128 
129 			offset += 0x100;
130 		}
131 		break;
132 
133 	case 0x15:
134 		/*
135 		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
136 		 * We select the DCT we access using F1x10C[DctCfgSel].
137 		 */
138 		dct = (dct && pvt->model == 0x30) ? 3 : dct;
139 		f15h_select_dct(pvt, dct);
140 		break;
141 
142 	case 0x16:
143 		if (dct)
144 			return -EINVAL;
145 		break;
146 
147 	default:
148 		break;
149 	}
150 	return amd64_read_pci_cfg(pvt->F2, offset, val);
151 }
152 
153 /*
154  * Memory scrubber control interface. For K8, memory scrubbing is handled by
155  * hardware and can involve L2 cache, dcache as well as the main memory. With
156  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
157  * functionality.
158  *
159  * This causes the "units" for the scrubbing speed to vary between 64 byte blocks
160  * (DRAM) and cache lines. This is nasty, so we will use bandwidth in
161  * bytes/sec for the setting.
162  *
163  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
164  * other archs, we might not have access to the caches directly.
165  */
166 
167 /*
168  * scan the scrub rate mapping table for a close or matching bandwidth value to
169  * issue. If requested is too big, then use last maximum value found.
170  */
171 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
172 {
173 	u32 scrubval;
174 	int i;
175 
176 	/*
177 	 * map the configured rate (new_bw) to a value specific to the AMD64
178 	 * memory controller and apply to register. Search for the first
179 	 * bandwidth entry that does not exceed the requested setting and
180 	 * program that. If we end up at the last entry, turn off DRAM scrubbing.
181 	 *
182 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
183 	 * by falling back to the last element in scrubrates[].
184 	 */
185 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
186 		/*
187 		 * skip scrub rates which aren't recommended
188 		 * (see F10 BKDG, F3x58)
189 		 */
190 		if (scrubrates[i].scrubval < min_rate)
191 			continue;
192 
193 		if (scrubrates[i].bandwidth <= new_bw)
194 			break;
195 	}
196 
197 	scrubval = scrubrates[i].scrubval;
198 
199 	if (pvt->fam == 0x15 && pvt->model == 0x60) {
200 		f15h_select_dct(pvt, 0);
201 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
202 		f15h_select_dct(pvt, 1);
203 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
204 	} else {
205 		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
206 	}
207 
208 	if (scrubval)
209 		return scrubrates[i].bandwidth;
210 
211 	return 0;
212 }
213 
214 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
215 {
216 	struct amd64_pvt *pvt = mci->pvt_info;
217 	u32 min_scrubrate = 0x5;
218 
219 	if (pvt->fam == 0xf)
220 		min_scrubrate = 0x0;
221 
222 	if (pvt->fam == 0x15) {
223 		/* Erratum #505 */
224 		if (pvt->model < 0x10)
225 			f15h_select_dct(pvt, 0);
226 
227 		if (pvt->model == 0x60)
228 			min_scrubrate = 0x6;
229 	}
230 	return __set_scrub_rate(pvt, bw, min_scrubrate);
231 }
232 
233 static int get_scrub_rate(struct mem_ctl_info *mci)
234 {
235 	struct amd64_pvt *pvt = mci->pvt_info;
236 	u32 scrubval = 0;
237 	int i, retval = -EINVAL;
238 
239 	if (pvt->fam == 0x15) {
240 		/* Erratum #505 */
241 		if (pvt->model < 0x10)
242 			f15h_select_dct(pvt, 0);
243 
244 		if (pvt->model == 0x60)
245 			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
246 	} else
247 		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
248 
249 	scrubval = scrubval & 0x001F;
250 
251 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
252 		if (scrubrates[i].scrubval == scrubval) {
253 			retval = scrubrates[i].bandwidth;
254 			break;
255 		}
256 	}
257 	return retval;
258 }
259 
260 /*
261  * returns true if the SysAddr given by sys_addr matches the
262  * DRAM base/limit associated with node_id
263  */
264 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
265 {
266 	u64 addr;
267 
268 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
269 	 * all ones if the most significant implemented address bit is 1.
270 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
271 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
272 	 * Application Programming.
273 	 */
274 	addr = sys_addr & 0x000000ffffffffffull;
275 
276 	return ((addr >= get_dram_base(pvt, nid)) &&
277 		(addr <= get_dram_limit(pvt, nid)));
278 }
279 
280 /*
281  * Attempt to map a SysAddr to a node. On success, return a pointer to the
282  * mem_ctl_info structure for the node that the SysAddr maps to.
283  *
284  * On failure, return NULL.
285  */
286 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
287 						u64 sys_addr)
288 {
289 	struct amd64_pvt *pvt;
290 	u8 node_id;
291 	u32 intlv_en, bits;
292 
293 	/*
294 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
295 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
296 	 */
297 	pvt = mci->pvt_info;
298 
299 	/*
300 	 * The value of this field should be the same for all DRAM Base
301 	 * registers.  Therefore we arbitrarily choose to read it from the
302 	 * register for node 0.
303 	 */
304 	intlv_en = dram_intlv_en(pvt, 0);
305 
306 	if (intlv_en == 0) {
307 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
308 			if (base_limit_match(pvt, sys_addr, node_id))
309 				goto found;
310 		}
311 		goto err_no_match;
312 	}
313 
314 	if (unlikely((intlv_en != 0x01) &&
315 		     (intlv_en != 0x03) &&
316 		     (intlv_en != 0x07))) {
317 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
318 		return NULL;
319 	}
320 
321 	bits = (((u32) sys_addr) >> 12) & intlv_en;
322 
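	/*
	 * Illustrative example: with intlv_en == 0x03, SysAddr bits [13:12]
	 * were extracted above and are compared against each node's IntlvSel
	 * field below, so four nodes share the interleaved range.
	 */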
323 	for (node_id = 0; ; ) {
324 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
325 			break;	/* intlv_sel field matches */
326 
327 		if (++node_id >= DRAM_RANGES)
328 			goto err_no_match;
329 	}
330 
331 	/* sanity test for sys_addr */
332 	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
333 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
334 			   "range for node %d with node interleaving enabled.\n",
335 			   __func__, sys_addr, node_id);
336 		return NULL;
337 	}
338 
339 found:
340 	return edac_mc_find((int)node_id);
341 
342 err_no_match:
343 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
344 		 (unsigned long)sys_addr);
345 
346 	return NULL;
347 }
348 
349 /*
350  * compute the CS base address of the @csrow on the DRAM controller @dct.
351  * For details see F2x[5C:40] in the processor's BKDG
352  */
353 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
354 				 u64 *base, u64 *mask)
355 {
356 	u64 csbase, csmask, base_bits, mask_bits;
357 	u8 addr_shift;
358 
359 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
360 		csbase		= pvt->csels[dct].csbases[csrow];
361 		csmask		= pvt->csels[dct].csmasks[csrow];
362 		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
363 		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
364 		addr_shift	= 4;
365 
366 	/*
367 	 * F16h and F15h, models 30h and later need two addr_shift values:
368 	 * 8 for high and 6 for low (cf. F16h BKDG).
369 	 */
370 	} else if (pvt->fam == 0x16 ||
371 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
372 		csbase          = pvt->csels[dct].csbases[csrow];
373 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
374 
375 		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
376 		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
377 
378 		*mask = ~0ULL;
379 		/* poke holes for the csmask */
380 		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
381 			   (GENMASK_ULL(30, 19) << 8));
382 
383 		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
384 		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
385 
386 		return;
387 	} else {
388 		csbase		= pvt->csels[dct].csbases[csrow];
389 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
390 		addr_shift	= 8;
391 
392 		if (pvt->fam == 0x15)
393 			base_bits = mask_bits =
394 				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
395 		else
396 			base_bits = mask_bits =
397 				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
398 	}
399 
400 	*base  = (csbase & base_bits) << addr_shift;
401 
402 	*mask  = ~0ULL;
403 	/* poke holes for the csmask */
404 	*mask &= ~(mask_bits << addr_shift);
405 	/* OR them in */
406 	*mask |= (csmask & mask_bits) << addr_shift;
407 }
408 
409 #define for_each_chip_select(i, dct, pvt) \
410 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
411 
412 #define chip_select_base(i, dct, pvt) \
413 	pvt->csels[dct].csbases[i]
414 
415 #define for_each_chip_select_mask(i, dct, pvt) \
416 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
417 
418 /*
419  * @input_addr is an InputAddr associated with the node given by mci. Return the
420  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
421  */
422 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
423 {
424 	struct amd64_pvt *pvt;
425 	int csrow;
426 	u64 base, mask;
427 
428 	pvt = mci->pvt_info;
429 
430 	for_each_chip_select(csrow, 0, pvt) {
431 		if (!csrow_enabled(csrow, 0, pvt))
432 			continue;
433 
434 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
435 
436 		mask = ~mask;
437 
438 		if ((input_addr & mask) == (base & mask)) {
439 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
440 				 (unsigned long)input_addr, csrow,
441 				 pvt->mc_node_id);
442 
443 			return csrow;
444 		}
445 	}
446 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
447 		 (unsigned long)input_addr, pvt->mc_node_id);
448 
449 	return -1;
450 }
451 
452 /*
453  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
454  * for the node represented by mci. Info is passed back in *hole_base,
455  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
456  * info is invalid. Info may be invalid for either of the following reasons:
457  *
458  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
459  *   Address Register does not exist.
460  *
461  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
462  *   indicating that its contents are not valid.
463  *
464  * The values passed back in *hole_base, *hole_offset, and *hole_size are
465  * complete 32-bit values despite the fact that the bitfields in the DHAR
466  * only represent bits 31-24 of the base and offset values.
467  */
468 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
469 			     u64 *hole_offset, u64 *hole_size)
470 {
471 	struct amd64_pvt *pvt = mci->pvt_info;
472 
473 	/* only revE and later have the DRAM Hole Address Register */
474 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
475 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
476 			 pvt->ext_model, pvt->mc_node_id);
477 		return 1;
478 	}
479 
480 	/* valid for Fam10h and above */
481 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
482 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
483 		return 1;
484 	}
485 
486 	if (!dhar_valid(pvt)) {
487 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
488 			 pvt->mc_node_id);
489 		return 1;
490 	}
491 
492 	/* This node has Memory Hoisting */
493 
494 	/* +------------------+--------------------+--------------------+-----
495 	 * | memory           | DRAM hole          | relocated          |
496 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
497 	 * |                  |                    | DRAM hole          |
498 	 * |                  |                    | [0x100000000,      |
499 	 * |                  |                    |  (0x100000000+     |
500 	 * |                  |                    |   (0xffffffff-x))] |
501 	 * +------------------+--------------------+--------------------+-----
502 	 *
503 	 * Above is a diagram of physical memory showing the DRAM hole and the
504 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
505 	 * starts at address x (the base address) and extends through address
506 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
507 	 * addresses in the hole so that they start at 0x100000000.
508 	 */
509 
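	/*
	 * Illustrative example: a DHAR base of 0xC0000000 (3GB) yields
	 * hole_base = 0xC0000000 and hole_size = 0x40000000 (1GB).
	 */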
510 	*hole_base = dhar_base(pvt);
511 	*hole_size = (1ULL << 32) - *hole_base;
512 
513 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
514 					: k8_dhar_offset(pvt);
515 
516 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
517 		 pvt->mc_node_id, (unsigned long)*hole_base,
518 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
519 
520 	return 0;
521 }
522 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
523 
524 /*
525  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
526  * assumed that sys_addr maps to the node given by mci.
527  *
528  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
529  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
530  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
531  * then it is also involved in translating a SysAddr to a DramAddr. Sections
532  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
533  * These parts of the documentation are unclear. I interpret them as follows:
534  *
535  * When node n receives a SysAddr, it processes the SysAddr as follows:
536  *
537  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
538  *    Limit registers for node n. If the SysAddr is not within the range
539  *    specified by the base and limit values, then node n ignores the SysAddr
540  *    (since it does not map to node n). Otherwise continue to step 2 below.
541  *
542  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
543  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
544  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
545  *    hole. If not, skip to step 3 below. Else get the value of the
546  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
547  *    offset defined by this value from the SysAddr.
548  *
549  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
550  *    Base register for node n. To obtain the DramAddr, subtract the base
551  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
552  */
553 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
554 {
555 	struct amd64_pvt *pvt = mci->pvt_info;
556 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
557 	int ret;
558 
559 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
560 
561 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
562 				      &hole_size);
563 	if (!ret) {
564 		if ((sys_addr >= (1ULL << 32)) &&
565 		    (sys_addr < ((1ULL << 32) + hole_size))) {
566 			/* use DHAR to translate SysAddr to DramAddr */
567 			dram_addr = sys_addr - hole_offset;
568 
569 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
570 				 (unsigned long)sys_addr,
571 				 (unsigned long)dram_addr);
572 
573 			return dram_addr;
574 		}
575 	}
576 
577 	/*
578 	 * Translate the SysAddr to a DramAddr as shown near the start of
579 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
580 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
581 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
582 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
583 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
584 	 * Programmer's Manual Volume 1 Application Programming.
585 	 */
586 	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
587 
588 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
589 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
590 	return dram_addr;
591 }
592 
593 /*
594  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
595  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
596  * for node interleaving.
597  */
598 static int num_node_interleave_bits(unsigned intlv_en)
599 {
600 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
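	/* e.g. IntlvEn == 7 means 8-node interleaving, i.e. 3 SysAddr bits */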
601 	int n;
602 
603 	BUG_ON(intlv_en > 7);
604 	n = intlv_shift_table[intlv_en];
605 	return n;
606 }
607 
608 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
609 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
610 {
611 	struct amd64_pvt *pvt;
612 	int intlv_shift;
613 	u64 input_addr;
614 
615 	pvt = mci->pvt_info;
616 
617 	/*
618 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
619 	 * concerning translating a DramAddr to an InputAddr.
620 	 */
621 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
622 	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
623 		      (dram_addr & 0xfff);
624 
625 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
626 		 intlv_shift, (unsigned long)dram_addr,
627 		 (unsigned long)input_addr);
628 
629 	return input_addr;
630 }
631 
632 /*
633  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
634  * assumed that @sys_addr maps to the node given by mci.
635  */
636 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
637 {
638 	u64 input_addr;
639 
640 	input_addr =
641 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
642 
643 	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
644 		 (unsigned long)sys_addr, (unsigned long)input_addr);
645 
646 	return input_addr;
647 }
648 
649 /* Map the Error address to a PAGE and PAGE OFFSET. */
650 static inline void error_address_to_page_and_offset(u64 error_address,
651 						    struct err_info *err)
652 {
653 	err->page = (u32) (error_address >> PAGE_SHIFT);
654 	err->offset = ((u32) error_address) & ~PAGE_MASK;
655 }
656 
657 /*
658  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
659  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
660  * of a node that detected an ECC memory error.  mci represents the node that
661  * the error address maps to (possibly different from the node that detected
662  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
663  * error.
664  */
665 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
666 {
667 	int csrow;
668 
669 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
670 
671 	if (csrow == -1)
672 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
673 				  "address 0x%lx\n", (unsigned long)sys_addr);
674 	return csrow;
675 }
676 
677 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
678 
679 /*
680  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
681  * are ECC capable.
682  */
683 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
684 {
685 	u8 bit;
686 	unsigned long edac_cap = EDAC_FLAG_NONE;
687 
688 	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
689 		? 19
690 		: 17;
691 
692 	if (pvt->dclr0 & BIT(bit))
693 		edac_cap = EDAC_FLAG_SECDED;
694 
695 	return edac_cap;
696 }
697 
698 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
699 
700 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
701 {
702 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
703 
704 	if (pvt->dram_type == MEM_LRDDR3) {
705 		u32 dcsm = pvt->csels[chan].csmasks[0];
706 		/*
707 		 * It's assumed all LRDIMMs in a DCT are going to be of
708 		 * same 'type' until proven otherwise. So, use a cs
709 		 * the same 'type' until proven otherwise. So, use a cs
710 		 */
711 		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
712 	}
713 
714 	edac_dbg(1, "All DIMMs support ECC:%s\n",
715 		    (dclr & BIT(19)) ? "yes" : "no");
716 
718 	edac_dbg(1, "  PAR/ERR parity: %s\n",
719 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
720 
721 	if (pvt->fam == 0x10)
722 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
723 			 (dclr & BIT(11)) ?  "128b" : "64b");
724 
725 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
726 		 (dclr & BIT(12)) ?  "yes" : "no",
727 		 (dclr & BIT(13)) ?  "yes" : "no",
728 		 (dclr & BIT(14)) ?  "yes" : "no",
729 		 (dclr & BIT(15)) ?  "yes" : "no");
730 }
731 
732 /* Display and decode various NB registers for debug purposes. */
733 static void dump_misc_regs(struct amd64_pvt *pvt)
734 {
735 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
736 
737 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
738 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
739 
740 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
741 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
742 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
743 
744 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
745 
746 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
747 
748 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
749 		 pvt->dhar, dhar_base(pvt),
750 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
751 				   : f10_dhar_offset(pvt));
752 
753 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
754 
755 	debug_display_dimm_sizes(pvt, 0);
756 
757 	/* everything below this point is Fam10h and above */
758 	if (pvt->fam == 0xf)
759 		return;
760 
761 	debug_display_dimm_sizes(pvt, 1);
762 
763 	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
764 
765 	/* Only if NOT ganged does dclr1 have valid info */
766 	if (!dct_ganging_enabled(pvt))
767 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
768 }
769 
770 /*
771  * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
772  */
773 static void prep_chip_selects(struct amd64_pvt *pvt)
774 {
775 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
776 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
777 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
778 	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
779 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
780 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
781 	} else {
782 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
783 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
784 	}
785 }
786 
787 /*
788  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
789  */
790 static void read_dct_base_mask(struct amd64_pvt *pvt)
791 {
792 	int cs;
793 
794 	prep_chip_selects(pvt);
795 
796 	for_each_chip_select(cs, 0, pvt) {
797 		int reg0   = DCSB0 + (cs * 4);
798 		int reg1   = DCSB1 + (cs * 4);
799 		u32 *base0 = &pvt->csels[0].csbases[cs];
800 		u32 *base1 = &pvt->csels[1].csbases[cs];
801 
802 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
803 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
804 				 cs, *base0, reg0);
805 
806 		if (pvt->fam == 0xf)
807 			continue;
808 
809 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
810 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
811 				 cs, *base1, (pvt->fam == 0x10) ? reg1
812 								: reg0);
813 	}
814 
815 	for_each_chip_select_mask(cs, 0, pvt) {
816 		int reg0   = DCSM0 + (cs * 4);
817 		int reg1   = DCSM1 + (cs * 4);
818 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
819 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
820 
821 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
822 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
823 				 cs, *mask0, reg0);
824 
825 		if (pvt->fam == 0xf)
826 			continue;
827 
828 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
829 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
830 				 cs, *mask1, (pvt->fam == 0x10) ? reg1
831 								: reg0);
832 	}
833 }
834 
835 static void determine_memory_type(struct amd64_pvt *pvt)
836 {
837 	u32 dram_ctrl, dcsm;
838 
839 	switch (pvt->fam) {
840 	case 0xf:
841 		if (pvt->ext_model >= K8_REV_F)
842 			goto ddr3;
843 
844 		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
845 		return;
846 
847 	case 0x10:
848 		if (pvt->dchr0 & DDR3_MODE)
849 			goto ddr3;
850 
851 		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
852 		return;
853 
854 	case 0x15:
855 		if (pvt->model < 0x60)
856 			goto ddr3;
857 
858 		/*
859 		 * Model 60h needs special handling:
860 		 *
861 		 * We use a Chip Select value of '0' to obtain dcsm.
862 		 * Theoretically, it is possible to populate LRDIMMs of different
863 		 * 'Rank' value on a DCT. But this is not the common case. So,
864 		 * it's reasonable to assume all DIMMs are going to be of the same
865 		 * 'type' until proven otherwise.
866 		 */
867 		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
868 		dcsm = pvt->csels[0].csmasks[0];
869 
870 		if (((dram_ctrl >> 8) & 0x7) == 0x2)
871 			pvt->dram_type = MEM_DDR4;
872 		else if (pvt->dclr0 & BIT(16))
873 			pvt->dram_type = MEM_DDR3;
874 		else if (dcsm & 0x3)
875 			pvt->dram_type = MEM_LRDDR3;
876 		else
877 			pvt->dram_type = MEM_RDDR3;
878 
879 		return;
880 
881 	case 0x16:
882 		goto ddr3;
883 
884 	default:
885 		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
886 		pvt->dram_type = MEM_EMPTY;
887 	}
888 	return;
889 
890 ddr3:
891 	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
892 }
893 
894 /* Get the number of DCT channels the memory controller is using. */
895 static int k8_early_channel_count(struct amd64_pvt *pvt)
896 {
897 	int flag;
898 
899 	if (pvt->ext_model >= K8_REV_F)
900 		/* RevF (NPT) and later */
901 		flag = pvt->dclr0 & WIDTH_128;
902 	else
903 		/* RevE and earlier */
904 		flag = pvt->dclr0 & REVE_WIDTH_128;
905 
906 	/* not used */
907 	pvt->dclr1 = 0;
908 
909 	return (flag) ? 2 : 1;
910 }
911 
912 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
913 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
914 {
915 	u16 mce_nid = amd_get_nb_id(m->extcpu);
916 	struct mem_ctl_info *mci;
917 	u8 start_bit = 1;
918 	u8 end_bit   = 47;
919 	u64 addr;
920 
921 	mci = edac_mc_find(mce_nid);
922 	if (!mci)
923 		return 0;
924 
925 	pvt = mci->pvt_info;
926 
927 	if (pvt->fam == 0xf) {
928 		start_bit = 3;
929 		end_bit   = 39;
930 	}
931 
932 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
933 
934 	/*
935 	 * Erratum 637 workaround
936 	 */
937 	if (pvt->fam == 0x15) {
938 		u64 cc6_base, tmp_addr;
939 		u32 tmp;
940 		u8 intlv_en;
941 
942 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
943 			return addr;
944 
946 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
947 		intlv_en = tmp >> 21 & 0x7;
948 
949 		/* add [47:27] + 3 trailing bits */
950 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
951 
952 		/* reverse and add DramIntlvEn */
953 		cc6_base |= intlv_en ^ 0x7;
954 
955 		/* pin at [47:24] */
956 		cc6_base <<= 24;
957 
958 		if (!intlv_en)
959 			return cc6_base | (addr & GENMASK_ULL(23, 0));
960 
961 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
962 
963 							/* faster log2 */
964 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
965 
966 		/* OR DramIntlvSel into bits [14:12] */
967 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
968 
969 		/* add remaining [11:0] bits from original MC4_ADDR */
970 		tmp_addr |= addr & GENMASK_ULL(11, 0);
971 
972 		return cc6_base | tmp_addr;
973 	}
974 
975 	return addr;
976 }
977 
978 static struct pci_dev *pci_get_related_function(unsigned int vendor,
979 						unsigned int device,
980 						struct pci_dev *related)
981 {
982 	struct pci_dev *dev = NULL;
983 
984 	while ((dev = pci_get_device(vendor, device, dev))) {
985 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
986 		    (dev->bus->number == related->bus->number) &&
987 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
988 			break;
989 	}
990 
991 	return dev;
992 }
993 
994 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
995 {
996 	struct amd_northbridge *nb;
997 	struct pci_dev *f1 = NULL;
998 	unsigned int pci_func;
999 	int off = range << 3;
1000 	u32 llim;
1001 
1002 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
1003 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1004 
1005 	if (pvt->fam == 0xf)
1006 		return;
1007 
1008 	if (!dram_rw(pvt, range))
1009 		return;
1010 
1011 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
1012 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1013 
1014 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
1015 	if (pvt->fam != 0x15)
1016 		return;
1017 
1018 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
1019 	if (WARN_ON(!nb))
1020 		return;
1021 
1022 	if (pvt->model == 0x60)
1023 		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1024 	else if (pvt->model == 0x30)
1025 		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1026 	else
1027 		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1028 
1029 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1030 	if (WARN_ON(!f1))
1031 		return;
1032 
1033 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1034 
1035 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1036 
1037 				    /* {[39:27],111b} */
1038 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1039 
1040 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1041 
1042 				    /* [47:40] */
1043 	pvt->ranges[range].lim.hi |= llim >> 13;
1044 
1045 	pci_dev_put(f1);
1046 }
1047 
1048 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1049 				    struct err_info *err)
1050 {
1051 	struct amd64_pvt *pvt = mci->pvt_info;
1052 
1053 	error_address_to_page_and_offset(sys_addr, err);
1054 
1055 	/*
1056 	 * Find out which node the error address belongs to. This may be
1057 	 * different from the node that detected the error.
1058 	 */
1059 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1060 	if (!err->src_mci) {
1061 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1062 			     (unsigned long)sys_addr);
1063 		err->err_code = ERR_NODE;
1064 		return;
1065 	}
1066 
1067 	/* Now map the sys_addr to a CSROW */
1068 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1069 	if (err->csrow < 0) {
1070 		err->err_code = ERR_CSROW;
1071 		return;
1072 	}
1073 
1074 	/* CHIPKILL enabled */
1075 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
1076 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1077 		if (err->channel < 0) {
1078 			/*
1079 			 * Syndrome didn't map, so we don't know which of the
1080 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1081 			 * as suspect.
1082 			 */
1083 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1084 				      "possible error reporting race\n",
1085 				      err->syndrome);
1086 			err->err_code = ERR_CHANNEL;
1087 			return;
1088 		}
1089 	} else {
1090 		/*
1091 		 * non-chipkill ecc mode
1092 		 *
1093 		 * The k8 documentation is unclear about how to determine the
1094 		 * channel number when using non-chipkill memory.  This method
1095 		 * was obtained from email communication with someone at AMD.
1096 		 * (Wish the email was placed in this comment - norsk)
1097 		 */
1098 		err->channel = ((sys_addr & BIT(3)) != 0);
1099 	}
1100 }
1101 
1102 static int ddr2_cs_size(unsigned i, bool dct_width)
1103 {
1104 	unsigned shift = 0;
1105 
1106 	if (i <= 2)
1107 		shift = i;
1108 	else if (!(i & 0x1))
1109 		shift = i >> 1;
1110 	else
1111 		shift = (i + 1) >> 1;
1112 
1113 	return 128 << (shift + !!dct_width);
1114 }
1115 
1116 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1117 				  unsigned cs_mode, int cs_mask_nr)
1118 {
1119 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1120 
1121 	if (pvt->ext_model >= K8_REV_F) {
1122 		WARN_ON(cs_mode > 11);
1123 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1124 	}
1125 	else if (pvt->ext_model >= K8_REV_D) {
1126 		unsigned diff;
1127 		WARN_ON(cs_mode > 10);
1128 
1129 		/*
1130 		 * the below calculation, besides trying to win an obfuscated C
1131 		 * contest, maps cs_mode values to DIMM chip select sizes. The
1132 		 * mappings are:
1133 		 *
1134 		 * cs_mode	CS size (MB)
1135 		 * =======	============
1136 		 * 0		32
1137 		 * 1		64
1138 		 * 2		128
1139 		 * 3		128
1140 		 * 4		256
1141 		 * 5		512
1142 		 * 6		256
1143 		 * 7		512
1144 		 * 8		1024
1145 		 * 9		1024
1146 		 * 10		2048
1147 		 *
1148 		 * Basically, it calculates a value with which to shift the
1149 		 * smallest CS size of 32MB.
1150 		 *
1151 		 * ddr[23]_cs_size have a similar purpose.
1152 		 */
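		/*
		 * Worked example (illustrative): cs_mode == 8 gives
		 * diff = 8/3 + 1 = 3, hence 32 << (8 - 3) = 1024MB,
		 * matching the table above.
		 */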
1153 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1154 
1155 		return 32 << (cs_mode - diff);
1156 	}
1157 	else {
1158 		WARN_ON(cs_mode > 6);
1159 		return 32 << cs_mode;
1160 	}
1161 }
1162 
1163 /*
1164  * Get the number of DCT channels in use.
1165  *
1166  * Return:
1167  *	number of Memory Channels in operation
1168  * Pass back:
1169  *	contents of the DCL0_LOW register
1170  */
1171 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1172 {
1173 	int i, j, channels = 0;
1174 
1175 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1176 	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1177 		return 2;
1178 
1179 	/*
1180 	 * Need to check if in unganged mode: in that case, there are 2 channels,
1181 	 * but they are not in 128 bit mode and thus the above 'dclr0' status
1182 	 * bit will be OFF.
1183 	 *
1184 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1185 	 * their CSEnable bit on. If so, then SINGLE DIMM case.
1186 	 */
1187 	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1188 
1189 	/*
1190 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1191 	 * is more than just one DIMM present in unganged mode. Need to check
1192 	 * both controllers since DIMMs can be placed in either one.
1193 	 */
1194 	for (i = 0; i < 2; i++) {
1195 		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1196 
1197 		for (j = 0; j < 4; j++) {
1198 			if (DBAM_DIMM(j, dbam) > 0) {
1199 				channels++;
1200 				break;
1201 			}
1202 		}
1203 	}
1204 
1205 	if (channels > 2)
1206 		channels = 2;
1207 
1208 	amd64_info("MCT channel count: %d\n", channels);
1209 
1210 	return channels;
1211 }
1212 
1213 static int ddr3_cs_size(unsigned i, bool dct_width)
1214 {
1215 	unsigned shift = 0;
1216 	int cs_size = 0;
1217 
1218 	if (i == 0 || i == 3 || i == 4)
1219 		cs_size = -1;
1220 	else if (i <= 2)
1221 		shift = i;
1222 	else if (i == 12)
1223 		shift = 7;
1224 	else if (!(i & 0x1))
1225 		shift = i >> 1;
1226 	else
1227 		shift = (i + 1) >> 1;
1228 
1229 	if (cs_size != -1)
1230 		cs_size = (128 * (1 << !!dct_width)) << shift;
1231 
1232 	return cs_size;
1233 }
1234 
1235 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1236 {
1237 	unsigned shift = 0;
1238 	int cs_size = 0;
1239 
1240 	if (i < 4 || i == 6)
1241 		cs_size = -1;
1242 	else if (i == 12)
1243 		shift = 7;
1244 	else if (!(i & 0x1))
1245 		shift = i >> 1;
1246 	else
1247 		shift = (i + 1) >> 1;
1248 
1249 	if (cs_size != -1)
1250 		cs_size = rank_multiply * (128 << shift);
1251 
1252 	return cs_size;
1253 }
1254 
1255 static int ddr4_cs_size(unsigned i)
1256 {
1257 	int cs_size = 0;
1258 
1259 	if (i == 0)
1260 		cs_size = -1;
1261 	else if (i == 1)
1262 		cs_size = 1024;
1263 	else
1264 		/* Min cs_size = 1G */
1265 		cs_size = 1024 * (1 << (i >> 1));
1266 
1267 	return cs_size;
1268 }
1269 
1270 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1271 				   unsigned cs_mode, int cs_mask_nr)
1272 {
1273 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1274 
1275 	WARN_ON(cs_mode > 11);
1276 
1277 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1278 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1279 	else
1280 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1281 }
1282 
1283 /*
1284  * F15h supports only 64bit DCT interfaces
1285  */
1286 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1287 				   unsigned cs_mode, int cs_mask_nr)
1288 {
1289 	WARN_ON(cs_mode > 12);
1290 
1291 	return ddr3_cs_size(cs_mode, false);
1292 }
1293 
1294 /* F15h M60h supports DDR4 mapping as well. */
1295 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1296 					unsigned cs_mode, int cs_mask_nr)
1297 {
1298 	int cs_size;
1299 	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1300 
1301 	WARN_ON(cs_mode > 12);
1302 
1303 	if (pvt->dram_type == MEM_DDR4) {
1304 		if (cs_mode > 9)
1305 			return -1;
1306 
1307 		cs_size = ddr4_cs_size(cs_mode);
1308 	} else if (pvt->dram_type == MEM_LRDDR3) {
1309 		unsigned rank_multiply = dcsm & 0xf;
1310 
1311 		if (rank_multiply == 3)
1312 			rank_multiply = 4;
1313 		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1314 	} else {
1315 		/* Minimum cs size is 512MB for F15h M60h */
1316 		if (cs_mode == 0x1)
1317 			return -1;
1318 
1319 		cs_size = ddr3_cs_size(cs_mode, false);
1320 	}
1321 
1322 	return cs_size;
1323 }
1324 
1325 /*
1326  * F16h and F15h model 30h have only limited cs_modes.
1327  */
1328 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1329 				unsigned cs_mode, int cs_mask_nr)
1330 {
1331 	WARN_ON(cs_mode > 12);
1332 
1333 	if (cs_mode == 6 || cs_mode == 8 ||
1334 	    cs_mode == 9 || cs_mode == 12)
1335 		return -1;
1336 	else
1337 		return ddr3_cs_size(cs_mode, false);
1338 }
1339 
1340 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1341 {
1342 
1343 	if (pvt->fam == 0xf)
1344 		return;
1345 
1346 	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1347 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1348 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1349 
1350 		edac_dbg(0, "  DCTs operate in %s mode\n",
1351 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1352 
1353 		if (!dct_ganging_enabled(pvt))
1354 			edac_dbg(0, "  Address range split per DCT: %s\n",
1355 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1356 
1357 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1358 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1359 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
1360 
1361 		edac_dbg(0, "  channel interleave: %s, "
1362 			 "interleave bits selector: 0x%x\n",
1363 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1364 			 dct_sel_interleave_addr(pvt));
1365 	}
1366 
1367 	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1368 }
1369 
1370 /*
1371  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1372  * 2.10.12 Memory Interleaving Modes).
1373  */
1374 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1375 				     u8 intlv_en, int num_dcts_intlv,
1376 				     u32 dct_sel)
1377 {
1378 	u8 channel = 0;
1379 	u8 select;
1380 
1381 	if (!(intlv_en))
1382 		return (u8)(dct_sel);
1383 
1384 	if (num_dcts_intlv == 2) {
1385 		select = (sys_addr >> 8) & 0x3;
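		/*
		 * Two-DCT interleave uses DCT0 and DCT3 only; a channel value
		 * of 3 is aliased to csels[1] later, see the alias handling in
		 * f15_m30h_match_to_this_node().
		 */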
1386 		channel = select ? 0x3 : 0;
1387 	} else if (num_dcts_intlv == 4) {
1388 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1389 		switch (intlv_addr) {
1390 		case 0x4:
1391 			channel = (sys_addr >> 8) & 0x3;
1392 			break;
1393 		case 0x5:
1394 			channel = (sys_addr >> 9) & 0x3;
1395 			break;
1396 		}
1397 	}
1398 	return channel;
1399 }
1400 
1401 /*
1402  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1403  * Interleaving Modes.
1404  */
1405 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1406 				bool hi_range_sel, u8 intlv_en)
1407 {
1408 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1409 
1410 	if (dct_ganging_enabled(pvt))
1411 		return 0;
1412 
1413 	if (hi_range_sel)
1414 		return dct_sel_high;
1415 
1416 	/*
1417 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1418 	 */
1419 	if (dct_interleave_enabled(pvt)) {
1420 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1421 
1422 		/* return DCT select function: 0=DCT0, 1=DCT1 */
1423 		if (!intlv_addr)
1424 			return sys_addr >> 6 & 1;
1425 
1426 		if (intlv_addr & 0x2) {
1427 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
1428 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1429 
1430 			return ((sys_addr >> shift) & 1) ^ temp;
1431 		}
1432 
1433 		if (intlv_addr & 0x4) {
1434 			u8 shift = intlv_addr & 0x1 ? 9 : 8;
1435 
1436 			return (sys_addr >> shift) & 1;
1437 		}
1438 
1439 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1440 	}
1441 
1442 	if (dct_high_range_enabled(pvt))
1443 		return ~dct_sel_high & 1;
1444 
1445 	return 0;
1446 }
1447 
1448 /* Convert the sys_addr to the normalized DCT address */
1449 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1450 				 u64 sys_addr, bool hi_rng,
1451 				 u32 dct_sel_base_addr)
1452 {
1453 	u64 chan_off;
1454 	u64 dram_base		= get_dram_base(pvt, range);
1455 	u64 hole_off		= f10_dhar_offset(pvt);
1456 	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1457 
1458 	if (hi_rng) {
1459 		/*
1460 		 * if
1461 		 * base address of high range is below 4Gb
1462 		 * (bits [47:27] at [31:11])
1463 		 * DRAM address space on this DCT is hoisted above 4Gb	&&
1464 		 * sys_addr > 4Gb
1465 		 *
1466 		 *	remove hole offset from sys_addr
1467 		 * else
1468 		 *	remove high range offset from sys_addr
1469 		 */
1470 		if ((!(dct_sel_base_addr >> 16) ||
1471 		     dct_sel_base_addr < dhar_base(pvt)) &&
1472 		    dhar_valid(pvt) &&
1473 		    (sys_addr >= BIT_64(32)))
1474 			chan_off = hole_off;
1475 		else
1476 			chan_off = dct_sel_base_off;
1477 	} else {
1478 		/*
1479 		 * if
1480 		 * we have a valid hole		&&
1481 		 * sys_addr > 4Gb
1482 		 *
1483 		 *	remove hole
1484 		 * else
1485 		 *	remove dram base to normalize to DCT address
1486 		 */
1487 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1488 			chan_off = hole_off;
1489 		else
1490 			chan_off = dram_base;
1491 	}
1492 
1493 	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
1494 }
1495 
1496 /*
1497  * Check if the csrow passed in is marked as SPARED; if so, return the new
1498  * spare row.
1499  */
1500 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1501 {
1502 	int tmp_cs;
1503 
1504 	if (online_spare_swap_done(pvt, dct) &&
1505 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
1506 
1507 		for_each_chip_select(tmp_cs, dct, pvt) {
1508 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1509 				csrow = tmp_cs;
1510 				break;
1511 			}
1512 		}
1513 	}
1514 	return csrow;
1515 }
1516 
1517 /*
1518  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1519  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1520  *
1521  * Return:
1522  *	-EINVAL:  NOT FOUND
1523  *	0..csrow = Chip-Select Row
1524  */
1525 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1526 {
1527 	struct mem_ctl_info *mci;
1528 	struct amd64_pvt *pvt;
1529 	u64 cs_base, cs_mask;
1530 	int cs_found = -EINVAL;
1531 	int csrow;
1532 
1533 	mci = edac_mc_find(nid);
1534 	if (!mci)
1535 		return cs_found;
1536 
1537 	pvt = mci->pvt_info;
1538 
1539 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1540 
1541 	for_each_chip_select(csrow, dct, pvt) {
1542 		if (!csrow_enabled(csrow, dct, pvt))
1543 			continue;
1544 
1545 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1546 
1547 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1548 			 csrow, cs_base, cs_mask);
1549 
1550 		cs_mask = ~cs_mask;
1551 
1552 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1553 			 (in_addr & cs_mask), (cs_base & cs_mask));
1554 
1555 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1556 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1557 				cs_found =  csrow;
1558 				break;
1559 			}
1560 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
1561 
1562 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1563 			break;
1564 		}
1565 	}
1566 	return cs_found;
1567 }
1568 
1569 /*
1570  * See F2x10C. Non-interleaved graphics framebuffer memory below 16G is
1571  * swapped with a region located at the bottom of memory so that the GPU can use
1572  * the interleaved region and thus two channels.
1573  */
1574 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1575 {
1576 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1577 
1578 	if (pvt->fam == 0x10) {
1579 		/* only revC3 and revE have that feature */
1580 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1581 			return sys_addr;
1582 	}
1583 
1584 	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1585 
1586 	if (!(swap_reg & 0x1))
1587 		return sys_addr;
1588 
1589 	swap_base	= (swap_reg >> 3) & 0x7f;
1590 	swap_limit	= (swap_reg >> 11) & 0x7f;
1591 	rgn_size	= (swap_reg >> 20) & 0x7f;
1592 	tmp_addr	= sys_addr >> 27;
1593 
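	/*
	 * The comparisons below are in 128MB granules (sys_addr bits [33:27]);
	 * the swap applies only to addresses below 16GB.
	 */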
1594 	if (!(sys_addr >> 34) &&
1595 	    (((tmp_addr >= swap_base) &&
1596 	     (tmp_addr <= swap_limit)) ||
1597 	     (tmp_addr < rgn_size)))
1598 		return sys_addr ^ (u64)swap_base << 27;
1599 
1600 	return sys_addr;
1601 }
1602 
1603 /* For a given @dram_range, check if @sys_addr falls within it. */
1604 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1605 				  u64 sys_addr, int *chan_sel)
1606 {
1607 	int cs_found = -EINVAL;
1608 	u64 chan_addr;
1609 	u32 dct_sel_base;
1610 	u8 channel;
1611 	bool high_range = false;
1612 
1613 	u8 node_id    = dram_dst_node(pvt, range);
1614 	u8 intlv_en   = dram_intlv_en(pvt, range);
1615 	u32 intlv_sel = dram_intlv_sel(pvt, range);
1616 
1617 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1618 		 range, sys_addr, get_dram_limit(pvt, range));
1619 
1620 	if (dhar_valid(pvt) &&
1621 	    dhar_base(pvt) <= sys_addr &&
1622 	    sys_addr < BIT_64(32)) {
1623 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1624 			    sys_addr);
1625 		return -EINVAL;
1626 	}
1627 
1628 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1629 		return -EINVAL;
1630 
1631 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1632 
1633 	dct_sel_base = dct_sel_baseaddr(pvt);
1634 
1635 	/*
1636 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1637 	 * select between DCT0 and DCT1.
1638 	 */
1639 	if (dct_high_range_enabled(pvt) &&
1640 	   !dct_ganging_enabled(pvt) &&
1641 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1642 		high_range = true;
1643 
1644 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1645 
1646 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1647 					  high_range, dct_sel_base);
1648 
1649 	/* Remove node interleaving, see F1x120 */
1650 	if (intlv_en)
1651 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1652 			    (chan_addr & 0xfff);
1653 
1654 	/* remove channel interleave */
1655 	if (dct_interleave_enabled(pvt) &&
1656 	   !dct_high_range_enabled(pvt) &&
1657 	   !dct_ganging_enabled(pvt)) {
1658 
1659 		if (dct_sel_interleave_addr(pvt) != 1) {
1660 			if (dct_sel_interleave_addr(pvt) == 0x3)
1661 				/* hash 9 */
1662 				chan_addr = ((chan_addr >> 10) << 9) |
1663 					     (chan_addr & 0x1ff);
1664 			else
1665 				/* A[6] or hash 6 */
1666 				chan_addr = ((chan_addr >> 7) << 6) |
1667 					     (chan_addr & 0x3f);
1668 		} else
1669 			/* A[12] */
1670 			chan_addr = ((chan_addr >> 13) << 12) |
1671 				     (chan_addr & 0xfff);
1672 	}
1673 
1674 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1675 
1676 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1677 
1678 	if (cs_found >= 0)
1679 		*chan_sel = channel;
1680 
1681 	return cs_found;
1682 }
1683 
1684 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1685 					u64 sys_addr, int *chan_sel)
1686 {
1687 	int cs_found = -EINVAL;
1688 	int num_dcts_intlv = 0;
1689 	u64 chan_addr, chan_offset;
1690 	u64 dct_base, dct_limit;
1691 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1692 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1693 
1694 	u64 dhar_offset		= f10_dhar_offset(pvt);
1695 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
1696 	u8 node_id		= dram_dst_node(pvt, range);
1697 	u8 intlv_en		= dram_intlv_en(pvt, range);
1698 
1699 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1700 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1701 
1702 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1703 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
1704 
1705 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1706 		 range, sys_addr, get_dram_limit(pvt, range));
1707 
1708 	if (!(get_dram_base(pvt, range)  <= sys_addr) ||
1709 	    !(get_dram_limit(pvt, range) >= sys_addr))
1710 		return -EINVAL;
1711 
1712 	if (dhar_valid(pvt) &&
1713 	    dhar_base(pvt) <= sys_addr &&
1714 	    sys_addr < BIT_64(32)) {
1715 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1716 			    sys_addr);
1717 		return -EINVAL;
1718 	}
1719 
1720 	/* Verify sys_addr is within DCT Range. */
1721 	dct_base = (u64) dct_sel_baseaddr(pvt);
1722 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
1723 
1724 	if (!(dct_cont_base_reg & BIT(0)) &&
1725 	    !(dct_base <= (sys_addr >> 27) &&
1726 	      dct_limit >= (sys_addr >> 27)))
1727 		return -EINVAL;
1728 
1729 	/* Verify the number of DCTs that participate in channel interleaving. */
1730 	num_dcts_intlv = (int) hweight8(intlv_en);
1731 
1732 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
1733 		return -EINVAL;
1734 
1735 	if (pvt->model >= 0x60)
1736 		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
1737 	else
1738 		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
1739 						     num_dcts_intlv, dct_sel);
1740 
1741 	/* Verify we stay within the MAX number of channels allowed */
1742 	if (channel > 3)
1743 		return -EINVAL;
1744 
1745 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
1746 
1747 	/* Get normalized DCT addr */
1748 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
1749 		chan_offset = dhar_offset;
1750 	else
1751 		chan_offset = dct_base << 27;
1752 
1753 	chan_addr = sys_addr - chan_offset;
1754 
1755 	/* remove channel interleave */
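	/*
	 * e.g. with 4 DCTs and DctSelIntLvAddr == 4, SysAddr bits [9:8]
	 * selected the DCT above, so those two bits are squeezed out of
	 * the channel address here.
	 */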
1756 	if (num_dcts_intlv == 2) {
1757 		if (intlv_addr == 0x4)
1758 			chan_addr = ((chan_addr >> 9) << 8) |
1759 						(chan_addr & 0xff);
1760 		else if (intlv_addr == 0x5)
1761 			chan_addr = ((chan_addr >> 10) << 9) |
1762 						(chan_addr & 0x1ff);
1763 		else
1764 			return -EINVAL;
1765 
1766 	} else if (num_dcts_intlv == 4) {
1767 		if (intlv_addr == 0x4)
1768 			chan_addr = ((chan_addr >> 10) << 8) |
1769 							(chan_addr & 0xff);
1770 		else if (intlv_addr == 0x5)
1771 			chan_addr = ((chan_addr >> 11) << 9) |
1772 							(chan_addr & 0x1ff);
1773 		else
1774 			return -EINVAL;
1775 	}
1776 
1777 	if (dct_offset_en) {
1778 		amd64_read_pci_cfg(pvt->F1,
1779 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
1780 				   &tmp);
1781 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
1782 	}
1783 
1784 	f15h_select_dct(pvt, channel);
1785 
1786 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1787 
1788 	/*
1789 	 * Find Chip select:
1790 	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
1791 	 * there is support for 4 DCTs, but only 2 are currently functional.
1792 	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
1793 	 * pvt->csels[1]. So we need to use '1' here to get correct info.
1794 	 * Refer to the F15 M30h BKDG, Sections 2.10 and 2.10.3, for clarification.
1795 	 */
1796 	alias_channel =  (channel == 3) ? 1 : channel;
1797 
1798 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
1799 
1800 	if (cs_found >= 0)
1801 		*chan_sel = alias_channel;
1802 
1803 	return cs_found;
1804 }
1805 
1806 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
1807 					u64 sys_addr,
1808 					int *chan_sel)
1809 {
1810 	int cs_found = -EINVAL;
1811 	unsigned range;
1812 
1813 	for (range = 0; range < DRAM_RANGES; range++) {
1814 		if (!dram_rw(pvt, range))
1815 			continue;
1816 
1817 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
1818 			cs_found = f15_m30h_match_to_this_node(pvt, range,
1819 							       sys_addr,
1820 							       chan_sel);
1821 
1822 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
1823 			 (get_dram_limit(pvt, range) >= sys_addr)) {
1824 			cs_found = f1x_match_to_this_node(pvt, range,
1825 							  sys_addr, chan_sel);
1826 			if (cs_found >= 0)
1827 				break;
1828 		}
1829 	}
1830 	return cs_found;
1831 }
1832 
1833 /*
1834  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1835  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1836  *
1837  * The @sys_addr is usually an error address received from the hardware
1838  * (MCX_ADDR).
1839  */
1840 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1841 				     struct err_info *err)
1842 {
1843 	struct amd64_pvt *pvt = mci->pvt_info;
1844 
1845 	error_address_to_page_and_offset(sys_addr, err);
1846 
1847 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
1848 	if (err->csrow < 0) {
1849 		err->err_code = ERR_CSROW;
1850 		return;
1851 	}
1852 
1853 	/*
1854 	 * We need the syndromes for channel detection only when we're
1855 	 * ganged. Otherwise @chan should already contain the channel at
1856 	 * this point.
1857 	 */
1858 	if (dct_ganging_enabled(pvt))
1859 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1860 }
1861 
1862 /*
 * Debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs.
1865  */
1866 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1867 {
1868 	int dimm, size0, size1;
1869 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1870 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
1871 
	if (pvt->fam == 0xf) {
		/* K8 revisions < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;

		WARN_ON(ctrl != 0);
	}
1879 
1880 	if (pvt->fam == 0x10) {
1881 		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
1882 							   : pvt->dbam0;
1883 		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
1884 				 pvt->csels[1].csbases :
1885 				 pvt->csels[0].csbases;
1886 	} else if (ctrl) {
1887 		dbam = pvt->dbam0;
1888 		dcsb = pvt->csels[1].csbases;
1889 	}
1890 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1891 		 ctrl, dbam);
1892 
1893 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1894 
1895 	/* Dump memory sizes for DIMM and its CSROWs */
1896 	for (dimm = 0; dimm < 4; dimm++) {
1897 
1898 		size0 = 0;
		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			/*
			 * For F15h M60h, we need a multiplier for the LRDIMM
			 * cs_size calculation. We pass the 'dimm' value to
			 * the dbam_to_cs mapper so it can find the multiplier
			 * from the corresponding DCSM.
			 */
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);
1908 
1909 		size1 = 0;
1910 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1911 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1912 						     DBAM_DIMM(dimm, dbam),
1913 						     dimm);
1914 
1915 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1916 				dimm * 2,     size0,
1917 				dimm * 2 + 1, size1);
1918 	}
1919 }
1920 
1921 static struct amd64_family_type family_types[] = {
1922 	[K8_CPUS] = {
1923 		.ctl_name = "K8",
1924 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1925 		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
1926 		.ops = {
1927 			.early_channel_count	= k8_early_channel_count,
1928 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
1929 			.dbam_to_cs		= k8_dbam_to_chip_select,
1930 		}
1931 	},
1932 	[F10_CPUS] = {
1933 		.ctl_name = "F10h",
1934 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1935 		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
1936 		.ops = {
1937 			.early_channel_count	= f1x_early_channel_count,
1938 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1939 			.dbam_to_cs		= f10_dbam_to_chip_select,
1940 		}
1941 	},
1942 	[F15_CPUS] = {
1943 		.ctl_name = "F15h",
1944 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1945 		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
1946 		.ops = {
1947 			.early_channel_count	= f1x_early_channel_count,
1948 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1949 			.dbam_to_cs		= f15_dbam_to_chip_select,
1950 		}
1951 	},
1952 	[F15_M30H_CPUS] = {
1953 		.ctl_name = "F15h_M30h",
1954 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
1955 		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
1956 		.ops = {
1957 			.early_channel_count	= f1x_early_channel_count,
1958 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1959 			.dbam_to_cs		= f16_dbam_to_chip_select,
1960 		}
1961 	},
1962 	[F15_M60H_CPUS] = {
1963 		.ctl_name = "F15h_M60h",
1964 		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
1965 		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
1966 		.ops = {
1967 			.early_channel_count	= f1x_early_channel_count,
1968 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1969 			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
1970 		}
1971 	},
1972 	[F16_CPUS] = {
1973 		.ctl_name = "F16h",
1974 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
1975 		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
1976 		.ops = {
1977 			.early_channel_count	= f1x_early_channel_count,
1978 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1979 			.dbam_to_cs		= f16_dbam_to_chip_select,
1980 		}
1981 	},
1982 	[F16_M30H_CPUS] = {
1983 		.ctl_name = "F16h_M30h",
1984 		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
1985 		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
1986 		.ops = {
1987 			.early_channel_count	= f1x_early_channel_count,
1988 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
1989 			.dbam_to_cs		= f16_dbam_to_chip_select,
1990 		}
1991 	},
1992 };
1993 
1994 /*
1995  * These are tables of eigenvectors (one per line) which can be used for the
1996  * construction of the syndrome tables. The modified syndrome search algorithm
1997  * uses those to find the symbol in error and thus the DIMM.
1998  *
1999  * Algorithm courtesy of Ross LaFetra from AMD.
2000  */
2001 static const u16 x4_vectors[] = {
2002 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
2003 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
2004 	0x0001, 0x0002, 0x0004, 0x0008,
2005 	0x1013, 0x3032, 0x4044, 0x8088,
2006 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
2007 	0x4857, 0xc4fe, 0x13cc, 0x3288,
2008 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2009 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2010 	0x15c1, 0x2a42, 0x89ac, 0x4758,
2011 	0x2b03, 0x1602, 0x4f0c, 0xca08,
2012 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2013 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
2014 	0x2b87, 0x164e, 0x642c, 0xdc18,
2015 	0x40b9, 0x80de, 0x1094, 0x20e8,
2016 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
2017 	0x11c1, 0x2242, 0x84ac, 0x4c58,
2018 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
2019 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2020 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
2021 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2022 	0x16b3, 0x3d62, 0x4f34, 0x8518,
2023 	0x1e2f, 0x391a, 0x5cac, 0xf858,
2024 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2025 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2026 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2027 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
2028 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
2029 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
2030 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
2031 	0x185d, 0x2ca6, 0x7914, 0x9e28,
2032 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
2033 	0x4199, 0x82ee, 0x19f4, 0x2e58,
2034 	0x4807, 0xc40e, 0x130c, 0x3208,
2035 	0x1905, 0x2e0a, 0x5804, 0xac08,
2036 	0x213f, 0x132a, 0xadfc, 0x5ba8,
2037 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2038 };
2039 
2040 static const u16 x8_vectors[] = {
2041 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2042 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2043 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2044 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2045 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2046 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2047 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2048 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2049 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2050 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2051 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2052 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2053 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2054 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2055 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2056 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2057 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2058 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2059 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2060 };
2061 
2062 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2063 			   unsigned v_dim)
2064 {
2065 	unsigned int i, err_sym;
2066 
2067 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2068 		u16 s = syndrome;
2069 		unsigned v_idx =  err_sym * v_dim;
2070 		unsigned v_end = (err_sym + 1) * v_dim;
2071 
2072 		/* walk over all 16 bits of the syndrome */
2073 		for (i = 1; i < (1U << 16); i <<= 1) {
2074 
2075 			/* if bit is set in that eigenvector... */
2076 			if (v_idx < v_end && vectors[v_idx] & i) {
2077 				u16 ev_comp = vectors[v_idx++];
2078 
2079 				/* ... and bit set in the modified syndrome, */
2080 				if (s & i) {
2081 					/* remove it. */
2082 					s ^= ev_comp;
2083 
2084 					if (!s)
2085 						return err_sym;
2086 				}
2087 
2088 			} else if (s & i)
2089 				/* can't get to zero, move to next symbol */
2090 				break;
2091 		}
2092 	}
2093 
2094 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2095 	return -1;
2096 }
2097 
2098 static int map_err_sym_to_channel(int err_sym, int sym_size)
2099 {
2100 	if (sym_size == 4)
2101 		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, "Invalid error symbol: 0x%x\n", err_sym);
			return -1;

		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
2133 		}
2134 	return -1;
2135 }
2136 
2137 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2138 {
2139 	struct amd64_pvt *pvt = mci->pvt_info;
2140 	int err_sym = -1;
2141 
2142 	if (pvt->ecc_sym_sz == 8)
2143 		err_sym = decode_syndrome(syndrome, x8_vectors,
2144 					  ARRAY_SIZE(x8_vectors),
2145 					  pvt->ecc_sym_sz);
2146 	else if (pvt->ecc_sym_sz == 4)
2147 		err_sym = decode_syndrome(syndrome, x4_vectors,
2148 					  ARRAY_SIZE(x4_vectors),
2149 					  pvt->ecc_sym_sz);
2150 	else {
2151 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2152 		return err_sym;
2153 	}
2154 
2155 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2156 }
2157 
2158 static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
2159 			    u8 ecc_type)
2160 {
2161 	enum hw_event_mc_err_type err_type;
2162 	const char *string;
2163 
2164 	if (ecc_type == 2)
2165 		err_type = HW_EVENT_ERR_CORRECTED;
2166 	else if (ecc_type == 1)
2167 		err_type = HW_EVENT_ERR_UNCORRECTED;
2168 	else {
2169 		WARN(1, "Something is rotten in the state of Denmark.\n");
2170 		return;
2171 	}
2172 
2173 	switch (err->err_code) {
2174 	case DECODE_OK:
2175 		string = "";
2176 		break;
2177 	case ERR_NODE:
2178 		string = "Failed to map error addr to a node";
2179 		break;
2180 	case ERR_CSROW:
2181 		string = "Failed to map error addr to a csrow";
2182 		break;
2183 	case ERR_CHANNEL:
2184 		string = "unknown syndrome - possible error reporting race";
2185 		break;
2186 	default:
2187 		string = "WTF error";
2188 		break;
2189 	}
2190 
2191 	edac_mc_handle_error(err_type, mci, 1,
2192 			     err->page, err->offset, err->syndrome,
2193 			     err->csrow, err->channel, -1,
2194 			     string, "");
2195 }
2196 
2197 static inline void decode_bus_error(int node_id, struct mce *m)
2198 {
2199 	struct mem_ctl_info *mci;
2200 	struct amd64_pvt *pvt;
2201 	u8 ecc_type = (m->status >> 45) & 0x3;
2202 	u8 xec = XEC(m->status, 0x1f);
2203 	u16 ec = EC(m->status);
2204 	u64 sys_addr;
2205 	struct err_info err;
2206 
2207 	mci = edac_mc_find(node_id);
2208 	if (!mci)
2209 		return;
2210 
2211 	pvt = mci->pvt_info;
2212 
2213 	/* Bail out early if this was an 'observed' error */
2214 	if (PP(ec) == NBSL_PP_OBS)
2215 		return;
2216 
2217 	/* Do only ECC errors */
2218 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2219 		return;
2220 
2221 	memset(&err, 0, sizeof(err));
2222 
2223 	sys_addr = get_error_address(pvt, m);
2224 
2225 	if (ecc_type == 2)
2226 		err.syndrome = extract_syndrome(m->status);
2227 
2228 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2229 
2230 	__log_bus_error(mci, &err, ecc_type);
2231 }
2232 
2233 /*
2234  * Use pvt->F3 which contains the F3 CPU PCI device to get the related
2235  * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
2236  */
2237 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
2238 {
2239 	/* Reserve the ADDRESS MAP Device */
2240 	pvt->F1 = pci_get_related_function(pvt->F3->vendor, f1_id, pvt->F3);
2241 	if (!pvt->F1) {
2242 		amd64_err("error address map device not found: "
2243 			  "vendor %x device 0x%x (broken BIOS?)\n",
2244 			  PCI_VENDOR_ID_AMD, f1_id);
2245 		return -ENODEV;
2246 	}
2247 
2248 	/* Reserve the DCT Device */
2249 	pvt->F2 = pci_get_related_function(pvt->F3->vendor, f2_id, pvt->F3);
2250 	if (!pvt->F2) {
2251 		pci_dev_put(pvt->F1);
2252 		pvt->F1 = NULL;
2253 
2254 		amd64_err("error F2 device not found: "
2255 			  "vendor %x device 0x%x (broken BIOS?)\n",
2256 			  PCI_VENDOR_ID_AMD, f2_id);
2257 
2258 		return -ENODEV;
2259 	}
2260 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2261 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2262 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2263 
2264 	return 0;
2265 }
2266 
2267 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2268 {
2269 	pci_dev_put(pvt->F1);
2270 	pci_dev_put(pvt->F2);
2271 }
2272 
2273 /*
2274  * Retrieve the hardware registers of the memory controller (this includes the
2275  * 'Address Map' and 'Misc' device regs)
2276  */
2277 static void read_mc_regs(struct amd64_pvt *pvt)
2278 {
2279 	unsigned range;
2280 	u64 msr_val;
2281 	u32 tmp;
2282 
2283 	/*
2284 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2285 	 * those are Read-As-Zero
2286 	 */
2287 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2288 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
2289 
2290 	/* check first whether TOP_MEM2 is enabled */
2291 	rdmsrl(MSR_K8_SYSCFG, msr_val);
2292 	if (msr_val & (1U << 21)) {
2293 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2294 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2295 	} else
2296 		edac_dbg(0, "  TOP_MEM2 disabled\n");
2297 
2298 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2299 
2300 	read_dram_ctl_register(pvt);
2301 
2302 	for (range = 0; range < DRAM_RANGES; range++) {
2303 		u8 rw;
2304 
2305 		/* read settings for this DRAM range */
2306 		read_dram_base_limit_regs(pvt, range);
2307 
2308 		rw = dram_rw(pvt, range);
2309 		if (!rw)
2310 			continue;
2311 
2312 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2313 			 range,
2314 			 get_dram_base(pvt, range),
2315 			 get_dram_limit(pvt, range));
2316 
2317 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2318 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2319 			 (rw & 0x1) ? "R" : "-",
2320 			 (rw & 0x2) ? "W" : "-",
2321 			 dram_intlv_sel(pvt, range),
2322 			 dram_dst_node(pvt, range));
2323 	}
2324 
2325 	read_dct_base_mask(pvt);
2326 
2327 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2328 	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2329 
2330 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2331 
2332 	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2333 	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2334 
2335 	if (!dct_ganging_enabled(pvt)) {
2336 		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2337 		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2338 	}
2339 
2340 	pvt->ecc_sym_sz = 4;
2341 	determine_memory_type(pvt);
2342 	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2343 
2344 	if (pvt->fam >= 0x10) {
2345 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2346 		/* F16h has only DCT0, so no need to read dbam1 */
2347 		if (pvt->fam != 0x16)
2348 			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2349 
2350 		/* F10h, revD and later can do x8 ECC too */
2351 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2352 			pvt->ecc_sym_sz = 8;
2353 	}
2354 	dump_misc_regs(pvt);
2355 }
2356 
2357 /*
2358  * NOTE: CPU Revision Dependent code
2359  *
2360  * Input:
2361  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2362  *	k8 private pointer to -->
2363  *			DRAM Bank Address mapping register
2364  *			node_id
2365  *			DCL register where dual_channel_active is
2366  *
 * The DBAM register consists of 4 sets of 4 bits each. The field
 * definitions are:
2368  *
2369  * Bits:	CSROWs
2370  * 0-3		CSROWs 0 and 1
2371  * 4-7		CSROWs 2 and 3
2372  * 8-11		CSROWs 4 and 5
2373  * 12-15	CSROWs 6 and 7
2374  *
 * Values range from 0 to 15.
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
2378  *
 * The memory controller provides for a total of only 8 CSROWs in its
 * current architecture. Each "pair" of CSROWs normally represents just one
 * DIMM in single channel mode or two (2) DIMMs in dual channel mode.
2382  *
2383  * The following code logic collapses the various tables for CSROW based on CPU
2384  * revision.
2385  *
 * Returns:
 *	The number of PAGE_SIZE pages the specified CSROW encompasses.
2389  *
2390  */
2391 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2392 {
2393 	u32 cs_mode, nr_pages;
2394 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2395 
2396 
2397 	/*
2398 	 * The math on this doesn't look right on the surface because x/2*4 can
2399 	 * be simplified to x*2 but this expression makes use of the fact that
2400 	 * it is integral math where 1/2=0. This intermediate value becomes the
2401 	 * number of bits to shift the DBAM register to extract the proper CSROW
2402 	 * field.
2403 	 */
2404 	cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
2405 
2406 	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))
2407 							   << (20 - PAGE_SHIFT);
2408 
2409 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2410 		    csrow_nr, dct,  cs_mode);
2411 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2412 
2413 	return nr_pages;
2414 }
2415 
2416 /*
2417  * Initialize the array of csrow attribute instances, based on the values
2418  * from pci config hardware registers.
2419  */
2420 static int init_csrows(struct mem_ctl_info *mci)
2421 {
2422 	struct amd64_pvt *pvt = mci->pvt_info;
2423 	struct csrow_info *csrow;
2424 	struct dimm_info *dimm;
2425 	enum edac_type edac_mode;
2426 	int i, j, empty = 1;
2427 	int nr_pages = 0;
2428 	u32 val;
2429 
2430 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2431 
2432 	pvt->nbcfg = val;
2433 
2434 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2435 		 pvt->mc_node_id, val,
2436 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2437 
2438 	/*
2439 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2440 	 */
2441 	for_each_chip_select(i, 0, pvt) {
2442 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2443 		bool row_dct1 = false;
2444 
2445 		if (pvt->fam != 0xf)
2446 			row_dct1 = !!csrow_enabled(i, 1, pvt);
2447 
2448 		if (!row_dct0 && !row_dct1)
2449 			continue;
2450 
2451 		csrow = mci->csrows[i];
2452 		empty = 0;
2453 
2454 		edac_dbg(1, "MC node: %d, csrow: %d\n",
2455 			    pvt->mc_node_id, i);
2456 
2457 		if (row_dct0) {
2458 			nr_pages = get_csrow_nr_pages(pvt, 0, i);
2459 			csrow->channels[0]->dimm->nr_pages = nr_pages;
2460 		}
2461 
2462 		/* K8 has only one DCT */
2463 		if (pvt->fam != 0xf && row_dct1) {
2464 			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
2465 
2466 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2467 			nr_pages += row_dct1_pages;
2468 		}
2469 
2470 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
2471 
2472 		/*
2473 		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2474 		 */
2475 		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2476 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2477 				    EDAC_S4ECD4ED : EDAC_SECDED;
2478 		else
2479 			edac_mode = EDAC_NONE;
2480 
2481 		for (j = 0; j < pvt->channel_count; j++) {
2482 			dimm = csrow->channels[j]->dimm;
2483 			dimm->mtype = pvt->dram_type;
2484 			dimm->edac_mode = edac_mode;
2485 		}
2486 	}
2487 
2488 	return empty;
2489 }
2490 
2491 /* get all cores on this DCT */
2492 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2493 {
2494 	int cpu;
2495 
2496 	for_each_online_cpu(cpu)
2497 		if (amd_get_nb_id(cpu) == nid)
2498 			cpumask_set_cpu(cpu, mask);
2499 }
2500 
2501 /* check MCG_CTL on all the cpus on this node */
2502 static bool nb_mce_bank_enabled_on_node(u16 nid)
2503 {
2504 	cpumask_var_t mask;
2505 	int cpu, nbe;
2506 	bool ret = false;
2507 
2508 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2509 		amd64_warn("%s: Error allocating mask\n", __func__);
2510 		return false;
2511 	}
2512 
2513 	get_cpus_on_this_dct_cpumask(mask, nid);
2514 
2515 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2516 
2517 	for_each_cpu(cpu, mask) {
2518 		struct msr *reg = per_cpu_ptr(msrs, cpu);
2519 		nbe = reg->l & MSR_MCGCTL_NBE;
2520 
2521 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2522 			 cpu, reg->q,
2523 			 (nbe ? "enabled" : "disabled"));
2524 
2525 		if (!nbe)
2526 			goto out;
2527 	}
2528 	ret = true;
2529 
2530 out:
2531 	free_cpumask_var(mask);
2532 	return ret;
2533 }
2534 
2535 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
2536 {
2537 	cpumask_var_t cmask;
2538 	int cpu;
2539 
2540 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2541 		amd64_warn("%s: error allocating mask\n", __func__);
2542 		return false;
2543 	}
2544 
2545 	get_cpus_on_this_dct_cpumask(cmask, nid);
2546 
2547 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2548 
2549 	for_each_cpu(cpu, cmask) {
2550 
2551 		struct msr *reg = per_cpu_ptr(msrs, cpu);
2552 
2553 		if (on) {
2554 			if (reg->l & MSR_MCGCTL_NBE)
2555 				s->flags.nb_mce_enable = 1;
2556 
2557 			reg->l |= MSR_MCGCTL_NBE;
2558 		} else {
2559 			/*
2560 			 * Turn off NB MCE reporting only when it was off before
2561 			 */
2562 			if (!s->flags.nb_mce_enable)
2563 				reg->l &= ~MSR_MCGCTL_NBE;
2564 		}
2565 	}
2566 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2567 
2568 	free_cpumask_var(cmask);
2569 
2570 	return 0;
2571 }
2572 
2573 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2574 				       struct pci_dev *F3)
2575 {
2576 	bool ret = true;
2577 	u32 value, mask = 0x3;		/* UECC/CECC enable */
2578 
2579 	if (toggle_ecc_err_reporting(s, nid, ON)) {
2580 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2581 		return false;
2582 	}
2583 
2584 	amd64_read_pci_cfg(F3, NBCTL, &value);
2585 
2586 	s->old_nbctl   = value & mask;
2587 	s->nbctl_valid = true;
2588 
2589 	value |= mask;
2590 	amd64_write_pci_cfg(F3, NBCTL, value);
2591 
2592 	amd64_read_pci_cfg(F3, NBCFG, &value);
2593 
2594 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2595 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
2596 
2597 	if (!(value & NBCFG_ECC_ENABLE)) {
2598 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2599 
2600 		s->flags.nb_ecc_prev = 0;
2601 
2602 		/* Attempt to turn on DRAM ECC Enable */
2603 		value |= NBCFG_ECC_ENABLE;
2604 		amd64_write_pci_cfg(F3, NBCFG, value);
2605 
2606 		amd64_read_pci_cfg(F3, NBCFG, &value);
2607 
2608 		if (!(value & NBCFG_ECC_ENABLE)) {
2609 			amd64_warn("Hardware rejected DRAM ECC enable,"
2610 				   "check memory DIMM configuration.\n");
2611 			ret = false;
2612 		} else {
2613 			amd64_info("Hardware accepted DRAM ECC Enable\n");
2614 		}
2615 	} else {
2616 		s->flags.nb_ecc_prev = 1;
2617 	}
2618 
2619 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2620 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
2621 
2622 	return ret;
2623 }
2624 
2625 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2626 					struct pci_dev *F3)
2627 {
2628 	u32 value, mask = 0x3;		/* UECC/CECC enable */
2629 
2630 
2631 	if (!s->nbctl_valid)
2632 		return;
2633 
2634 	amd64_read_pci_cfg(F3, NBCTL, &value);
2635 	value &= ~mask;
2636 	value |= s->old_nbctl;
2637 
2638 	amd64_write_pci_cfg(F3, NBCTL, value);
2639 
2640 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2641 	if (!s->flags.nb_ecc_prev) {
2642 		amd64_read_pci_cfg(F3, NBCFG, &value);
2643 		value &= ~NBCFG_ECC_ENABLE;
2644 		amd64_write_pci_cfg(F3, NBCFG, value);
2645 	}
2646 
2647 	/* restore the NB Enable MCGCTL bit */
2648 	if (toggle_ecc_err_reporting(s, nid, OFF))
2649 		amd64_warn("Error restoring NB MCGCTL settings!\n");
2650 }
2651 
2652 /*
 * EDAC requires that the BIOS have ECC enabled before it takes over the
 * processing of ECC errors. A command line option allows force-enabling
 * hardware ECC later in enable_ecc_error_reporting().
2657  */
2658 static const char *ecc_msg =
2659 	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2660 	" Either enable ECC checking or force module loading by setting "
2661 	"'ecc_enable_override'.\n"
2662 	" (Note that use of the override may cause unknown side effects.)\n";
2663 
2664 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
2665 {
2666 	u32 value;
2667 	u8 ecc_en = 0;
2668 	bool nb_mce_en = false;
2669 
2670 	amd64_read_pci_cfg(F3, NBCFG, &value);
2671 
2672 	ecc_en = !!(value & NBCFG_ECC_ENABLE);
2673 	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2674 
2675 	nb_mce_en = nb_mce_bank_enabled_on_node(nid);
2676 	if (!nb_mce_en)
2677 		amd64_notice("NB MCE bank disabled, set MSR "
2678 			     "0x%08x[4] on node %d to enable.\n",
2679 			     MSR_IA32_MCG_CTL, nid);
2680 
2681 	if (!ecc_en || !nb_mce_en) {
2682 		amd64_notice("%s", ecc_msg);
2683 		return false;
2684 	}
2685 	return true;
2686 }
2687 
2688 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2689 				 struct amd64_family_type *fam)
2690 {
2691 	struct amd64_pvt *pvt = mci->pvt_info;
2692 
2693 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2694 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
2695 
2696 	if (pvt->nbcap & NBCAP_SECDED)
2697 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2698 
2699 	if (pvt->nbcap & NBCAP_CHIPKILL)
2700 		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2701 
2702 	mci->edac_cap		= determine_edac_cap(pvt);
2703 	mci->mod_name		= EDAC_MOD_STR;
2704 	mci->mod_ver		= EDAC_AMD64_VERSION;
2705 	mci->ctl_name		= fam->ctl_name;
2706 	mci->dev_name		= pci_name(pvt->F2);
2707 	mci->ctl_page_to_phys	= NULL;
2708 
2709 	/* memory scrubber interface */
2710 	mci->set_sdram_scrub_rate = set_scrub_rate;
2711 	mci->get_sdram_scrub_rate = get_scrub_rate;
2712 }
2713 
2714 /*
2715  * returns a pointer to the family descriptor on success, NULL otherwise.
2716  */
2717 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
2718 {
2719 	struct amd64_family_type *fam_type = NULL;
2720 
2721 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
2722 	pvt->stepping	= boot_cpu_data.x86_mask;
2723 	pvt->model	= boot_cpu_data.x86_model;
2724 	pvt->fam	= boot_cpu_data.x86;
2725 
2726 	switch (pvt->fam) {
2727 	case 0xf:
2728 		fam_type	= &family_types[K8_CPUS];
2729 		pvt->ops	= &family_types[K8_CPUS].ops;
2730 		break;
2731 
2732 	case 0x10:
2733 		fam_type	= &family_types[F10_CPUS];
2734 		pvt->ops	= &family_types[F10_CPUS].ops;
2735 		break;
2736 
2737 	case 0x15:
2738 		if (pvt->model == 0x30) {
2739 			fam_type = &family_types[F15_M30H_CPUS];
2740 			pvt->ops = &family_types[F15_M30H_CPUS].ops;
2741 			break;
2742 		} else if (pvt->model == 0x60) {
2743 			fam_type = &family_types[F15_M60H_CPUS];
2744 			pvt->ops = &family_types[F15_M60H_CPUS].ops;
2745 			break;
2746 		}
2747 
2748 		fam_type	= &family_types[F15_CPUS];
2749 		pvt->ops	= &family_types[F15_CPUS].ops;
2750 		break;
2751 
2752 	case 0x16:
2753 		if (pvt->model == 0x30) {
2754 			fam_type = &family_types[F16_M30H_CPUS];
2755 			pvt->ops = &family_types[F16_M30H_CPUS].ops;
2756 			break;
2757 		}
2758 		fam_type	= &family_types[F16_CPUS];
2759 		pvt->ops	= &family_types[F16_CPUS].ops;
2760 		break;
2761 
2762 	default:
2763 		amd64_err("Unsupported family!\n");
2764 		return NULL;
2765 	}
2766 
2767 	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
2768 		     (pvt->fam == 0xf ?
2769 				(pvt->ext_model >= K8_REV_F  ? "revF or later "
2770 							     : "revE or earlier ")
2771 				 : ""), pvt->mc_node_id);
2772 	return fam_type;
2773 }
2774 
2775 static const struct attribute_group *amd64_edac_attr_groups[] = {
2776 #ifdef CONFIG_EDAC_DEBUG
2777 	&amd64_edac_dbg_group,
2778 #endif
2779 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
2780 	&amd64_edac_inj_group,
2781 #endif
2782 	NULL
2783 };
2784 
2785 static int init_one_instance(unsigned int nid)
2786 {
2787 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2788 	struct amd64_family_type *fam_type = NULL;
2789 	struct mem_ctl_info *mci = NULL;
2790 	struct edac_mc_layer layers[2];
2791 	struct amd64_pvt *pvt = NULL;
2792 	int err = 0, ret;
2793 
2794 	ret = -ENOMEM;
2795 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2796 	if (!pvt)
2797 		goto err_ret;
2798 
2799 	pvt->mc_node_id	= nid;
2800 	pvt->F3 = F3;
2801 
2802 	ret = -EINVAL;
2803 	fam_type = per_family_init(pvt);
2804 	if (!fam_type)
2805 		goto err_free;
2806 
2807 	ret = -ENODEV;
2808 	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f2_id);
2809 	if (err)
2810 		goto err_free;
2811 
2812 	read_mc_regs(pvt);
2813 
2814 	/*
2815 	 * We need to determine how many memory channels there are. Then use
2816 	 * that information for calculating the size of the dynamic instance
2817 	 * tables in the 'mci' structure.
2818 	 */
2819 	ret = -EINVAL;
2820 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
2821 	if (pvt->channel_count < 0)
2822 		goto err_siblings;
2823 
2824 	ret = -ENOMEM;
2825 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2826 	layers[0].size = pvt->csels[0].b_cnt;
2827 	layers[0].is_virt_csrow = true;
2828 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
2829 
2830 	/*
2831 	 * Always allocate two channels since we can have setups with DIMMs on
2832 	 * only one channel. Also, this simplifies handling later for the price
2833 	 * of a couple of KBs tops.
2834 	 */
2835 	layers[1].size = 2;
2836 	layers[1].is_virt_csrow = false;
2837 
2838 	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
2839 	if (!mci)
2840 		goto err_siblings;
2841 
2842 	mci->pvt_info = pvt;
2843 	mci->pdev = &pvt->F3->dev;
2844 
2845 	setup_mci_misc_attrs(mci, fam_type);
2846 
2847 	if (init_csrows(mci))
2848 		mci->edac_cap = EDAC_FLAG_NONE;
2849 
2850 	ret = -ENODEV;
2851 	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
2852 		edac_dbg(1, "failed edac_mc_add_mc()\n");
2853 		goto err_add_mc;
2854 	}
2855 
	/* Register error reporting with the EDAC MCE decoder */
2857 	if (report_gart_errors)
2858 		amd_report_gart_errors(true);
2859 
2860 	amd_register_ecc_decoder(decode_bus_error);
2861 
2862 	return 0;
2863 
2864 err_add_mc:
2865 	edac_mc_free(mci);
2866 
2867 err_siblings:
2868 	free_mc_sibling_devs(pvt);
2869 
2870 err_free:
2871 	kfree(pvt);
2872 
2873 err_ret:
2874 	return ret;
2875 }
2876 
2877 static int probe_one_instance(unsigned int nid)
2878 {
2879 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2880 	struct ecc_settings *s;
2881 	int ret;
2882 
2883 	ret = -ENOMEM;
2884 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2885 	if (!s)
2886 		goto err_out;
2887 
2888 	ecc_stngs[nid] = s;
2889 
2890 	if (!ecc_enabled(F3, nid)) {
2891 		ret = -ENODEV;
2892 
2893 		if (!ecc_enable_override)
2894 			goto err_enable;
2895 
2896 		amd64_warn("Forcing ECC on!\n");
2897 
2898 		if (!enable_ecc_error_reporting(s, nid, F3))
2899 			goto err_enable;
2900 	}
2901 
2902 	ret = init_one_instance(nid);
2903 	if (ret < 0) {
2904 		amd64_err("Error probing instance: %d\n", nid);
2905 		restore_ecc_error_reporting(s, nid, F3);
2906 	}
2907 
2908 	return ret;
2909 
2910 err_enable:
2911 	kfree(s);
2912 	ecc_stngs[nid] = NULL;
2913 
2914 err_out:
2915 	return ret;
2916 }
2917 
2918 static void remove_one_instance(unsigned int nid)
2919 {
2920 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2921 	struct ecc_settings *s = ecc_stngs[nid];
2922 	struct mem_ctl_info *mci;
2923 	struct amd64_pvt *pvt;
2924 
2925 	mci = find_mci_by_dev(&F3->dev);
2926 	WARN_ON(!mci);
2927 
2928 	/* Remove from EDAC CORE tracking list */
2929 	mci = edac_mc_del_mc(&F3->dev);
2930 	if (!mci)
2931 		return;
2932 
2933 	pvt = mci->pvt_info;
2934 
2935 	restore_ecc_error_reporting(s, nid, F3);
2936 
2937 	free_mc_sibling_devs(pvt);
2938 
2939 	/* unregister from EDAC MCE */
2940 	amd_report_gart_errors(false);
2941 	amd_unregister_ecc_decoder(decode_bus_error);
2942 
2943 	kfree(ecc_stngs[nid]);
2944 	ecc_stngs[nid] = NULL;
2945 
2946 	/* Free the EDAC CORE resources */
2947 	mci->pvt_info = NULL;
2948 
2949 	kfree(pvt);
2950 	edac_mc_free(mci);
2951 }
2952 
2953 static void setup_pci_device(void)
2954 {
2955 	struct mem_ctl_info *mci;
2956 	struct amd64_pvt *pvt;
2957 
2958 	if (pci_ctl)
2959 		return;
2960 
2961 	mci = edac_mc_find(0);
2962 	if (!mci)
2963 		return;
2964 
2965 	pvt = mci->pvt_info;
2966 	pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2967 	if (!pci_ctl) {
2968 		pr_warn("%s(): Unable to create PCI control\n", __func__);
2969 		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
2970 	}
2971 }
2972 
2973 static const struct x86_cpu_id amd64_cpuids[] = {
2974 	{ X86_VENDOR_AMD, 0xF,	X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
2975 	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
2976 	{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
2977 	{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
2978 	{ }
2979 };
2980 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
2981 
2982 static int __init amd64_edac_init(void)
2983 {
2984 	int err = -ENODEV;
2985 	int i;
2986 
2987 	if (amd_cache_northbridges() < 0)
2988 		goto err_ret;
2989 
2990 	opstate_init();
2991 
2992 	err = -ENOMEM;
	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
2994 	if (!ecc_stngs)
2995 		goto err_free;
2996 
2997 	msrs = msrs_alloc();
2998 	if (!msrs)
2999 		goto err_free;
3000 
3001 	for (i = 0; i < amd_nb_num(); i++)
3002 		if (probe_one_instance(i)) {
3003 			/* unwind properly */
3004 			while (--i >= 0)
3005 				remove_one_instance(i);
3006 
3007 			goto err_pci;
3008 		}
3009 
3010 	setup_pci_device();
3011 
3012 #ifdef CONFIG_X86_32
3013 	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3014 #endif
3015 
3016 	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
3017 
3018 	return 0;
3019 
3020 err_pci:
3021 	msrs_free(msrs);
3022 	msrs = NULL;
3023 
3024 err_free:
3025 	kfree(ecc_stngs);
3026 	ecc_stngs = NULL;
3027 
3028 err_ret:
3029 	return err;
3030 }
3031 
3032 static void __exit amd64_edac_exit(void)
3033 {
3034 	int i;
3035 
3036 	if (pci_ctl)
3037 		edac_pci_release_generic_ctl(pci_ctl);
3038 
3039 	for (i = 0; i < amd_nb_num(); i++)
3040 		remove_one_instance(i);
3041 
3042 	kfree(ecc_stngs);
3043 	ecc_stngs = NULL;
3044 
3045 	msrs_free(msrs);
3046 	msrs = NULL;
3047 }
3048 
3049 module_init(amd64_edac_init);
3050 module_exit(amd64_edac_exit);
3051 
3052 MODULE_LICENSE("GPL");
3053 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3054 		"Dave Peterson, Thayne Harbaugh");
3055 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3056 		EDAC_AMD64_VERSION);
3057 
3058 module_param(edac_op_state, int, 0444);
3059 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3060