xref: /linux/drivers/edac/amd64_edac.c (revision 29e31a8ee811f5d85274f0381f13cd6fe650aea4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "amd64_edac.h"
3 #include <asm/amd_nb.h>
4 
5 static struct edac_pci_ctl_info *pci_ctl;
6 
7 /*
8  * Set by command line parameter. If BIOS has enabled the ECC, this override is
9  * cleared to prevent re-enabling the hardware by this driver.
10  */
11 static int ecc_enable_override;
12 module_param(ecc_enable_override, int, 0644);
13 
14 static struct msr __percpu *msrs;
15 
16 static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
17 {
18 	if (!pvt->flags.zn_regs_v2)
19 		return reg;
20 
21 	switch (reg) {
22 	case UMCCH_ADDR_CFG:		return UMCCH_ADDR_CFG_DDR5;
23 	case UMCCH_ADDR_MASK_SEC:	return UMCCH_ADDR_MASK_SEC_DDR5;
24 	case UMCCH_DIMM_CFG:		return UMCCH_DIMM_CFG_DDR5;
25 	}
26 
27 	WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg);
28 	return 0;
29 }
30 
31 /* Per-node stuff */
32 static struct ecc_settings **ecc_stngs;
33 
34 /* Device for the PCI component */
35 static struct device *pci_ctl_dev;
36 
37 /*
38  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
39  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
40  * or higher value'.
41  *
42  * FIXME: Produce a better mapping/linearisation.
43  */
44 static const struct scrubrate {
45        u32 scrubval;           /* bit pattern for scrub rate */
46        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
47 } scrubrates[] = {
48 	{ 0x01, 1600000000UL},
49 	{ 0x02, 800000000UL},
50 	{ 0x03, 400000000UL},
51 	{ 0x04, 200000000UL},
52 	{ 0x05, 100000000UL},
53 	{ 0x06, 50000000UL},
54 	{ 0x07, 25000000UL},
55 	{ 0x08, 12284069UL},
56 	{ 0x09, 6274509UL},
57 	{ 0x0A, 3121951UL},
58 	{ 0x0B, 1560975UL},
59 	{ 0x0C, 781440UL},
60 	{ 0x0D, 390720UL},
61 	{ 0x0E, 195300UL},
62 	{ 0x0F, 97650UL},
63 	{ 0x10, 48854UL},
64 	{ 0x11, 24427UL},
65 	{ 0x12, 12213UL},
66 	{ 0x13, 6101UL},
67 	{ 0x14, 3051UL},
68 	{ 0x15, 1523UL},
69 	{ 0x16, 761UL},
70 	{ 0x00, 0UL},        /* scrubbing off */
71 };
72 
73 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
74 			       u32 *val, const char *func)
75 {
76 	int err = 0;
77 
78 	err = pci_read_config_dword(pdev, offset, val);
79 	if (err)
80 		amd64_warn("%s: error reading F%dx%03x.\n",
81 			   func, PCI_FUNC(pdev->devfn), offset);
82 
83 	return err;
84 }
85 
86 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
87 				u32 val, const char *func)
88 {
89 	int err = 0;
90 
91 	err = pci_write_config_dword(pdev, offset, val);
92 	if (err)
93 		amd64_warn("%s: error writing to F%dx%03x.\n",
94 			   func, PCI_FUNC(pdev->devfn), offset);
95 
96 	return err;
97 }
98 
99 /*
100  * Select DCT to which PCI cfg accesses are routed
101  */
102 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
103 {
104 	u32 reg = 0;
105 
106 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
107 	reg &= (pvt->model == 0x30) ? ~3 : ~1;
108 	reg |= dct;
109 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
110 }
111 
112 /*
114  * Depending on the family, F2 DCT reads need special handling:
115  *
116  * K8: has a single DCT only and no address offsets >= 0x100
117  *
118  * F10h: each DCT has its own set of regs
119  *	DCT0 -> F2x040..
120  *	DCT1 -> F2x140..
121  *
122  * F16h: has only 1 DCT
123  *
124  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
125  */
126 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
127 					 int offset, u32 *val)
128 {
129 	switch (pvt->fam) {
130 	case 0xf:
131 		if (dct || offset >= 0x100)
132 			return -EINVAL;
133 		break;
134 
135 	case 0x10:
136 		if (dct) {
137 			/*
138 			 * Note: If ganging is enabled, barring the regs
139 			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
140 			 * return 0. (cf. Section 2.8.1 F10h BKDG)
141 			 */
142 			if (dct_ganging_enabled(pvt))
143 				return 0;
144 
145 			offset += 0x100;
146 		}
147 		break;
148 
149 	case 0x15:
150 		/*
151 		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
152 		 * We should select which DCT we access using F1x10C[DctCfgSel]
153 		 */
154 		dct = (dct && pvt->model == 0x30) ? 3 : dct;
155 		f15h_select_dct(pvt, dct);
156 		break;
157 
158 	case 0x16:
159 		if (dct)
160 			return -EINVAL;
161 		break;
162 
163 	default:
164 		break;
165 	}
166 	return amd64_read_pci_cfg(pvt->F2, offset, val);
167 }
168 
169 /*
170  * Memory scrubber control interface. For K8, memory scrubbing is handled by
171  * hardware and can involve L2 cache, dcache as well as the main memory. With
172  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
173  * functionality.
174  *
175  * This causes the "units" for the scrubbing speed to vary between 64-byte
176  * blocks (DRAM) and cache lines. This is nasty, so we use bandwidth in
177  * bytes/sec for the setting instead.
178  *
179  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
180  * other archs, we might not have access to the caches directly.
181  */
182 
183 /*
184  * Scan the scrub rate mapping table for a close or matching bandwidth value
185  * to issue. If the requested rate is too big, use the last maximum value found.
186  */
187 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
188 {
189 	u32 scrubval;
190 	int i;
191 
192 	/*
193 	 * Map the configured rate (new_bw) to a value specific to the AMD64
194 	 * memory controller and apply it to the register. Search the table,
195 	 * which is sorted by decreasing bandwidth, for the first entry whose
196 	 * bandwidth does not exceed the requested setting and program that.
197 	 *
198 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
199 	 * by falling back to the last element in scrubrates[].
200 	 */
201 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
202 		/*
203 		 * skip scrub rates which aren't recommended
204 		 * (see F10 BKDG, F3x58)
205 		 */
206 		if (scrubrates[i].scrubval < min_rate)
207 			continue;
208 
209 		if (scrubrates[i].bandwidth <= new_bw)
210 			break;
211 	}
212 
213 	scrubval = scrubrates[i].scrubval;
214 
215 	if (pvt->fam == 0x15 && pvt->model == 0x60) {
216 		f15h_select_dct(pvt, 0);
217 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
218 		f15h_select_dct(pvt, 1);
219 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
220 	} else {
221 		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
222 	}
223 
224 	if (scrubval)
225 		return scrubrates[i].bandwidth;
226 
227 	return 0;
228 }
229 
230 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
231 {
232 	struct amd64_pvt *pvt = mci->pvt_info;
233 	u32 min_scrubrate = 0x5;
234 
235 	if (pvt->fam == 0xf)
236 		min_scrubrate = 0x0;
237 
238 	if (pvt->fam == 0x15) {
239 		/* Erratum #505 */
240 		if (pvt->model < 0x10)
241 			f15h_select_dct(pvt, 0);
242 
243 		if (pvt->model == 0x60)
244 			min_scrubrate = 0x6;
245 	}
246 	return __set_scrub_rate(pvt, bw, min_scrubrate);
247 }
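
/*
 * Illustrative example (editor's sketch; the request value below is made up):
 * with the default min_scrubrate of 0x5, a request of 90000000 bytes/sec skips
 * scrubvals 0x01-0x04 (below the 0x5 floor), skips 0x05 (100000000 exceeds the
 * request) and stops at 0x06, so the scrubber is programmed for the closest
 * rate that does not exceed the request:
 *
 *	set_scrub_rate(mci, 90000000);	returns 50000000 (scrubval 0x06)
 *	get_scrub_rate(mci);		returns 50000000
 */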
248 
249 static int get_scrub_rate(struct mem_ctl_info *mci)
250 {
251 	struct amd64_pvt *pvt = mci->pvt_info;
252 	int i, retval = -EINVAL;
253 	u32 scrubval = 0;
254 
255 	if (pvt->fam == 0x15) {
256 		/* Erratum #505 */
257 		if (pvt->model < 0x10)
258 			f15h_select_dct(pvt, 0);
259 
260 		if (pvt->model == 0x60)
261 			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
262 		else
263 			amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
264 	} else {
265 		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
266 	}
267 
268 	scrubval = scrubval & 0x001F;
269 
270 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
271 		if (scrubrates[i].scrubval == scrubval) {
272 			retval = scrubrates[i].bandwidth;
273 			break;
274 		}
275 	}
276 	return retval;
277 }
278 
279 /*
280  * returns true if the SysAddr given by sys_addr matches the
281  * DRAM base/limit associated with node_id
282  */
283 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
284 {
285 	u64 addr;
286 
287 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
288 	 * all ones if the most significant implemented address bit is 1.
289 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
290 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
291 	 * Application Programming.
292 	 */
293 	addr = sys_addr & 0x000000ffffffffffull;
294 
295 	return ((addr >= get_dram_base(pvt, nid)) &&
296 		(addr <= get_dram_limit(pvt, nid)));
297 }
298 
299 /*
300  * Attempt to map a SysAddr to a node. On success, return a pointer to the
301  * mem_ctl_info structure for the node that the SysAddr maps to.
302  *
303  * On failure, return NULL.
304  */
305 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
306 						u64 sys_addr)
307 {
308 	struct amd64_pvt *pvt;
309 	u8 node_id;
310 	u32 intlv_en, bits;
311 
312 	/*
313 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
314 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
315 	 */
316 	pvt = mci->pvt_info;
317 
318 	/*
319 	 * The value of this field should be the same for all DRAM Base
320 	 * registers.  Therefore we arbitrarily choose to read it from the
321 	 * register for node 0.
322 	 */
323 	intlv_en = dram_intlv_en(pvt, 0);
324 
325 	if (intlv_en == 0) {
326 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
327 			if (base_limit_match(pvt, sys_addr, node_id))
328 				goto found;
329 		}
330 		goto err_no_match;
331 	}
332 
333 	if (unlikely((intlv_en != 0x01) &&
334 		     (intlv_en != 0x03) &&
335 		     (intlv_en != 0x07))) {
336 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
337 		return NULL;
338 	}
339 
340 	bits = (((u32) sys_addr) >> 12) & intlv_en;
341 
342 	for (node_id = 0; ; ) {
343 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
344 			break;	/* intlv_sel field matches */
345 
346 		if (++node_id >= DRAM_RANGES)
347 			goto err_no_match;
348 	}
349 
350 	/* sanity test for sys_addr */
351 	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
352 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
353 			   "range for node %d with node interleaving enabled.\n",
354 			   __func__, sys_addr, node_id);
355 		return NULL;
356 	}
357 
358 found:
359 	return edac_mc_find((int)node_id);
360 
361 err_no_match:
362 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
363 		 (unsigned long)sys_addr);
364 
365 	return NULL;
366 }
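
/*
 * Example of the interleave decode above (editor's sketch with a made-up
 * address): with IntlvEn = 0x03 (4-node interleave on SysAddr[13:12]), a
 * SysAddr of 0x12345000 yields bits = (0x12345000 >> 12) & 0x3 = 0x1, so the
 * node whose DRAM Base[IntlvSel] & 0x3 equals 0x1 claims the address, subject
 * to the base/limit sanity check.
 */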
367 
368 /*
369  * compute the CS base address of the @csrow on the DRAM controller @dct.
370  * For details see F2x[5C:40] in the processor's BKDG
371  */
372 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
373 				 u64 *base, u64 *mask)
374 {
375 	u64 csbase, csmask, base_bits, mask_bits;
376 	u8 addr_shift;
377 
378 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
379 		csbase		= pvt->csels[dct].csbases[csrow];
380 		csmask		= pvt->csels[dct].csmasks[csrow];
381 		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
382 		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
383 		addr_shift	= 4;
384 
385 	/*
386 	 * F16h and F15h, models 30h and later need two addr_shift values:
387 	 * 8 for high and 6 for low (cf. F16h BKDG).
388 	 */
389 	} else if (pvt->fam == 0x16 ||
390 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
391 		csbase          = pvt->csels[dct].csbases[csrow];
392 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
393 
394 		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
395 		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
396 
397 		*mask = ~0ULL;
398 		/* poke holes for the csmask */
399 		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
400 			   (GENMASK_ULL(30, 19) << 8));
401 
402 		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
403 		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
404 
405 		return;
406 	} else {
407 		csbase		= pvt->csels[dct].csbases[csrow];
408 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
409 		addr_shift	= 8;
410 
411 		if (pvt->fam == 0x15)
412 			base_bits = mask_bits =
413 				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
414 		else
415 			base_bits = mask_bits =
416 				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
417 	}
418 
419 	*base  = (csbase & base_bits) << addr_shift;
420 
421 	*mask  = ~0ULL;
422 	/* poke holes for the csmask */
423 	*mask &= ~(mask_bits << addr_shift);
424 	/* OR them in */
425 	*mask |= (csmask & mask_bits) << addr_shift;
426 }
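
/*
 * Worked example for the default (e.g. F10h) branch above, using made-up
 * register values: a DCSB of 0x00100000 has only bit 20 set, which lies inside
 * base_bits, so *base = 0x00100000 << 8 = 0x10000000, i.e. the chip select
 * starts at 256MB. Bits that are set in the DCSM end up cleared in ~(*mask),
 * so they act as "don't care" bits when input_addr_to_csrow() compares an
 * InputAddr against the base.
 */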
427 
428 #define for_each_chip_select(i, dct, pvt) \
429 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
430 
431 #define chip_select_base(i, dct, pvt) \
432 	pvt->csels[dct].csbases[i]
433 
434 #define for_each_chip_select_mask(i, dct, pvt) \
435 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
436 
437 #define for_each_umc(i) \
438 	for (i = 0; i < pvt->max_mcs; i++)
439 
440 /*
441  * @input_addr is an InputAddr associated with the node given by mci. Return the
442  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
443  */
444 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
445 {
446 	struct amd64_pvt *pvt;
447 	int csrow;
448 	u64 base, mask;
449 
450 	pvt = mci->pvt_info;
451 
452 	for_each_chip_select(csrow, 0, pvt) {
453 		if (!csrow_enabled(csrow, 0, pvt))
454 			continue;
455 
456 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
457 
458 		mask = ~mask;
459 
460 		if ((input_addr & mask) == (base & mask)) {
461 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
462 				 (unsigned long)input_addr, csrow,
463 				 pvt->mc_node_id);
464 
465 			return csrow;
466 		}
467 	}
468 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
469 		 (unsigned long)input_addr, pvt->mc_node_id);
470 
471 	return -1;
472 }
473 
474 /*
475  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
476  * for the node represented by mci. Info is passed back in *hole_base,
477  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
478  * info is invalid. Info may be invalid for either of the following reasons:
479  *
480  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
481  *   Address Register does not exist.
482  *
483  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
484  *   indicating that its contents are not valid.
485  *
486  * The values passed back in *hole_base, *hole_offset, and *hole_size are
487  * complete 32-bit values despite the fact that the bitfields in the DHAR
488  * only represent bits 31-24 of the base and offset values.
489  */
490 static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
491 			      u64 *hole_offset, u64 *hole_size)
492 {
493 	struct amd64_pvt *pvt = mci->pvt_info;
494 
495 	/* only revE and later have the DRAM Hole Address Register */
496 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
497 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
498 			 pvt->ext_model, pvt->mc_node_id);
499 		return 1;
500 	}
501 
502 	/* valid for Fam10h and above */
503 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
504 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
505 		return 1;
506 	}
507 
508 	if (!dhar_valid(pvt)) {
509 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
510 			 pvt->mc_node_id);
511 		return 1;
512 	}
513 
514 	/* This node has Memory Hoisting */
515 
516 	/* +------------------+--------------------+--------------------+-----
517 	 * | memory           | DRAM hole          | relocated          |
518 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
519 	 * |                  |                    | DRAM hole          |
520 	 * |                  |                    | [0x100000000,      |
521 	 * |                  |                    |  (0x100000000+     |
522 	 * |                  |                    |   (0xffffffff-x))] |
523 	 * +------------------+--------------------+--------------------+-----
524 	 *
525 	 * Above is a diagram of physical memory showing the DRAM hole and the
526 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
527 	 * starts at address x (the base address) and extends through address
528 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
529 	 * addresses in the hole so that they start at 0x100000000.
530 	 */
531 
532 	*hole_base = dhar_base(pvt);
533 	*hole_size = (1ULL << 32) - *hole_base;
534 
535 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
536 					: k8_dhar_offset(pvt);
537 
538 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
539 		 pvt->mc_node_id, (unsigned long)*hole_base,
540 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
541 
542 	return 0;
543 }
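
/*
 * Example (editor's sketch, made-up DHAR contents): a BIOS that places the
 * MMIO hole at 3GB would typically yield hole_base = 0xC0000000,
 * hole_size = 0x40000000 (the 1GB from the base up to 4GB) and, on F10h and
 * later, hole_offset = 0x40000000, so that the hoisted SysAddr 0x100000000
 * translates back to DramAddr 0xC0000000 in sys_addr_to_dram_addr().
 */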
544 
545 #ifdef CONFIG_EDAC_DEBUG
546 #define EDAC_DCT_ATTR_SHOW(reg)						\
547 static ssize_t reg##_show(struct device *dev,				\
548 			 struct device_attribute *mattr, char *data)	\
549 {									\
550 	struct mem_ctl_info *mci = to_mci(dev);				\
551 	struct amd64_pvt *pvt = mci->pvt_info;				\
552 									\
553 	return sprintf(data, "0x%016llx\n", (u64)pvt->reg);		\
554 }
555 
556 EDAC_DCT_ATTR_SHOW(dhar);
557 EDAC_DCT_ATTR_SHOW(dbam0);
558 EDAC_DCT_ATTR_SHOW(top_mem);
559 EDAC_DCT_ATTR_SHOW(top_mem2);
560 
561 static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
562 			      char *data)
563 {
564 	struct mem_ctl_info *mci = to_mci(dev);
565 
566 	u64 hole_base = 0;
567 	u64 hole_offset = 0;
568 	u64 hole_size = 0;
569 
570 	get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
571 
572 	return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
573 						 hole_size);
574 }
575 
576 /*
577  * update NUM_DBG_ATTRS in case you add new members
578  */
579 static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
580 static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
581 static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
582 static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
583 static DEVICE_ATTR_RO(dram_hole);
584 
585 static struct attribute *dbg_attrs[] = {
586 	&dev_attr_dhar.attr,
587 	&dev_attr_dbam.attr,
588 	&dev_attr_topmem.attr,
589 	&dev_attr_topmem2.attr,
590 	&dev_attr_dram_hole.attr,
591 	NULL
592 };
593 
594 static const struct attribute_group dbg_group = {
595 	.attrs = dbg_attrs,
596 };
597 
598 static ssize_t inject_section_show(struct device *dev,
599 				   struct device_attribute *mattr, char *buf)
600 {
601 	struct mem_ctl_info *mci = to_mci(dev);
602 	struct amd64_pvt *pvt = mci->pvt_info;
603 	return sprintf(buf, "0x%x\n", pvt->injection.section);
604 }
605 
606 /*
607  * store error injection section value which refers to one of 4 16-byte sections
608  * within a 64-byte cacheline
609  *
610  * range: 0..3
611  */
612 static ssize_t inject_section_store(struct device *dev,
613 				    struct device_attribute *mattr,
614 				    const char *data, size_t count)
615 {
616 	struct mem_ctl_info *mci = to_mci(dev);
617 	struct amd64_pvt *pvt = mci->pvt_info;
618 	unsigned long value;
619 	int ret;
620 
621 	ret = kstrtoul(data, 10, &value);
622 	if (ret < 0)
623 		return ret;
624 
625 	if (value > 3) {
626 		amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
627 		return -EINVAL;
628 	}
629 
630 	pvt->injection.section = (u32) value;
631 	return count;
632 }
633 
634 static ssize_t inject_word_show(struct device *dev,
635 				struct device_attribute *mattr, char *buf)
636 {
637 	struct mem_ctl_info *mci = to_mci(dev);
638 	struct amd64_pvt *pvt = mci->pvt_info;
639 	return sprintf(buf, "0x%x\n", pvt->injection.word);
640 }
641 
642 /*
643  * store error injection word value which refers to one of 9 16-bit words of the
644  * 16-byte (128-bit + ECC bits) section
645  *
646  * range: 0..8
647  */
648 static ssize_t inject_word_store(struct device *dev,
649 				 struct device_attribute *mattr,
650 				 const char *data, size_t count)
651 {
652 	struct mem_ctl_info *mci = to_mci(dev);
653 	struct amd64_pvt *pvt = mci->pvt_info;
654 	unsigned long value;
655 	int ret;
656 
657 	ret = kstrtoul(data, 10, &value);
658 	if (ret < 0)
659 		return ret;
660 
661 	if (value > 8) {
662 		amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
663 		return -EINVAL;
664 	}
665 
666 	pvt->injection.word = (u32) value;
667 	return count;
668 }
669 
670 static ssize_t inject_ecc_vector_show(struct device *dev,
671 				      struct device_attribute *mattr,
672 				      char *buf)
673 {
674 	struct mem_ctl_info *mci = to_mci(dev);
675 	struct amd64_pvt *pvt = mci->pvt_info;
676 	return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
677 }
678 
679 /*
680  * store 16-bit error injection vector which enables injecting errors into the
681  * corresponding bit within the error injection word above. When used during a
682  * DRAM ECC read, it holds the contents of the DRAM ECC bits.
683  */
684 static ssize_t inject_ecc_vector_store(struct device *dev,
685 				       struct device_attribute *mattr,
686 				       const char *data, size_t count)
687 {
688 	struct mem_ctl_info *mci = to_mci(dev);
689 	struct amd64_pvt *pvt = mci->pvt_info;
690 	unsigned long value;
691 	int ret;
692 
693 	ret = kstrtoul(data, 16, &value);
694 	if (ret < 0)
695 		return ret;
696 
697 	if (value & 0xFFFF0000) {
698 		amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
699 		return -EINVAL;
700 	}
701 
702 	pvt->injection.bit_map = (u32) value;
703 	return count;
704 }
705 
706 /*
707  * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
708  * fields needed by the injection registers and read the NB Array Data Port.
709  */
710 static ssize_t inject_read_store(struct device *dev,
711 				 struct device_attribute *mattr,
712 				 const char *data, size_t count)
713 {
714 	struct mem_ctl_info *mci = to_mci(dev);
715 	struct amd64_pvt *pvt = mci->pvt_info;
716 	unsigned long value;
717 	u32 section, word_bits;
718 	int ret;
719 
720 	ret = kstrtoul(data, 10, &value);
721 	if (ret < 0)
722 		return ret;
723 
724 	/* Form value to choose 16-byte section of cacheline */
725 	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
726 
727 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
728 
729 	word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
730 
731 	/* Issue 'word' and 'bit' along with the READ request */
732 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
733 
734 	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
735 
736 	return count;
737 }
738 
739 /*
740  * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
741  * fields needed by the injection registers.
742  */
743 static ssize_t inject_write_store(struct device *dev,
744 				  struct device_attribute *mattr,
745 				  const char *data, size_t count)
746 {
747 	struct mem_ctl_info *mci = to_mci(dev);
748 	struct amd64_pvt *pvt = mci->pvt_info;
749 	u32 section, word_bits, tmp;
750 	unsigned long value;
751 	int ret;
752 
753 	ret = kstrtoul(data, 10, &value);
754 	if (ret < 0)
755 		return ret;
756 
757 	/* Form value to choose 16-byte section of cacheline */
758 	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
759 
760 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
761 
762 	word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
763 
764 	pr_notice_once("Don't forget to decrease MCE polling interval in\n"
765 			"/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
766 			"so that you can get the error report faster.\n");
767 
768 	on_each_cpu(disable_caches, NULL, 1);
769 
770 	/* Issue 'word' and 'bit' along with the WRITE request */
771 	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
772 
773  retry:
774 	/* wait until injection happens */
775 	amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
776 	if (tmp & F10_NB_ARR_ECC_WR_REQ) {
777 		cpu_relax();
778 		goto retry;
779 	}
780 
781 	on_each_cpu(enable_caches, NULL, 1);
782 
783 	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
784 
785 	return count;
786 }
787 
788 /*
789  * update NUM_INJ_ATTRS in case you add new members
790  */
791 
792 static DEVICE_ATTR_RW(inject_section);
793 static DEVICE_ATTR_RW(inject_word);
794 static DEVICE_ATTR_RW(inject_ecc_vector);
795 static DEVICE_ATTR_WO(inject_write);
796 static DEVICE_ATTR_WO(inject_read);
797 
798 static struct attribute *inj_attrs[] = {
799 	&dev_attr_inject_section.attr,
800 	&dev_attr_inject_word.attr,
801 	&dev_attr_inject_ecc_vector.attr,
802 	&dev_attr_inject_write.attr,
803 	&dev_attr_inject_read.attr,
804 	NULL
805 };
806 
807 static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
808 {
809 	struct device *dev = kobj_to_dev(kobj);
810 	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
811 	struct amd64_pvt *pvt = mci->pvt_info;
812 
813 	/* Families which have that injection hw */
814 	if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
815 		return attr->mode;
816 
817 	return 0;
818 }
819 
820 static const struct attribute_group inj_group = {
821 	.attrs = inj_attrs,
822 	.is_visible = inj_is_visible,
823 };
824 #endif /* CONFIG_EDAC_DEBUG */
825 
826 /*
827  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
828  * assumed that sys_addr maps to the node given by mci.
829  *
830  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
831  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
832  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
833  * then it is also involved in translating a SysAddr to a DramAddr. Sections
834  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
835  * These parts of the documentation are unclear. I interpret them as follows:
836  *
837  * When node n receives a SysAddr, it processes the SysAddr as follows:
838  *
839  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
840  *    Limit registers for node n. If the SysAddr is not within the range
841  *    specified by the base and limit values, then node n ignores the SysAddr
842  *    (since it does not map to node n). Otherwise continue to step 2 below.
843  *
844  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
845  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
846  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
847  *    hole. If not, skip to step 3 below. Else get the value of the
848  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
849  *    offset defined by this value from the SysAddr.
850  *
851  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
852  *    Base register for node n. To obtain the DramAddr, subtract the base
853  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
854  */
855 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
856 {
857 	struct amd64_pvt *pvt = mci->pvt_info;
858 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
859 	int ret;
860 
861 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
862 
863 	ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
864 	if (!ret) {
865 		if ((sys_addr >= (1ULL << 32)) &&
866 		    (sys_addr < ((1ULL << 32) + hole_size))) {
867 			/* use DHAR to translate SysAddr to DramAddr */
868 			dram_addr = sys_addr - hole_offset;
869 
870 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
871 				 (unsigned long)sys_addr,
872 				 (unsigned long)dram_addr);
873 
874 			return dram_addr;
875 		}
876 	}
877 
878 	/*
879 	 * Translate the SysAddr to a DramAddr as shown near the start of
880 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
881 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
882 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
883 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
884 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
885 	 * Programmer's Manual Volume 1 Application Programming.
886 	 */
887 	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
888 
889 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
890 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
891 	return dram_addr;
892 }
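
/*
 * Continuing the made-up DHAR example above (dram_base = 0, hole_offset =
 * hole_size = 0x40000000): SysAddr 0x80000000 lies below 4GB, so it maps
 * straight through to DramAddr 0x80000000, while SysAddr 0x123456789 lies in
 * the hoisted region [4GB, 4GB + hole_size) and maps to DramAddr
 * 0x123456789 - 0x40000000 = 0xE3456789.
 */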
893 
894 /*
895  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
896  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
897  * for node interleaving.
898  */
899 static int num_node_interleave_bits(unsigned intlv_en)
900 {
901 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
902 	int n;
903 
904 	BUG_ON(intlv_en > 7);
905 	n = intlv_shift_table[intlv_en];
906 	return n;
907 }
908 
909 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
910 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
911 {
912 	struct amd64_pvt *pvt;
913 	int intlv_shift;
914 	u64 input_addr;
915 
916 	pvt = mci->pvt_info;
917 
918 	/*
919 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
920 	 * concerning translating a DramAddr to an InputAddr.
921 	 */
922 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
923 	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
924 		      (dram_addr & 0xfff);
925 
926 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
927 		 intlv_shift, (unsigned long)dram_addr,
928 		 (unsigned long)input_addr);
929 
930 	return input_addr;
931 }
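
/*
 * Example (editor's sketch, made-up address): with IntlvEn = 0x3 the table in
 * num_node_interleave_bits() gives intlv_shift = 2, so for DramAddr 0x12345678
 * the two node-select bits above bit 11 are squeezed out:
 *
 *	input_addr = ((0x12345678 >> 2) & GENMASK_ULL(35, 12)) + 0x678
 *		   = 0x48d1000 + 0x678 = 0x48d1678
 */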
932 
933 /*
934  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
935  * assumed that @sys_addr maps to the node given by mci.
936  */
937 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
938 {
939 	u64 input_addr;
940 
941 	input_addr =
942 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
943 
944 	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
945 		 (unsigned long)sys_addr, (unsigned long)input_addr);
946 
947 	return input_addr;
948 }
949 
950 /* Map the Error address to a PAGE and PAGE OFFSET. */
951 static inline void error_address_to_page_and_offset(u64 error_address,
952 						    struct err_info *err)
953 {
954 	err->page = (u32) (error_address >> PAGE_SHIFT);
955 	err->offset = ((u32) error_address) & ~PAGE_MASK;
956 }
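
/*
 * Editor's example: with 4K pages an error address of 0x12345678 is reported
 * as page 0x12345, offset 0x678.
 */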
957 
958 /*
959  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
960  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
961  * of a node that detected an ECC memory error.  mci represents the node that
962  * the error address maps to (possibly different from the node that detected
963  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
964  * error.
965  */
966 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
967 {
968 	int csrow;
969 
970 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
971 
972 	if (csrow == -1)
973 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
974 				  "address 0x%lx\n", (unsigned long)sys_addr);
975 	return csrow;
976 }
977 
978 /* Protect the PCI config register pairs used for DF indirect access. */
979 static DEFINE_MUTEX(df_indirect_mutex);
980 
981 /*
982  * Data Fabric Indirect Access uses FICAA/FICAD.
983  *
984  * Fabric Indirect Configuration Access Address (FICAA): Constructed based
985  * on the device's Instance Id and the PCI function and register offset of
986  * the desired register.
987  *
988  * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
989  * and FICAD HI registers but so far we only need the LO register.
990  *
991  * Use Instance Id 0xFF to indicate a broadcast read.
992  */
993 #define DF_BROADCAST	0xFF
994 static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
995 {
996 	struct pci_dev *F4;
997 	u32 ficaa;
998 	int err = -ENODEV;
999 
1000 	if (node >= amd_nb_num())
1001 		goto out;
1002 
1003 	F4 = node_to_amd_nb(node)->link;
1004 	if (!F4)
1005 		goto out;
1006 
1007 	ficaa  = (instance_id == DF_BROADCAST) ? 0 : 1;
1008 	ficaa |= reg & 0x3FC;
1009 	ficaa |= (func & 0x7) << 11;
1010 	ficaa |= instance_id << 16;
1011 
1012 	mutex_lock(&df_indirect_mutex);
1013 
1014 	err = pci_write_config_dword(F4, 0x5C, ficaa);
1015 	if (err) {
1016 		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
1017 		goto out_unlock;
1018 	}
1019 
1020 	err = pci_read_config_dword(F4, 0x98, lo);
1021 	if (err)
1022 		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
1023 
1024 out_unlock:
1025 	mutex_unlock(&df_indirect_mutex);
1026 
1027 out:
1028 	return err;
1029 }
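
/*
 * FICAA encoding example (editor's sketch with made-up parameters): a targeted
 * read of D18F0x110 from instance 5 builds
 *
 *	ficaa = 1 | (0x110 & 0x3FC) | ((0 & 0x7) << 11) | (5 << 16) = 0x50111
 *
 * i.e. bit 0 selects instance (vs. broadcast) mode, bits [9:2] carry the
 * register offset, bits [13:11] the function and bits [23:16] the instance id.
 */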
1030 
1031 static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
1032 {
1033 	return __df_indirect_read(node, func, reg, instance_id, lo);
1034 }
1035 
1036 static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
1037 {
1038 	return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
1039 }
1040 
1041 struct addr_ctx {
1042 	u64 ret_addr;
1043 	u32 tmp;
1044 	u16 nid;
1045 	u8 inst_id;
1046 };
1047 
1048 static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
1049 {
1050 	u64 dram_base_addr, dram_limit_addr, dram_hole_base;
1051 
1052 	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
1053 	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
1054 	u8 intlv_addr_sel, intlv_addr_bit;
1055 	u8 num_intlv_bits, hashed_bit;
1056 	u8 lgcy_mmio_hole_en, base = 0;
1057 	u8 cs_mask, cs_id = 0;
1058 	bool hash_enabled = false;
1059 
1060 	struct addr_ctx ctx;
1061 
1062 	memset(&ctx, 0, sizeof(ctx));
1063 
1064 	/* Start from the normalized address */
1065 	ctx.ret_addr = norm_addr;
1066 
1067 	ctx.nid = nid;
1068 	ctx.inst_id = umc;
1069 
1070 	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
1071 	if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
1072 		goto out_err;
1073 
1074 	/* Remove HiAddrOffset from normalized address, if enabled: */
1075 	if (ctx.tmp & BIT(0)) {
1076 		u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8;
1077 
1078 		if (norm_addr >= hi_addr_offset) {
1079 			ctx.ret_addr -= hi_addr_offset;
1080 			base = 1;
1081 		}
1082 	}
1083 
1084 	/* Read D18F0x110 (DramBaseAddress). */
1085 	if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp))
1086 		goto out_err;
1087 
1088 	/* Check if address range is valid. */
1089 	if (!(ctx.tmp & BIT(0))) {
1090 		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
1091 			__func__, ctx.tmp);
1092 		goto out_err;
1093 	}
1094 
1095 	lgcy_mmio_hole_en = ctx.tmp & BIT(1);
1096 	intlv_num_chan	  = (ctx.tmp >> 4) & 0xF;
1097 	intlv_addr_sel	  = (ctx.tmp >> 8) & 0x7;
1098 	dram_base_addr	  = (ctx.tmp & GENMASK_ULL(31, 12)) << 16;
1099 
1100 	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
1101 	if (intlv_addr_sel > 3) {
1102 		pr_err("%s: Invalid interleave address select %d.\n",
1103 			__func__, intlv_addr_sel);
1104 		goto out_err;
1105 	}
1106 
1107 	/* Read D18F0x114 (DramLimitAddress). */
1108 	if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
1109 		goto out_err;
1110 
1111 	intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
1112 	intlv_num_dies	  = (ctx.tmp >> 10) & 0x3;
1113 	dram_limit_addr	  = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
1114 
1115 	intlv_addr_bit = intlv_addr_sel + 8;
1116 
1117 	/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
1118 	switch (intlv_num_chan) {
1119 	case 0:	intlv_num_chan = 0; break;
1120 	case 1: intlv_num_chan = 1; break;
1121 	case 3: intlv_num_chan = 2; break;
1122 	case 5:	intlv_num_chan = 3; break;
1123 	case 7:	intlv_num_chan = 4; break;
1124 
1125 	case 8: intlv_num_chan = 1;
1126 		hash_enabled = true;
1127 		break;
1128 	default:
1129 		pr_err("%s: Invalid number of interleaved channels %d.\n",
1130 			__func__, intlv_num_chan);
1131 		goto out_err;
1132 	}
1133 
1134 	num_intlv_bits = intlv_num_chan;
1135 
1136 	if (intlv_num_dies > 2) {
1137 		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
1138 			__func__, intlv_num_dies);
1139 		goto out_err;
1140 	}
1141 
1142 	num_intlv_bits += intlv_num_dies;
1143 
1144 	/* Add a bit if sockets are interleaved. */
1145 	num_intlv_bits += intlv_num_sockets;
1146 
1147 	/* Assert num_intlv_bits <= 4 */
1148 	if (num_intlv_bits > 4) {
1149 		pr_err("%s: Invalid interleave bits %d.\n",
1150 			__func__, num_intlv_bits);
1151 		goto out_err;
1152 	}
1153 
1154 	if (num_intlv_bits > 0) {
1155 		u64 temp_addr_x, temp_addr_i, temp_addr_y;
1156 		u8 die_id_bit, sock_id_bit, cs_fabric_id;
1157 
1158 		/*
1159 		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
1160 		 * This is the fabric id for this coherent slave. Use
1161 		 * umc/channel# as instance id of the coherent slave
1162 		 * for FICAA.
1163 		 */
1164 		if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
1165 			goto out_err;
1166 
1167 		cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
1168 		die_id_bit   = 0;
1169 
1170 		/* If interleaved over more than 1 channel: */
1171 		if (intlv_num_chan) {
1172 			die_id_bit = intlv_num_chan;
1173 			cs_mask	   = (1 << die_id_bit) - 1;
1174 			cs_id	   = cs_fabric_id & cs_mask;
1175 		}
1176 
1177 		sock_id_bit = die_id_bit;
1178 
1179 		/* Read D18F1x208 (SystemFabricIdMask). */
1180 		if (intlv_num_dies || intlv_num_sockets)
1181 			if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp))
1182 				goto out_err;
1183 
1184 		/* If interleaved over more than 1 die. */
1185 		if (intlv_num_dies) {
1186 			sock_id_bit  = die_id_bit + intlv_num_dies;
1187 			die_id_shift = (ctx.tmp >> 24) & 0xF;
1188 			die_id_mask  = (ctx.tmp >> 8) & 0xFF;
1189 
1190 			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
1191 		}
1192 
1193 		/* If interleaved over more than 1 socket. */
1194 		if (intlv_num_sockets) {
1195 			socket_id_shift	= (ctx.tmp >> 28) & 0xF;
1196 			socket_id_mask	= (ctx.tmp >> 16) & 0xFF;
1197 
1198 			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
1199 		}
1200 
1201 		/*
1202 		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
1203 		 * where III is the ID for this CS, and XXXXXXYYYYY are the
1204 		 * address bits from the post-interleaved address.
1205 		 * "num_intlv_bits" has been calculated to tell us how many "I"
1206 		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
1207 		 * there are (where "I" starts).
1208 		 */
1209 		temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0);
1210 		temp_addr_i = (cs_id << intlv_addr_bit);
1211 		temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
1212 		ctx.ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
1213 	}
1214 
1215 	/* Add dram base address */
1216 	ctx.ret_addr += dram_base_addr;
1217 
1218 	/* If legacy MMIO hole enabled */
1219 	if (lgcy_mmio_hole_en) {
1220 		if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp))
1221 			goto out_err;
1222 
1223 		dram_hole_base = ctx.tmp & GENMASK(31, 24);
1224 		if (ctx.ret_addr >= dram_hole_base)
1225 			ctx.ret_addr += (BIT_ULL(32) - dram_hole_base);
1226 	}
1227 
1228 	if (hash_enabled) {
1229 		/* Save some parentheses and grab ls-bit at the end. */
1230 		hashed_bit =	(ctx.ret_addr >> 12) ^
1231 				(ctx.ret_addr >> 18) ^
1232 				(ctx.ret_addr >> 21) ^
1233 				(ctx.ret_addr >> 30) ^
1234 				cs_id;
1235 
1236 		hashed_bit &= BIT(0);
1237 
1238 		if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0)))
1239 			ctx.ret_addr ^= BIT(intlv_addr_bit);
1240 	}
1241 
1242 	/* Is the calculated system address above the DRAM limit address? */
1243 	if (ctx.ret_addr > dram_limit_addr)
1244 		goto out_err;
1245 
1246 	*sys_addr = ctx.ret_addr;
1247 	return 0;
1248 
1249 out_err:
1250 	return -EINVAL;
1251 }
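
/*
 * De-interleave example for the XXXXXXIIIYYYYY step above (editor's sketch,
 * made-up values): with a 2-channel interleave on address bit 8
 * (num_intlv_bits = 1, intlv_addr_bit = 8) and cs_id = 1, a normalized
 * address of 0x1234 expands to
 *
 *	temp_addr_y = 0x1234 & GENMASK_ULL(7, 0)         = 0x34
 *	temp_addr_i = 1 << 8                             = 0x100
 *	temp_addr_x = (0x1234 & GENMASK_ULL(63, 8)) << 1 = 0x2400
 *	ret_addr    = 0x2400 | 0x100 | 0x34              = 0x2534
 *
 * before the DRAM base, legacy hole and hash adjustments are applied.
 */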
1252 
1253 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
1254 
1255 /*
1256  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
1257  * are ECC capable.
1258  */
1259 static unsigned long dct_determine_edac_cap(struct amd64_pvt *pvt)
1260 {
1261 	unsigned long edac_cap = EDAC_FLAG_NONE;
1262 	u8 bit;
1263 
1264 	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
1265 		? 19
1266 		: 17;
1267 
1268 	if (pvt->dclr0 & BIT(bit))
1269 		edac_cap = EDAC_FLAG_SECDED;
1270 
1271 	return edac_cap;
1272 }
1273 
1274 static unsigned long umc_determine_edac_cap(struct amd64_pvt *pvt)
1275 {
1276 	u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
1277 	unsigned long edac_cap = EDAC_FLAG_NONE;
1278 
1279 	for_each_umc(i) {
1280 		if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
1281 			continue;
1282 
1283 		umc_en_mask |= BIT(i);
1284 
1285 		/* UMC Configuration bit 12 (DimmEccEn) */
1286 		if (pvt->umc[i].umc_cfg & BIT(12))
1287 			dimm_ecc_en_mask |= BIT(i);
1288 	}
1289 
1290 	if (umc_en_mask == dimm_ecc_en_mask)
1291 		edac_cap = EDAC_FLAG_SECDED;
1292 
1293 	return edac_cap;
1294 }
1295 
1296 /*
1297  * debug routine to display the memory sizes of all logical DIMMs and their
1298  * CSROWs
1299  */
1300 static void dct_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1301 {
1302 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1303 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
1304 	int dimm, size0, size1;
1305 
1306 	if (pvt->fam == 0xf) {
1307 		/* K8 families < revF not supported yet */
1308 		if (pvt->ext_model < K8_REV_F)
1309 			return;
1310 
1311 		WARN_ON(ctrl != 0);
1312 	}
1313 
1314 	if (pvt->fam == 0x10) {
1315 		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
1316 							   : pvt->dbam0;
1317 		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
1318 				 pvt->csels[1].csbases :
1319 				 pvt->csels[0].csbases;
1320 	} else if (ctrl) {
1321 		dbam = pvt->dbam0;
1322 		dcsb = pvt->csels[1].csbases;
1323 	}
1324 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1325 		 ctrl, dbam);
1326 
1327 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1328 
1329 	/* Dump memory sizes for DIMM and its CSROWs */
1330 	for (dimm = 0; dimm < 4; dimm++) {
1331 		size0 = 0;
1332 		if (dcsb[dimm * 2] & DCSB_CS_ENABLE)
1333 			/*
1334 			 * For F15h M60h, we need a multiplier for the LRDIMM
1335 			 * cs_size calculation. We pass the dimm value to the
1336 			 * dbam_to_cs mapper so we can find the multiplier from the
1337 			 * corresponding DCSM.
1338 			 */
1339 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1340 						     DBAM_DIMM(dimm, dbam),
1341 						     dimm);
1342 
1343 		size1 = 0;
1344 		if (dcsb[dimm * 2 + 1] & DCSB_CS_ENABLE)
1345 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1346 						     DBAM_DIMM(dimm, dbam),
1347 						     dimm);
1348 
1349 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1350 			   dimm * 2,     size0,
1351 			   dimm * 2 + 1, size1);
1352 	}
1353 }
1354 
1355 
1356 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
1357 {
1358 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
1359 
1360 	if (pvt->dram_type == MEM_LRDDR3) {
1361 		u32 dcsm = pvt->csels[chan].csmasks[0];
1362 		/*
1363 		 * It's assumed all LRDIMMs in a DCT are going to be of the
1364 		 * same 'type' until proven otherwise. So, use a cs
1365 		 * value of '0' here to get the dcsm value.
1366 		 */
1367 		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
1368 	}
1369 
1370 	edac_dbg(1, "All DIMMs support ECC:%s\n",
1371 		    (dclr & BIT(19)) ? "yes" : "no");
1372 
1373 
1374 	edac_dbg(1, "  PAR/ERR parity: %s\n",
1375 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
1376 
1377 	if (pvt->fam == 0x10)
1378 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
1379 			 (dclr & BIT(11)) ?  "128b" : "64b");
1380 
1381 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
1382 		 (dclr & BIT(12)) ?  "yes" : "no",
1383 		 (dclr & BIT(13)) ?  "yes" : "no",
1384 		 (dclr & BIT(14)) ?  "yes" : "no",
1385 		 (dclr & BIT(15)) ?  "yes" : "no");
1386 }
1387 
1388 #define CS_EVEN_PRIMARY		BIT(0)
1389 #define CS_ODD_PRIMARY		BIT(1)
1390 #define CS_EVEN_SECONDARY	BIT(2)
1391 #define CS_ODD_SECONDARY	BIT(3)
1392 #define CS_3R_INTERLEAVE	BIT(4)
1393 
1394 #define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
1395 #define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)
1396 
1397 static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
1398 {
1399 	u8 base, count = 0;
1400 	int cs_mode = 0;
1401 
1402 	if (csrow_enabled(2 * dimm, ctrl, pvt))
1403 		cs_mode |= CS_EVEN_PRIMARY;
1404 
1405 	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
1406 		cs_mode |= CS_ODD_PRIMARY;
1407 
1408 	/* Asymmetric dual-rank DIMM support. */
1409 	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
1410 		cs_mode |= CS_ODD_SECONDARY;
1411 
1412 	/*
1413 	 * 3 Rank interleaving support.
1414 	 * There should be only three bases enabled and their two masks should
1415 	 * be equal.
1416 	 */
1417 	for_each_chip_select(base, ctrl, pvt)
1418 		count += csrow_enabled(base, ctrl, pvt);
1419 
1420 	if (count == 3 &&
1421 	    pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
1422 		edac_dbg(1, "3R interleaving in use.\n");
1423 		cs_mode |= CS_3R_INTERLEAVE;
1424 	}
1425 
1426 	return cs_mode;
1427 }
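
/*
 * For instance, a single symmetric dual-rank DIMM in slot 0 (CS0 and CS1
 * enabled, no secondary masks) yields cs_mode = CS_EVEN_PRIMARY |
 * CS_ODD_PRIMARY = 0x3 for dimm 0, and 0 for the empty dimm 1.
 */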
1428 
1429 static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1430 				    unsigned int cs_mode, int csrow_nr)
1431 {
1432 	u32 addr_mask_orig, addr_mask_deinterleaved;
1433 	u32 msb, weight, num_zero_bits;
1434 	int cs_mask_nr = csrow_nr;
1435 	int dimm, size = 0;
1436 
1437 	/* No Chip Selects are enabled. */
1438 	if (!cs_mode)
1439 		return size;
1440 
1441 	/* Requested size of an even CS but none are enabled. */
1442 	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1443 		return size;
1444 
1445 	/* Requested size of an odd CS but none are enabled. */
1446 	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1447 		return size;
1448 
1449 	/*
1450 	 * Family 17h introduced systems with one mask per DIMM,
1451 	 * and two Chip Selects per DIMM.
1452 	 *
1453 	 *	CS0 and CS1 -> MASK0 / DIMM0
1454 	 *	CS2 and CS3 -> MASK1 / DIMM1
1455 	 *
1456 	 * Family 19h Model 10h introduced systems with one mask per Chip Select,
1457 	 * and two Chip Selects per DIMM.
1458 	 *
1459 	 *	CS0 -> MASK0 -> DIMM0
1460 	 *	CS1 -> MASK1 -> DIMM0
1461 	 *	CS2 -> MASK2 -> DIMM1
1462 	 *	CS3 -> MASK3 -> DIMM1
1463 	 *
1464 	 * Keep the mask number equal to the Chip Select number for newer systems,
1465 	 * and shift the mask number for older systems.
1466 	 */
1467 	dimm = csrow_nr >> 1;
1468 
1469 	if (!pvt->flags.zn_regs_v2)
1470 		cs_mask_nr >>= 1;
1471 
1472 	/* Asymmetric dual-rank DIMM support. */
1473 	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1474 		addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
1475 	else
1476 		addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
1477 
1478 	/*
1479 	 * The number of zero bits in the mask is equal to the number of bits
1480 	 * in a full mask minus the number of bits in the current mask.
1481 	 *
1482 	 * The MSB is the number of bits in the full mask because BIT[0] is
1483 	 * always 0.
1484 	 *
1485 	 * In the special 3 Rank interleaving case, a single bit is flipped
1486 	 * without swapping with the most significant bit. This can be handled
1487 	 * by keeping the MSB where it is and ignoring the single zero bit.
1488 	 */
1489 	msb = fls(addr_mask_orig) - 1;
1490 	weight = hweight_long(addr_mask_orig);
1491 	num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
1492 
1493 	/* Take the number of zero bits off from the top of the mask. */
1494 	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
1495 
1496 	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1497 	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
1498 	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1499 
1500 	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
1501 	size = (addr_mask_deinterleaved >> 2) + 1;
1502 
1503 	/* Return size in MBs. */
1504 	return size >> 10;
1505 }
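
/*
 * Worked example (editor's sketch, made-up mask value): an interleaved
 * address mask of 0x7FFFFC (bits [22:2] set) gives msb = 22, weight = 21 and
 * hence one zero bit to fold out, so addr_mask_deinterleaved becomes
 * GENMASK_ULL(21, 1) = 0x3FFFFE. The size then works out to
 * ((0x3FFFFE >> 2) + 1) kB = 0x100000 kB, i.e. a 1024MB (1GB) chip select.
 */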
1506 
1507 static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1508 {
1509 	int dimm, size0, size1, cs0, cs1, cs_mode;
1510 
1511 	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
1512 
1513 	for (dimm = 0; dimm < 2; dimm++) {
1514 		cs0 = dimm * 2;
1515 		cs1 = dimm * 2 + 1;
1516 
1517 		cs_mode = umc_get_cs_mode(dimm, ctrl, pvt);
1518 
1519 		size0 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs0);
1520 		size1 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs1);
1521 
1522 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1523 				cs0,	size0,
1524 				cs1,	size1);
1525 	}
1526 }
1527 
1528 static void umc_dump_misc_regs(struct amd64_pvt *pvt)
1529 {
1530 	struct amd64_umc *umc;
1531 	u32 i, tmp, umc_base;
1532 
1533 	for_each_umc(i) {
1534 		umc_base = get_umc_base(i);
1535 		umc = &pvt->umc[i];
1536 
1537 		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
1538 		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
1539 		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
1540 		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
1541 
1542 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
1543 		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
1544 
1545 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
1546 		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
1547 		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
1548 
1549 		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
1550 				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
1551 				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
1552 		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
1553 				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
1554 		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
1555 				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
1556 		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
1557 				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
1558 
1559 		if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
1560 			amd_smn_read(pvt->mc_node_id,
1561 				     umc_base + get_umc_reg(pvt, UMCCH_ADDR_CFG),
1562 				     &tmp);
1563 			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
1564 					i, 1 << ((tmp >> 4) & 0x3));
1565 		}
1566 
1567 		umc_debug_display_dimm_sizes(pvt, i);
1568 	}
1569 }
1570 
1571 static void dct_dump_misc_regs(struct amd64_pvt *pvt)
1572 {
1573 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
1574 
1575 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
1576 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
1577 
1578 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
1579 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
1580 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
1581 
1582 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
1583 
1584 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
1585 
1586 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
1587 		 pvt->dhar, dhar_base(pvt),
1588 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
1589 				   : f10_dhar_offset(pvt));
1590 
1591 	dct_debug_display_dimm_sizes(pvt, 0);
1592 
1593 	/* everything below this point is Fam10h and above */
1594 	if (pvt->fam == 0xf)
1595 		return;
1596 
1597 	dct_debug_display_dimm_sizes(pvt, 1);
1598 
1599 	/* Only if NOT ganged does dclr1 have valid info */
1600 	if (!dct_ganging_enabled(pvt))
1601 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
1602 
1603 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
1604 
1605 	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
1606 }
1607 
1608 /*
1609  * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
1610  */
1611 static void dct_prep_chip_selects(struct amd64_pvt *pvt)
1612 {
1613 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
1614 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1615 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
1616 	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
1617 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
1618 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
1619 	} else {
1620 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
1621 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
1622 	}
1623 }
1624 
1625 static void umc_prep_chip_selects(struct amd64_pvt *pvt)
1626 {
1627 	int umc;
1628 
1629 	for_each_umc(umc) {
1630 		pvt->csels[umc].b_cnt = 4;
1631 		pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2;
1632 	}
1633 }
1634 
1635 static void umc_read_base_mask(struct amd64_pvt *pvt)
1636 {
1637 	u32 umc_base_reg, umc_base_reg_sec;
1638 	u32 umc_mask_reg, umc_mask_reg_sec;
1639 	u32 base_reg, base_reg_sec;
1640 	u32 mask_reg, mask_reg_sec;
1641 	u32 *base, *base_sec;
1642 	u32 *mask, *mask_sec;
1643 	int cs, umc;
1644 
1645 	for_each_umc(umc) {
1646 		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
1647 		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
1648 
1649 		for_each_chip_select(cs, umc, pvt) {
1650 			base = &pvt->csels[umc].csbases[cs];
1651 			base_sec = &pvt->csels[umc].csbases_sec[cs];
1652 
1653 			base_reg = umc_base_reg + (cs * 4);
1654 			base_reg_sec = umc_base_reg_sec + (cs * 4);
1655 
1656 			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
1657 				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
1658 					 umc, cs, *base, base_reg);
1659 
1660 			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
1661 				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
1662 					 umc, cs, *base_sec, base_reg_sec);
1663 		}
1664 
1665 		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
1666 		umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC);
1667 
1668 		for_each_chip_select_mask(cs, umc, pvt) {
1669 			mask = &pvt->csels[umc].csmasks[cs];
1670 			mask_sec = &pvt->csels[umc].csmasks_sec[cs];
1671 
1672 			mask_reg = umc_mask_reg + (cs * 4);
1673 			mask_reg_sec = umc_mask_reg_sec + (cs * 4);
1674 
1675 			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
1676 				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
1677 					 umc, cs, *mask, mask_reg);
1678 
1679 			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
1680 				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
1681 					 umc, cs, *mask_sec, mask_reg_sec);
1682 		}
1683 	}
1684 }
1685 
1686 /*
1687  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
1688  */
1689 static void dct_read_base_mask(struct amd64_pvt *pvt)
1690 {
1691 	int cs;
1692 
1693 	for_each_chip_select(cs, 0, pvt) {
1694 		int reg0   = DCSB0 + (cs * 4);
1695 		int reg1   = DCSB1 + (cs * 4);
1696 		u32 *base0 = &pvt->csels[0].csbases[cs];
1697 		u32 *base1 = &pvt->csels[1].csbases[cs];
1698 
1699 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1700 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
1701 				 cs, *base0, reg0);
1702 
1703 		if (pvt->fam == 0xf)
1704 			continue;
1705 
1706 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1707 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
1708 				 cs, *base1, (pvt->fam == 0x10) ? reg1
1709 							: reg0);
1710 	}
1711 
1712 	for_each_chip_select_mask(cs, 0, pvt) {
1713 		int reg0   = DCSM0 + (cs * 4);
1714 		int reg1   = DCSM1 + (cs * 4);
1715 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
1716 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
1717 
1718 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1719 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
1720 				 cs, *mask0, reg0);
1721 
1722 		if (pvt->fam == 0xf)
1723 			continue;
1724 
1725 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1726 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
1727 				 cs, *mask1, (pvt->fam == 0x10) ? reg1
1728 							: reg0);
1729 	}
1730 }
1731 
1732 static void umc_determine_memory_type(struct amd64_pvt *pvt)
1733 {
1734 	struct amd64_umc *umc;
1735 	u32 i;
1736 
1737 	for_each_umc(i) {
1738 		umc = &pvt->umc[i];
1739 
1740 		if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
1741 			umc->dram_type = MEM_EMPTY;
1742 			continue;
1743 		}
1744 
1745 		/*
1746 		 * Check if the system supports the "DDR Type" field in UMC Config
1747 		 * and has DDR5 DIMMs in use.
1748 		 */
1749 		if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
1750 			if (umc->dimm_cfg & BIT(5))
1751 				umc->dram_type = MEM_LRDDR5;
1752 			else if (umc->dimm_cfg & BIT(4))
1753 				umc->dram_type = MEM_RDDR5;
1754 			else
1755 				umc->dram_type = MEM_DDR5;
1756 		} else {
1757 			if (umc->dimm_cfg & BIT(5))
1758 				umc->dram_type = MEM_LRDDR4;
1759 			else if (umc->dimm_cfg & BIT(4))
1760 				umc->dram_type = MEM_RDDR4;
1761 			else
1762 				umc->dram_type = MEM_DDR4;
1763 		}
1764 
1765 		edac_dbg(1, "  UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
1766 	}
1767 }
1768 
1769 static void dct_determine_memory_type(struct amd64_pvt *pvt)
1770 {
1771 	u32 dram_ctrl, dcsm;
1772 
1773 	switch (pvt->fam) {
1774 	case 0xf:
1775 		if (pvt->ext_model >= K8_REV_F)
1776 			goto ddr3;
1777 
1778 		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1779 		return;
1780 
1781 	case 0x10:
1782 		if (pvt->dchr0 & DDR3_MODE)
1783 			goto ddr3;
1784 
1785 		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1786 		return;
1787 
1788 	case 0x15:
1789 		if (pvt->model < 0x60)
1790 			goto ddr3;
1791 
1792 		/*
1793 		 * Model 0x60 needs special handling:
1794 		 *
1795 		 * We use a Chip Select value of '0' to obtain dcsm.
1796 		 * Theoretically, it is possible to populate LRDIMMs of different
1797 		 * 'Rank' values on a DCT. But this is not the common case, so
1798 		 * it's reasonable to assume all DIMMs are of the same 'type'
1799 		 * until proven otherwise.
1800 		 */
1801 		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1802 		dcsm = pvt->csels[0].csmasks[0];
1803 
1804 		if (((dram_ctrl >> 8) & 0x7) == 0x2)
1805 			pvt->dram_type = MEM_DDR4;
1806 		else if (pvt->dclr0 & BIT(16))
1807 			pvt->dram_type = MEM_DDR3;
1808 		else if (dcsm & 0x3)
1809 			pvt->dram_type = MEM_LRDDR3;
1810 		else
1811 			pvt->dram_type = MEM_RDDR3;
1812 
1813 		return;
1814 
1815 	case 0x16:
1816 		goto ddr3;
1817 
1818 	default:
1819 		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1820 		pvt->dram_type = MEM_EMPTY;
1821 	}
1822 
1823 	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
1824 	return;
1825 
1826 ddr3:
1827 	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1828 }
1829 
1830 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1831 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1832 {
1833 	u16 mce_nid = topology_die_id(m->extcpu);
1834 	struct mem_ctl_info *mci;
1835 	u8 start_bit = 1;
1836 	u8 end_bit   = 47;
1837 	u64 addr;
1838 
1839 	mci = edac_mc_find(mce_nid);
1840 	if (!mci)
1841 		return 0;
1842 
1843 	pvt = mci->pvt_info;
1844 
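	/*
	 * K8 reports a narrower error address: only MC4_ADDR[39:3] is valid,
	 * hence the adjusted start/end bits below.
	 */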
1845 	if (pvt->fam == 0xf) {
1846 		start_bit = 3;
1847 		end_bit   = 39;
1848 	}
1849 
1850 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1851 
1852 	/*
1853 	 * Erratum 637 workaround
1854 	 */
1855 	if (pvt->fam == 0x15) {
1856 		u64 cc6_base, tmp_addr;
1857 		u32 tmp;
1858 		u8 intlv_en;
1859 
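		/*
		 * Only addresses with bits [47:24] == 0xfdf7, i.e. inside the
		 * CC6 save area handled below, need rewriting; anything else
		 * can be returned unchanged.
		 */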
1860 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1861 			return addr;
1862 
1863 
1864 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1865 		intlv_en = tmp >> 21 & 0x7;
1866 
1867 		/* add [47:27] + 3 trailing bits */
1868 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
1869 
1870 		/* reverse and add DramIntlvEn */
1871 		cc6_base |= intlv_en ^ 0x7;
1872 
1873 		/* pin at [47:24] */
1874 		cc6_base <<= 24;
1875 
1876 		if (!intlv_en)
1877 			return cc6_base | (addr & GENMASK_ULL(23, 0));
1878 
1879 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1880 
1881 							/* faster log2 */
1882 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1883 
1884 		/* OR DramIntlvSel into bits [14:12] */
1885 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1886 
1887 		/* add remaining [11:0] bits from original MC4_ADDR */
1888 		tmp_addr |= addr & GENMASK_ULL(11, 0);
1889 
1890 		return cc6_base | tmp_addr;
1891 	}
1892 
1893 	return addr;
1894 }
1895 
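/*
 * Return the PCI device with the given vendor/device ID that lives in the
 * same PCI domain, bus and slot as @related, i.e. another function of the
 * same northbridge device. The caller is responsible for dropping the
 * reference.
 */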
1896 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1897 						unsigned int device,
1898 						struct pci_dev *related)
1899 {
1900 	struct pci_dev *dev = NULL;
1901 
1902 	while ((dev = pci_get_device(vendor, device, dev))) {
1903 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1904 		    (dev->bus->number == related->bus->number) &&
1905 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1906 			break;
1907 	}
1908 
1909 	return dev;
1910 }
1911 
1912 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1913 {
1914 	struct amd_northbridge *nb;
1915 	struct pci_dev *f1 = NULL;
1916 	unsigned int pci_func;
1917 	int off = range << 3;
1918 	u32 llim;
1919 
1920 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
1921 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1922 
1923 	if (pvt->fam == 0xf)
1924 		return;
1925 
1926 	if (!dram_rw(pvt, range))
1927 		return;
1928 
1929 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
1930 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1931 
1932 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
1933 	if (pvt->fam != 0x15)
1934 		return;
1935 
1936 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
1937 	if (WARN_ON(!nb))
1938 		return;
1939 
1940 	if (pvt->model == 0x60)
1941 		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1942 	else if (pvt->model == 0x30)
1943 		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1944 	else
1945 		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1946 
1947 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1948 	if (WARN_ON(!f1))
1949 		return;
1950 
1951 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1952 
1953 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1954 
1955 				    /* {[39:27],111b} */
1956 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1957 
1958 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1959 
1960 				    /* [47:40] */
1961 	pvt->ranges[range].lim.hi |= llim >> 13;
1962 
1963 	pci_dev_put(f1);
1964 }
1965 
1966 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1967 				    struct err_info *err)
1968 {
1969 	struct amd64_pvt *pvt = mci->pvt_info;
1970 
1971 	error_address_to_page_and_offset(sys_addr, err);
1972 
1973 	/*
1974 	 * Find out which node the error address belongs to. This may be
1975 	 * different from the node that detected the error.
1976 	 */
1977 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1978 	if (!err->src_mci) {
1979 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1980 			     (unsigned long)sys_addr);
1981 		err->err_code = ERR_NODE;
1982 		return;
1983 	}
1984 
1985 	/* Now map the sys_addr to a CSROW */
1986 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1987 	if (err->csrow < 0) {
1988 		err->err_code = ERR_CSROW;
1989 		return;
1990 	}
1991 
1992 	/* CHIPKILL enabled */
1993 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
1994 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1995 		if (err->channel < 0) {
1996 			/*
1997 			 * Syndrome didn't map, so we don't know which of the
1998 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1999 			 * as suspect.
2000 			 */
2001 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
2002 				      "possible error reporting race\n",
2003 				      err->syndrome);
2004 			err->err_code = ERR_CHANNEL;
2005 			return;
2006 		}
2007 	} else {
2008 		/*
2009 		 * non-chipkill ecc mode
2010 		 *
2011 		 * The k8 documentation is unclear about how to determine the
2012 		 * channel number when using non-chipkill memory.  This method
2013 		 * was obtained from email communication with someone at AMD.
2014 		 * (Wish the email was placed in this comment - norsk)
2015 		 */
2016 		err->channel = ((sys_addr & BIT(3)) != 0);
2017 	}
2018 }
2019 
2020 static int ddr2_cs_size(unsigned i, bool dct_width)
2021 {
2022 	unsigned shift = 0;
2023 
2024 	if (i <= 2)
2025 		shift = i;
2026 	else if (!(i & 0x1))
2027 		shift = i >> 1;
2028 	else
2029 		shift = (i + 1) >> 1;
2030 
2031 	return 128 << (shift + !!dct_width);
2032 }
2033 
2034 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2035 				  unsigned cs_mode, int cs_mask_nr)
2036 {
2037 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
2038 
2039 	if (pvt->ext_model >= K8_REV_F) {
2040 		WARN_ON(cs_mode > 11);
2041 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
2042 	}
2043 	else if (pvt->ext_model >= K8_REV_D) {
2044 		unsigned diff;
2045 		WARN_ON(cs_mode > 10);
2046 
2047 		/*
2048 		 * The calculation below, besides trying to win an obfuscated C
2049 		 * contest, maps cs_mode values to DIMM chip select sizes. The
2050 		 * mappings are:
2051 		 *
2052 		 * cs_mode	CS size (MB)
2053 		 * =======	============
2054 		 * 0		32
2055 		 * 1		64
2056 		 * 2		128
2057 		 * 3		128
2058 		 * 4		256
2059 		 * 5		512
2060 		 * 6		256
2061 		 * 7		512
2062 		 * 8		1024
2063 		 * 9		1024
2064 		 * 10		2048
2065 		 *
2066 		 * Basically, it calculates a value with which to shift the
2067 		 * smallest CS size of 32MB.
2068 		 *
2069 		 * ddr[23]_cs_size have a similar purpose.
2070 		 */
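		/*
		 * Worked example: cs_mode == 8 gives diff = 8/3 + 1 = 3 and
		 * thus 32 << (8 - 3) = 1024 MB, matching the table above.
		 */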
2071 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
2072 
2073 		return 32 << (cs_mode - diff);
2074 	}
2075 	else {
2076 		WARN_ON(cs_mode > 6);
2077 		return 32 << cs_mode;
2078 	}
2079 }
2080 
2081 static int ddr3_cs_size(unsigned i, bool dct_width)
2082 {
2083 	unsigned shift = 0;
2084 	int cs_size = 0;
2085 
2086 	if (i == 0 || i == 3 || i == 4)
2087 		cs_size = -1;
2088 	else if (i <= 2)
2089 		shift = i;
2090 	else if (i == 12)
2091 		shift = 7;
2092 	else if (!(i & 0x1))
2093 		shift = i >> 1;
2094 	else
2095 		shift = (i + 1) >> 1;
2096 
2097 	if (cs_size != -1)
2098 		cs_size = (128 * (1 << !!dct_width)) << shift;
2099 
2100 	return cs_size;
2101 }
2102 
2103 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
2104 {
2105 	unsigned shift = 0;
2106 	int cs_size = 0;
2107 
2108 	if (i < 4 || i == 6)
2109 		cs_size = -1;
2110 	else if (i == 12)
2111 		shift = 7;
2112 	else if (!(i & 0x1))
2113 		shift = i >> 1;
2114 	else
2115 		shift = (i + 1) >> 1;
2116 
2117 	if (cs_size != -1)
2118 		cs_size = rank_multiply * (128 << shift);
2119 
2120 	return cs_size;
2121 }
2122 
2123 static int ddr4_cs_size(unsigned i)
2124 {
2125 	int cs_size = 0;
2126 
2127 	if (i == 0)
2128 		cs_size = -1;
2129 	else if (i == 1)
2130 		cs_size = 1024;
2131 	else
2132 		/* Min cs_size = 1G */
2133 		cs_size = 1024 * (1 << (i >> 1));
2134 
2135 	return cs_size;
2136 }
2137 
2138 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2139 				   unsigned cs_mode, int cs_mask_nr)
2140 {
2141 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
2142 
2143 	WARN_ON(cs_mode > 11);
2144 
2145 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
2146 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
2147 	else
2148 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
2149 }
2150 
2151 /*
2152  * F15h supports only 64-bit DCT interfaces.
2153  */
2154 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2155 				   unsigned cs_mode, int cs_mask_nr)
2156 {
2157 	WARN_ON(cs_mode > 12);
2158 
2159 	return ddr3_cs_size(cs_mode, false);
2160 }
2161 
2162 /* F15h M60h supports DDR4 mapping as well. */
2163 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2164 					unsigned cs_mode, int cs_mask_nr)
2165 {
2166 	int cs_size;
2167 	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
2168 
2169 	WARN_ON(cs_mode > 12);
2170 
2171 	if (pvt->dram_type == MEM_DDR4) {
2172 		if (cs_mode > 9)
2173 			return -1;
2174 
2175 		cs_size = ddr4_cs_size(cs_mode);
2176 	} else if (pvt->dram_type == MEM_LRDDR3) {
2177 		unsigned rank_multiply = dcsm & 0xf;
2178 
2179 		if (rank_multiply == 3)
2180 			rank_multiply = 4;
2181 		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
2182 	} else {
2183 		/* Minimum CS size is 512 MB for F15h M60h. */
2184 		if (cs_mode == 0x1)
2185 			return -1;
2186 
2187 		cs_size = ddr3_cs_size(cs_mode, false);
2188 	}
2189 
2190 	return cs_size;
2191 }
2192 
2193 /*
2194  * F16h and F15h model 30h have only limited cs_modes.
2195  */
2196 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
2197 				unsigned cs_mode, int cs_mask_nr)
2198 {
2199 	WARN_ON(cs_mode > 12);
2200 
2201 	if (cs_mode == 6 || cs_mode == 8 ||
2202 	    cs_mode == 9 || cs_mode == 12)
2203 		return -1;
2204 	else
2205 		return ddr3_cs_size(cs_mode, false);
2206 }
2207 
2208 static void read_dram_ctl_register(struct amd64_pvt *pvt)
2209 {
2210 
2211 	if (pvt->fam == 0xf)
2212 		return;
2213 
2214 	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
2215 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
2216 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
2217 
2218 		edac_dbg(0, "  DCTs operate in %s mode\n",
2219 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
2220 
2221 		if (!dct_ganging_enabled(pvt))
2222 			edac_dbg(0, "  Address range split per DCT: %s\n",
2223 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
2224 
2225 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
2226 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
2227 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
2228 
2229 		edac_dbg(0, "  channel interleave: %s, "
2230 			 "interleave bits selector: 0x%x\n",
2231 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
2232 			 dct_sel_interleave_addr(pvt));
2233 	}
2234 
2235 	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
2236 }
2237 
2238 /*
2239  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
2240  * 2.10.12 Memory Interleaving Modes).
2241  */
2242 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
2243 				     u8 intlv_en, int num_dcts_intlv,
2244 				     u32 dct_sel)
2245 {
2246 	u8 channel = 0;
2247 	u8 select;
2248 
2249 	if (!(intlv_en))
2250 		return (u8)(dct_sel);
2251 
2252 	if (num_dcts_intlv == 2) {
2253 		select = (sys_addr >> 8) & 0x3;
2254 		channel = select ? 0x3 : 0;
2255 	} else if (num_dcts_intlv == 4) {
2256 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
2257 		switch (intlv_addr) {
2258 		case 0x4:
2259 			channel = (sys_addr >> 8) & 0x3;
2260 			break;
2261 		case 0x5:
2262 			channel = (sys_addr >> 9) & 0x3;
2263 			break;
2264 		}
2265 	}
2266 	return channel;
2267 }
2268 
2269 /*
2270  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
2271  * Interleaving Modes.
2272  */
2273 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
2274 				bool hi_range_sel, u8 intlv_en)
2275 {
2276 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
2277 
2278 	if (dct_ganging_enabled(pvt))
2279 		return 0;
2280 
2281 	if (hi_range_sel)
2282 		return dct_sel_high;
2283 
2284 	/*
2285 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
2286 	 */
2287 	if (dct_interleave_enabled(pvt)) {
2288 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
2289 
2290 		/* return DCT select function: 0=DCT0, 1=DCT1 */
2291 		if (!intlv_addr)
2292 			return sys_addr >> 6 & 1;
2293 
2294 		if (intlv_addr & 0x2) {
2295 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
2296 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
2297 
2298 			return ((sys_addr >> shift) & 1) ^ temp;
2299 		}
2300 
2301 		if (intlv_addr & 0x4) {
2302 			u8 shift = intlv_addr & 0x1 ? 9 : 8;
2303 
2304 			return (sys_addr >> shift) & 1;
2305 		}
2306 
2307 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
2308 	}
2309 
2310 	if (dct_high_range_enabled(pvt))
2311 		return ~dct_sel_high & 1;
2312 
2313 	return 0;
2314 }
2315 
2316 /* Convert the sys_addr to the normalized DCT address */
2317 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
2318 				 u64 sys_addr, bool hi_rng,
2319 				 u32 dct_sel_base_addr)
2320 {
2321 	u64 chan_off;
2322 	u64 dram_base		= get_dram_base(pvt, range);
2323 	u64 hole_off		= f10_dhar_offset(pvt);
2324 	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
2325 
2326 	if (hi_rng) {
2327 		/*
2328 		 * if
2329 		 * base address of high range is below 4GB
2330 		 * (bits [47:27] at [31:11])
2331 		 * DRAM address space on this DCT is hoisted above 4GB	&&
2332 		 * sys_addr > 4GB
2333 		 *
2334 		 *	remove hole offset from sys_addr
2335 		 * else
2336 		 *	remove high range offset from sys_addr
2337 		 */
2338 		if ((!(dct_sel_base_addr >> 16) ||
2339 		     dct_sel_base_addr < dhar_base(pvt)) &&
2340 		    dhar_valid(pvt) &&
2341 		    (sys_addr >= BIT_64(32)))
2342 			chan_off = hole_off;
2343 		else
2344 			chan_off = dct_sel_base_off;
2345 	} else {
2346 		/*
2347 		 * if
2348 		 * we have a valid hole		&&
2349 		 * sys_addr > 4GB
2350 		 *
2351 		 *	remove hole
2352 		 * else
2353 		 *	remove dram base to normalize to DCT address
2354 		 */
2355 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
2356 			chan_off = hole_off;
2357 		else
2358 			chan_off = dram_base;
2359 	}
2360 
2361 	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
2362 }
2363 
2364 /*
2365  * Check if the csrow passed in is marked as SPARED; if so, return the new
2366  * spare row.
2367  */
2368 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
2369 {
2370 	int tmp_cs;
2371 
2372 	if (online_spare_swap_done(pvt, dct) &&
2373 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
2374 
2375 		for_each_chip_select(tmp_cs, dct, pvt) {
2376 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
2377 				csrow = tmp_cs;
2378 				break;
2379 			}
2380 		}
2381 	}
2382 	return csrow;
2383 }
2384 
2385 /*
2386  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
2387  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
2388  *
2389  * Return:
2390  *	-EINVAL:  NOT FOUND
2391  *	0..csrow = Chip-Select Row
2392  */
2393 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
2394 {
2395 	struct mem_ctl_info *mci;
2396 	struct amd64_pvt *pvt;
2397 	u64 cs_base, cs_mask;
2398 	int cs_found = -EINVAL;
2399 	int csrow;
2400 
2401 	mci = edac_mc_find(nid);
2402 	if (!mci)
2403 		return cs_found;
2404 
2405 	pvt = mci->pvt_info;
2406 
2407 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
2408 
2409 	for_each_chip_select(csrow, dct, pvt) {
2410 		if (!csrow_enabled(csrow, dct, pvt))
2411 			continue;
2412 
2413 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
2414 
2415 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
2416 			 csrow, cs_base, cs_mask);
2417 
2418 		cs_mask = ~cs_mask;
2419 
2420 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
2421 			 (in_addr & cs_mask), (cs_base & cs_mask));
2422 
2423 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
2424 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
2425 				cs_found =  csrow;
2426 				break;
2427 			}
2428 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
2429 
2430 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
2431 			break;
2432 		}
2433 	}
2434 	return cs_found;
2435 }
2436 
2437 /*
2438  * See F2x10C. Non-interleaved graphics framebuffer memory below 16G is
2439  * swapped with a region located at the bottom of memory so that the GPU can use
2440  * the interleaved region and thus two channels.
2441  */
2442 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
2443 {
2444 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
2445 
2446 	if (pvt->fam == 0x10) {
2447 		/* only revC3 and revE have that feature */
2448 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
2449 			return sys_addr;
2450 	}
2451 
2452 	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
2453 
2454 	if (!(swap_reg & 0x1))
2455 		return sys_addr;
2456 
2457 	swap_base	= (swap_reg >> 3) & 0x7f;
2458 	swap_limit	= (swap_reg >> 11) & 0x7f;
2459 	rgn_size	= (swap_reg >> 20) & 0x7f;
2460 	tmp_addr	= sys_addr >> 27;
2461 
2462 	if (!(sys_addr >> 34) &&
2463 	    (((tmp_addr >= swap_base) &&
2464 	     (tmp_addr <= swap_limit)) ||
2465 	     (tmp_addr < rgn_size)))
2466 		return sys_addr ^ (u64)swap_base << 27;
2467 
2468 	return sys_addr;
2469 }
2470 
2471 /* For a given @dram_range, check if @sys_addr falls within it. */
2472 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2473 				  u64 sys_addr, int *chan_sel)
2474 {
2475 	int cs_found = -EINVAL;
2476 	u64 chan_addr;
2477 	u32 dct_sel_base;
2478 	u8 channel;
2479 	bool high_range = false;
2480 
2481 	u8 node_id    = dram_dst_node(pvt, range);
2482 	u8 intlv_en   = dram_intlv_en(pvt, range);
2483 	u32 intlv_sel = dram_intlv_sel(pvt, range);
2484 
2485 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2486 		 range, sys_addr, get_dram_limit(pvt, range));
2487 
2488 	if (dhar_valid(pvt) &&
2489 	    dhar_base(pvt) <= sys_addr &&
2490 	    sys_addr < BIT_64(32)) {
2491 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2492 			    sys_addr);
2493 		return -EINVAL;
2494 	}
2495 
2496 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
2497 		return -EINVAL;
2498 
2499 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
2500 
2501 	dct_sel_base = dct_sel_baseaddr(pvt);
2502 
2503 	/*
2504 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
2505 	 * select between DCT0 and DCT1.
2506 	 */
2507 	if (dct_high_range_enabled(pvt) &&
2508 	   !dct_ganging_enabled(pvt) &&
2509 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
2510 		high_range = true;
2511 
2512 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
2513 
2514 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
2515 					  high_range, dct_sel_base);
2516 
2517 	/* Remove node interleaving, see F1x120 */
2518 	if (intlv_en)
2519 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
2520 			    (chan_addr & 0xfff);
2521 
2522 	/* remove channel interleave */
2523 	if (dct_interleave_enabled(pvt) &&
2524 	   !dct_high_range_enabled(pvt) &&
2525 	   !dct_ganging_enabled(pvt)) {
2526 
2527 		if (dct_sel_interleave_addr(pvt) != 1) {
2528 			if (dct_sel_interleave_addr(pvt) == 0x3)
2529 				/* hash 9 */
2530 				chan_addr = ((chan_addr >> 10) << 9) |
2531 					     (chan_addr & 0x1ff);
2532 			else
2533 				/* A[6] or hash 6 */
2534 				chan_addr = ((chan_addr >> 7) << 6) |
2535 					     (chan_addr & 0x3f);
2536 		} else
2537 			/* A[12] */
2538 			chan_addr = ((chan_addr >> 13) << 12) |
2539 				     (chan_addr & 0xfff);
2540 	}
2541 
2542 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
2543 
2544 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
2545 
2546 	if (cs_found >= 0)
2547 		*chan_sel = channel;
2548 
2549 	return cs_found;
2550 }
2551 
2552 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2553 					u64 sys_addr, int *chan_sel)
2554 {
2555 	int cs_found = -EINVAL;
2556 	int num_dcts_intlv = 0;
2557 	u64 chan_addr, chan_offset;
2558 	u64 dct_base, dct_limit;
2559 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
2560 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
2561 
2562 	u64 dhar_offset		= f10_dhar_offset(pvt);
2563 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
2564 	u8 node_id		= dram_dst_node(pvt, range);
2565 	u8 intlv_en		= dram_intlv_en(pvt, range);
2566 
2567 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2568 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2569 
2570 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2571 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
2572 
2573 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2574 		 range, sys_addr, get_dram_limit(pvt, range));
2575 
2576 	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
2577 	    !(get_dram_limit(pvt, range) >= sys_addr))
2578 		return -EINVAL;
2579 
2580 	if (dhar_valid(pvt) &&
2581 	    dhar_base(pvt) <= sys_addr &&
2582 	    sys_addr < BIT_64(32)) {
2583 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2584 			    sys_addr);
2585 		return -EINVAL;
2586 	}
2587 
2588 	/* Verify sys_addr is within DCT Range. */
2589 	dct_base = (u64) dct_sel_baseaddr(pvt);
2590 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2591 
2592 	if (!(dct_cont_base_reg & BIT(0)) &&
2593 	    !(dct_base <= (sys_addr >> 27) &&
2594 	      dct_limit >= (sys_addr >> 27)))
2595 		return -EINVAL;
2596 
2597 	/* Verify the number of DCTs that participate in channel interleaving. */
2598 	num_dcts_intlv = (int) hweight8(intlv_en);
2599 
2600 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2601 		return -EINVAL;
2602 
2603 	if (pvt->model >= 0x60)
2604 		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2605 	else
2606 		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2607 						     num_dcts_intlv, dct_sel);
2608 
2609 	/* Verify we stay within the MAX number of channels allowed */
2610 	if (channel > 3)
2611 		return -EINVAL;
2612 
2613 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2614 
2615 	/* Get normalized DCT addr */
2616 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2617 		chan_offset = dhar_offset;
2618 	else
2619 		chan_offset = dct_base << 27;
2620 
2621 	chan_addr = sys_addr - chan_offset;
2622 
2623 	/* remove channel interleave */
2624 	if (num_dcts_intlv == 2) {
2625 		if (intlv_addr == 0x4)
2626 			chan_addr = ((chan_addr >> 9) << 8) |
2627 						(chan_addr & 0xff);
2628 		else if (intlv_addr == 0x5)
2629 			chan_addr = ((chan_addr >> 10) << 9) |
2630 						(chan_addr & 0x1ff);
2631 		else
2632 			return -EINVAL;
2633 
2634 	} else if (num_dcts_intlv == 4) {
2635 		if (intlv_addr == 0x4)
2636 			chan_addr = ((chan_addr >> 10) << 8) |
2637 							(chan_addr & 0xff);
2638 		else if (intlv_addr == 0x5)
2639 			chan_addr = ((chan_addr >> 11) << 9) |
2640 							(chan_addr & 0x1ff);
2641 		else
2642 			return -EINVAL;
2643 	}
2644 
2645 	if (dct_offset_en) {
2646 		amd64_read_pci_cfg(pvt->F1,
2647 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
2648 				   &tmp);
2649 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
2650 	}
2651 
2652 	f15h_select_dct(pvt, channel);
2653 
2654 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
2655 
2656 	/*
2657 	 * Find Chip select:
2658 	 * if channel = 3, then alias it to 1. This is because, in F15h M30h,
2659 	 * there is support for 4 DCTs, but only 2 are currently functional.
2660 	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2661 	 * pvt->csels[1]. So we need to use '1' here to get the correct info.
2662 	 * Refer to the F15h M30h BKDG, Sections 2.10 and 2.10.3, for clarification.
2663 	 */
2664 	alias_channel =  (channel == 3) ? 1 : channel;
2665 
2666 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2667 
2668 	if (cs_found >= 0)
2669 		*chan_sel = alias_channel;
2670 
2671 	return cs_found;
2672 }
2673 
2674 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2675 					u64 sys_addr,
2676 					int *chan_sel)
2677 {
2678 	int cs_found = -EINVAL;
2679 	unsigned range;
2680 
2681 	for (range = 0; range < DRAM_RANGES; range++) {
2682 		if (!dram_rw(pvt, range))
2683 			continue;
2684 
2685 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
2686 			cs_found = f15_m30h_match_to_this_node(pvt, range,
2687 							       sys_addr,
2688 							       chan_sel);
2689 
2690 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
2691 			 (get_dram_limit(pvt, range) >= sys_addr)) {
2692 			cs_found = f1x_match_to_this_node(pvt, range,
2693 							  sys_addr, chan_sel);
2694 			if (cs_found >= 0)
2695 				break;
2696 		}
2697 	}
2698 	return cs_found;
2699 }
2700 
2701 /*
2702  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2703  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2704  *
2705  * The @sys_addr is usually an error address received from the hardware
2706  * (MCX_ADDR).
2707  */
2708 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2709 				     struct err_info *err)
2710 {
2711 	struct amd64_pvt *pvt = mci->pvt_info;
2712 
2713 	error_address_to_page_and_offset(sys_addr, err);
2714 
2715 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2716 	if (err->csrow < 0) {
2717 		err->err_code = ERR_CSROW;
2718 		return;
2719 	}
2720 
2721 	/*
2722 	 * We need the syndromes for channel detection only when we're
2723 	 * ganged. Otherwise @chan should already contain the channel at
2724 	 * this point.
2725 	 */
2726 	if (dct_ganging_enabled(pvt))
2727 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2728 }
2729 
2730 /*
2731  * These are tables of eigenvectors (one per line) which can be used for the
2732  * construction of the syndrome tables. The modified syndrome search algorithm
2733  * uses those to find the symbol in error and thus the DIMM.
2734  *
2735  * Algorithm courtesy of Ross LaFetra from AMD.
2736  */
2737 static const u16 x4_vectors[] = {
2738 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
2739 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
2740 	0x0001, 0x0002, 0x0004, 0x0008,
2741 	0x1013, 0x3032, 0x4044, 0x8088,
2742 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
2743 	0x4857, 0xc4fe, 0x13cc, 0x3288,
2744 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2745 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2746 	0x15c1, 0x2a42, 0x89ac, 0x4758,
2747 	0x2b03, 0x1602, 0x4f0c, 0xca08,
2748 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2749 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
2750 	0x2b87, 0x164e, 0x642c, 0xdc18,
2751 	0x40b9, 0x80de, 0x1094, 0x20e8,
2752 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
2753 	0x11c1, 0x2242, 0x84ac, 0x4c58,
2754 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
2755 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2756 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
2757 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2758 	0x16b3, 0x3d62, 0x4f34, 0x8518,
2759 	0x1e2f, 0x391a, 0x5cac, 0xf858,
2760 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2761 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2762 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2763 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
2764 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
2765 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
2766 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
2767 	0x185d, 0x2ca6, 0x7914, 0x9e28,
2768 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
2769 	0x4199, 0x82ee, 0x19f4, 0x2e58,
2770 	0x4807, 0xc40e, 0x130c, 0x3208,
2771 	0x1905, 0x2e0a, 0x5804, 0xac08,
2772 	0x213f, 0x132a, 0xadfc, 0x5ba8,
2773 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2774 };
2775 
2776 static const u16 x8_vectors[] = {
2777 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2778 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2779 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2780 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2781 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2782 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2783 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2784 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2785 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2786 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2787 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2788 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2789 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2790 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2791 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2792 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2793 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2794 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2795 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2796 };
2797 
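/*
 * Try to cancel out @syndrome using the eigenvectors of one error symbol at a
 * time: bits covered by a symbol's vectors are XORed out of a working copy of
 * the syndrome, and the symbol whose vectors reduce it to zero identifies the
 * symbol (and hence the DIMM) in error.
 */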
2798 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2799 			   unsigned v_dim)
2800 {
2801 	unsigned int i, err_sym;
2802 
2803 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2804 		u16 s = syndrome;
2805 		unsigned v_idx =  err_sym * v_dim;
2806 		unsigned v_end = (err_sym + 1) * v_dim;
2807 
2808 		/* walk over all 16 bits of the syndrome */
2809 		for (i = 1; i < (1U << 16); i <<= 1) {
2810 
2811 			/* if bit is set in that eigenvector... */
2812 			if (v_idx < v_end && vectors[v_idx] & i) {
2813 				u16 ev_comp = vectors[v_idx++];
2814 
2815 				/* ... and bit set in the modified syndrome, */
2816 				if (s & i) {
2817 					/* remove it. */
2818 					s ^= ev_comp;
2819 
2820 					if (!s)
2821 						return err_sym;
2822 				}
2823 
2824 			} else if (s & i)
2825 				/* can't get to zero, move to next symbol */
2826 				break;
2827 		}
2828 	}
2829 
2830 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2831 	return -1;
2832 }
2833 
2834 static int map_err_sym_to_channel(int err_sym, int sym_size)
2835 {
2836 	if (sym_size == 4)
2837 		switch (err_sym) {
2838 		case 0x20:
2839 		case 0x21:
2840 			return 0;
2841 		case 0x22:
2842 		case 0x23:
2843 			return 1;
2844 		default:
2845 			return err_sym >> 4;
2846 		}
2847 	/* x8 symbols */
2848 	else
2849 		switch (err_sym) {
2850 		/* imaginary bits not in a DIMM */
2851 		case 0x10:
2852 			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
2853 					  err_sym);
2854 			return -1;
2855 		case 0x11:
2856 			return 0;
2857 		case 0x12:
2858 			return 1;
2859 		default:
2860 			return err_sym >> 3;
2861 		}
2862 	return -1;
2863 }
2864 
2865 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2866 {
2867 	struct amd64_pvt *pvt = mci->pvt_info;
2868 	int err_sym = -1;
2869 
2870 	if (pvt->ecc_sym_sz == 8)
2871 		err_sym = decode_syndrome(syndrome, x8_vectors,
2872 					  ARRAY_SIZE(x8_vectors),
2873 					  pvt->ecc_sym_sz);
2874 	else if (pvt->ecc_sym_sz == 4)
2875 		err_sym = decode_syndrome(syndrome, x4_vectors,
2876 					  ARRAY_SIZE(x4_vectors),
2877 					  pvt->ecc_sym_sz);
2878 	else {
2879 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2880 		return err_sym;
2881 	}
2882 
2883 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2884 }
2885 
2886 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2887 			    u8 ecc_type)
2888 {
2889 	enum hw_event_mc_err_type err_type;
2890 	const char *string;
2891 
2892 	if (ecc_type == 2)
2893 		err_type = HW_EVENT_ERR_CORRECTED;
2894 	else if (ecc_type == 1)
2895 		err_type = HW_EVENT_ERR_UNCORRECTED;
2896 	else if (ecc_type == 3)
2897 		err_type = HW_EVENT_ERR_DEFERRED;
2898 	else {
2899 		WARN(1, "Something is rotten in the state of Denmark.\n");
2900 		return;
2901 	}
2902 
2903 	switch (err->err_code) {
2904 	case DECODE_OK:
2905 		string = "";
2906 		break;
2907 	case ERR_NODE:
2908 		string = "Failed to map error addr to a node";
2909 		break;
2910 	case ERR_CSROW:
2911 		string = "Failed to map error addr to a csrow";
2912 		break;
2913 	case ERR_CHANNEL:
2914 		string = "Unknown syndrome - possible error reporting race";
2915 		break;
2916 	case ERR_SYND:
2917 		string = "MCA_SYND not valid - unknown syndrome and csrow";
2918 		break;
2919 	case ERR_NORM_ADDR:
2920 		string = "Cannot decode normalized address";
2921 		break;
2922 	default:
2923 		string = "WTF error";
2924 		break;
2925 	}
2926 
2927 	edac_mc_handle_error(err_type, mci, 1,
2928 			     err->page, err->offset, err->syndrome,
2929 			     err->csrow, err->channel, -1,
2930 			     string, "");
2931 }
2932 
2933 static inline void decode_bus_error(int node_id, struct mce *m)
2934 {
2935 	struct mem_ctl_info *mci;
2936 	struct amd64_pvt *pvt;
2937 	u8 ecc_type = (m->status >> 45) & 0x3;
2938 	u8 xec = XEC(m->status, 0x1f);
2939 	u16 ec = EC(m->status);
2940 	u64 sys_addr;
2941 	struct err_info err;
2942 
2943 	mci = edac_mc_find(node_id);
2944 	if (!mci)
2945 		return;
2946 
2947 	pvt = mci->pvt_info;
2948 
2949 	/* Bail out early if this was an 'observed' error */
2950 	if (PP(ec) == NBSL_PP_OBS)
2951 		return;
2952 
2953 	/* Do only ECC errors */
2954 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2955 		return;
2956 
2957 	memset(&err, 0, sizeof(err));
2958 
2959 	sys_addr = get_error_address(pvt, m);
2960 
2961 	if (ecc_type == 2)
2962 		err.syndrome = extract_syndrome(m->status);
2963 
2964 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2965 
2966 	__log_ecc_error(mci, &err, ecc_type);
2967 }
2968 
2969 /*
2970  * To find the UMC channel represented by this bank we need to match on its
2971  * instance_id. The instance_id of a bank is held in the lower 32 bits of its
2972  * IPID.
2973  *
2974  * Currently, we can derive the channel number by looking at the 6th nibble in
2975  * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
2976  * number.
2977  *
2978  * For DRAM ECC errors, the Chip Select number is given in bits [2:0] of
2979  * the MCA_SYND[ErrorInformation] field.
2980  */
2981 static void umc_get_err_info(struct mce *m, struct err_info *err)
2982 {
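	/*
	 * Bits [31:20] of the IPID's instance_id carry the channel number
	 * (the "6th nibble" mentioned above), e.g. an instance_id of
	 * 0x150000 yields channel 1.
	 */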
2983 	err->channel = (m->ipid & GENMASK(31, 0)) >> 20;
2984 	err->csrow = m->synd & 0x7;
2985 }
2986 
2987 static void decode_umc_error(int node_id, struct mce *m)
2988 {
2989 	u8 ecc_type = (m->status >> 45) & 0x3;
2990 	struct mem_ctl_info *mci;
2991 	struct amd64_pvt *pvt;
2992 	struct err_info err;
2993 	u64 sys_addr;
2994 
2995 	mci = edac_mc_find(node_id);
2996 	if (!mci)
2997 		return;
2998 
2999 	pvt = mci->pvt_info;
3000 
3001 	memset(&err, 0, sizeof(err));
3002 
3003 	if (m->status & MCI_STATUS_DEFERRED)
3004 		ecc_type = 3;
3005 
3006 	if (!(m->status & MCI_STATUS_SYNDV)) {
3007 		err.err_code = ERR_SYND;
3008 		goto log_error;
3009 	}
3010 
3011 	if (ecc_type == 2) {
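		/*
		 * MCA_SYND packs the syndrome length (in bits) at [23:18] and
		 * the syndrome value itself in the upper 32 bits; extract both
		 * accordingly.
		 */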
3012 		u8 length = (m->synd >> 18) & 0x3f;
3013 
3014 		if (length)
3015 			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
3016 		else
3017 			err.err_code = ERR_CHANNEL;
3018 	}
3019 
3020 	pvt->ops->get_err_info(m, &err);
3021 
3022 	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
3023 		err.err_code = ERR_NORM_ADDR;
3024 		goto log_error;
3025 	}
3026 
3027 	error_address_to_page_and_offset(sys_addr, &err);
3028 
3029 log_error:
3030 	__log_ecc_error(mci, &err, ecc_type);
3031 }
3032 
3033 /*
3034  * Use pvt->F3 which contains the F3 CPU PCI device to get the related
3035  * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
3036  */
3037 static int
3038 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
3039 {
3040 	/* Reserve the ADDRESS MAP Device */
3041 	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
3042 	if (!pvt->F1) {
3043 		edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
3044 		return -ENODEV;
3045 	}
3046 
3047 	/* Reserve the DCT Device */
3048 	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
3049 	if (!pvt->F2) {
3050 		pci_dev_put(pvt->F1);
3051 		pvt->F1 = NULL;
3052 
3053 		edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
3054 		return -ENODEV;
3055 	}
3056 
3057 	if (!pci_ctl_dev)
3058 		pci_ctl_dev = &pvt->F2->dev;
3059 
3060 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
3061 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
3062 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
3063 
3064 	return 0;
3065 }
3066 
3067 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
3068 {
3069 	pvt->ecc_sym_sz = 4;
3070 
3071 	if (pvt->fam >= 0x10) {
3072 		u32 tmp;
3073 
3074 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
3075 		/* F16h has only DCT0, so no need to read dbam1. */
3076 		if (pvt->fam != 0x16)
3077 			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
3078 
3079 		/* F10h, revD and later can do x8 ECC too. */
3080 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
3081 			pvt->ecc_sym_sz = 8;
3082 	}
3083 }
3084 
3085 /*
3086  * Retrieve the hardware registers of the memory controller.
3087  */
3088 static void umc_read_mc_regs(struct amd64_pvt *pvt)
3089 {
3090 	u8 nid = pvt->mc_node_id;
3091 	struct amd64_umc *umc;
3092 	u32 i, umc_base;
3093 
3094 	/* Read registers from each UMC */
3095 	for_each_umc(i) {
3096 
3097 		umc_base = get_umc_base(i);
3098 		umc = &pvt->umc[i];
3099 
3100 		amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg);
3101 		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
3102 		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
3103 		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
3104 		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
3105 	}
3106 }
3107 
3108 /*
3109  * Retrieve the hardware registers of the memory controller (this includes the
3110  * 'Address Map' and 'Misc' device regs)
3111  */
3112 static void dct_read_mc_regs(struct amd64_pvt *pvt)
3113 {
3114 	unsigned int range;
3115 	u64 msr_val;
3116 
3117 	/*
3118 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
3119 	 * those are Read-As-Zero.
3120 	 */
3121 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
3122 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
3123 
3124 	/* Check first whether TOP_MEM2 is enabled: */
3125 	rdmsrl(MSR_AMD64_SYSCFG, msr_val);
3126 	if (msr_val & BIT(21)) {
3127 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
3128 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
3129 	} else {
3130 		edac_dbg(0, "  TOP_MEM2 disabled\n");
3131 	}
3132 
3133 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
3134 
3135 	read_dram_ctl_register(pvt);
3136 
3137 	for (range = 0; range < DRAM_RANGES; range++) {
3138 		u8 rw;
3139 
3140 		/* read settings for this DRAM range */
3141 		read_dram_base_limit_regs(pvt, range);
3142 
3143 		rw = dram_rw(pvt, range);
3144 		if (!rw)
3145 			continue;
3146 
3147 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
3148 			 range,
3149 			 get_dram_base(pvt, range),
3150 			 get_dram_limit(pvt, range));
3151 
3152 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
3153 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
3154 			 (rw & 0x1) ? "R" : "-",
3155 			 (rw & 0x2) ? "W" : "-",
3156 			 dram_intlv_sel(pvt, range),
3157 			 dram_dst_node(pvt, range));
3158 	}
3159 
3160 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
3161 	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
3162 
3163 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
3164 
3165 	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
3166 	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
3167 
3168 	if (!dct_ganging_enabled(pvt)) {
3169 		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
3170 		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
3171 	}
3172 
3173 	determine_ecc_sym_sz(pvt);
3174 }
3175 
3176 /*
3177  * NOTE: CPU Revision Dependent code
3178  *
3179  * Input:
3180  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
3181  *	k8 private pointer to -->
3182  *			DRAM Bank Address mapping register
3183  *			node_id
3184  *			DCL register where dual_channel_active is
3185  *
3186  * The DBAM register consists of four 4-bit fields:
3187  *
3188  * Bits:	CSROWs
3189  * 0-3		CSROWs 0 and 1
3190  * 4-7		CSROWs 2 and 3
3191  * 8-11		CSROWs 4 and 5
3192  * 12-15	CSROWs 6 and 7
3193  *
3194  * Values range from: 0 to 15
3195  * The meaning of the values depends on CPU revision and dual-channel state,
3196  * see relevant BKDG more info.
3197  *
3198  * The memory controller provides for total of only 8 CSROWs in its current
3199  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
3200  * single channel or two (2) DIMMs in dual channel mode.
3201  *
3202  * The following code logic collapses the various tables for CSROW based on CPU
3203  * revision.
3204  *
3205  * Returns:
3206  *	The number of PAGE_SIZE pages on the specified CSROW number it
3207  *	encompasses
3208  *
3209  */
3210 static u32 dct_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
3211 {
3212 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
3213 	u32 cs_mode, nr_pages;
3214 
3215 	csrow_nr >>= 1;
3216 	cs_mode = DBAM_DIMM(csrow_nr, dbam);
3217 
3218 	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
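	/*
	 * dbam_to_cs() returns the chip select size in MB; shifting by
	 * (20 - PAGE_SHIFT) converts that into a number of PAGE_SIZE pages.
	 */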
3219 	nr_pages <<= 20 - PAGE_SHIFT;
3220 
3221 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
3222 		    csrow_nr, dct,  cs_mode);
3223 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
3224 
3225 	return nr_pages;
3226 }
3227 
3228 static u32 umc_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
3229 {
3230 	int csrow_nr = csrow_nr_orig;
3231 	u32 cs_mode, nr_pages;
3232 
3233 	cs_mode = umc_get_cs_mode(csrow_nr >> 1, dct, pvt);
3234 
3235 	nr_pages   = umc_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3236 	nr_pages <<= 20 - PAGE_SHIFT;
3237 
3238 	edac_dbg(0, "csrow: %d, channel: %d, cs_mode %d\n",
3239 		 csrow_nr_orig, dct,  cs_mode);
3240 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
3241 
3242 	return nr_pages;
3243 }
3244 
3245 static void umc_init_csrows(struct mem_ctl_info *mci)
3246 {
3247 	struct amd64_pvt *pvt = mci->pvt_info;
3248 	enum edac_type edac_mode = EDAC_NONE;
3249 	enum dev_type dev_type = DEV_UNKNOWN;
3250 	struct dimm_info *dimm;
3251 	u8 umc, cs;
3252 
3253 	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
3254 		edac_mode = EDAC_S16ECD16ED;
3255 		dev_type = DEV_X16;
3256 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
3257 		edac_mode = EDAC_S8ECD8ED;
3258 		dev_type = DEV_X8;
3259 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
3260 		edac_mode = EDAC_S4ECD4ED;
3261 		dev_type = DEV_X4;
3262 	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
3263 		edac_mode = EDAC_SECDED;
3264 	}
3265 
3266 	for_each_umc(umc) {
3267 		for_each_chip_select(cs, umc, pvt) {
3268 			if (!csrow_enabled(cs, umc, pvt))
3269 				continue;
3270 
3271 			dimm = mci->csrows[cs]->channels[umc]->dimm;
3272 
3273 			edac_dbg(1, "MC node: %d, csrow: %d\n",
3274 					pvt->mc_node_id, cs);
3275 
3276 			dimm->nr_pages = umc_get_csrow_nr_pages(pvt, umc, cs);
3277 			dimm->mtype = pvt->umc[umc].dram_type;
3278 			dimm->edac_mode = edac_mode;
3279 			dimm->dtype = dev_type;
3280 			dimm->grain = 64;
3281 		}
3282 	}
3283 }
3284 
3285 /*
3286  * Initialize the array of csrow attribute instances, based on the values
3287  * from pci config hardware registers.
3288  */
3289 static void dct_init_csrows(struct mem_ctl_info *mci)
3290 {
3291 	struct amd64_pvt *pvt = mci->pvt_info;
3292 	enum edac_type edac_mode = EDAC_NONE;
3293 	struct csrow_info *csrow;
3294 	struct dimm_info *dimm;
3295 	int nr_pages = 0;
3296 	int i, j;
3297 	u32 val;
3298 
3299 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
3300 
3301 	pvt->nbcfg = val;
3302 
3303 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
3304 		 pvt->mc_node_id, val,
3305 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
3306 
3307 	/*
3308 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
3309 	 */
3310 	for_each_chip_select(i, 0, pvt) {
3311 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
3312 		bool row_dct1 = false;
3313 
3314 		if (pvt->fam != 0xf)
3315 			row_dct1 = !!csrow_enabled(i, 1, pvt);
3316 
3317 		if (!row_dct0 && !row_dct1)
3318 			continue;
3319 
3320 		csrow = mci->csrows[i];
3321 
3322 		edac_dbg(1, "MC node: %d, csrow: %d\n",
3323 			    pvt->mc_node_id, i);
3324 
3325 		if (row_dct0) {
3326 			nr_pages = dct_get_csrow_nr_pages(pvt, 0, i);
3327 			csrow->channels[0]->dimm->nr_pages = nr_pages;
3328 		}
3329 
3330 		/* K8 has only one DCT */
3331 		if (pvt->fam != 0xf && row_dct1) {
3332 			int row_dct1_pages = dct_get_csrow_nr_pages(pvt, 1, i);
3333 
3334 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3335 			nr_pages += row_dct1_pages;
3336 		}
3337 
3338 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3339 
3340 		/* Determine DIMM ECC mode: */
3341 		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3342 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3343 					? EDAC_S4ECD4ED
3344 					: EDAC_SECDED;
3345 		}
3346 
3347 		for (j = 0; j < pvt->max_mcs; j++) {
3348 			dimm = csrow->channels[j]->dimm;
3349 			dimm->mtype = pvt->dram_type;
3350 			dimm->edac_mode = edac_mode;
3351 			dimm->grain = 64;
3352 		}
3353 	}
3354 }
3355 
3356 /* get all cores on this DCT */
3357 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3358 {
3359 	int cpu;
3360 
3361 	for_each_online_cpu(cpu)
3362 		if (topology_die_id(cpu) == nid)
3363 			cpumask_set_cpu(cpu, mask);
3364 }
3365 
3366 /* check MCG_CTL on all the cpus on this node */
3367 static bool nb_mce_bank_enabled_on_node(u16 nid)
3368 {
3369 	cpumask_var_t mask;
3370 	int cpu, nbe;
3371 	bool ret = false;
3372 
3373 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3374 		amd64_warn("%s: Error allocating mask\n", __func__);
3375 		return false;
3376 	}
3377 
3378 	get_cpus_on_this_dct_cpumask(mask, nid);
3379 
3380 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3381 
3382 	for_each_cpu(cpu, mask) {
3383 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3384 		nbe = reg->l & MSR_MCGCTL_NBE;
3385 
3386 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3387 			 cpu, reg->q,
3388 			 (nbe ? "enabled" : "disabled"));
3389 
3390 		if (!nbe)
3391 			goto out;
3392 	}
3393 	ret = true;
3394 
3395 out:
3396 	free_cpumask_var(mask);
3397 	return ret;
3398 }
3399 
3400 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3401 {
3402 	cpumask_var_t cmask;
3403 	int cpu;
3404 
3405 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3406 		amd64_warn("%s: error allocating mask\n", __func__);
3407 		return -ENOMEM;
3408 	}
3409 
3410 	get_cpus_on_this_dct_cpumask(cmask, nid);
3411 
3412 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3413 
3414 	for_each_cpu(cpu, cmask) {
3415 
3416 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3417 
3418 		if (on) {
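			/*
			 * Remember whether the bit was already set (e.g. by
			 * BIOS) so that the restore path only clears it if we
			 * enabled it ourselves.
			 */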
3419 			if (reg->l & MSR_MCGCTL_NBE)
3420 				s->flags.nb_mce_enable = 1;
3421 
3422 			reg->l |= MSR_MCGCTL_NBE;
3423 		} else {
3424 			/*
3425 			 * Turn off NB MCE reporting only when it was off before
3426 			 */
3427 			if (!s->flags.nb_mce_enable)
3428 				reg->l &= ~MSR_MCGCTL_NBE;
3429 		}
3430 	}
3431 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3432 
3433 	free_cpumask_var(cmask);
3434 
3435 	return 0;
3436 }
3437 
3438 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3439 				       struct pci_dev *F3)
3440 {
3441 	bool ret = true;
3442 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3443 
3444 	if (toggle_ecc_err_reporting(s, nid, ON)) {
3445 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3446 		return false;
3447 	}
3448 
3449 	amd64_read_pci_cfg(F3, NBCTL, &value);
3450 
3451 	s->old_nbctl   = value & mask;
3452 	s->nbctl_valid = true;
3453 
3454 	value |= mask;
3455 	amd64_write_pci_cfg(F3, NBCTL, value);
3456 
3457 	amd64_read_pci_cfg(F3, NBCFG, &value);
3458 
3459 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3460 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3461 
3462 	if (!(value & NBCFG_ECC_ENABLE)) {
3463 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3464 
3465 		s->flags.nb_ecc_prev = 0;
3466 
3467 		/* Attempt to turn on DRAM ECC Enable */
3468 		value |= NBCFG_ECC_ENABLE;
3469 		amd64_write_pci_cfg(F3, NBCFG, value);
3470 
3471 		amd64_read_pci_cfg(F3, NBCFG, &value);
3472 
3473 		if (!(value & NBCFG_ECC_ENABLE)) {
3474 			amd64_warn("Hardware rejected DRAM ECC enable, "
3475 				   "check memory DIMM configuration.\n");
3476 			ret = false;
3477 		} else {
3478 			amd64_info("Hardware accepted DRAM ECC Enable\n");
3479 		}
3480 	} else {
3481 		s->flags.nb_ecc_prev = 1;
3482 	}
3483 
3484 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3485 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3486 
3487 	return ret;
3488 }
3489 
3490 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3491 					struct pci_dev *F3)
3492 {
3493 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3494 
3495 	if (!s->nbctl_valid)
3496 		return;
3497 
3498 	amd64_read_pci_cfg(F3, NBCTL, &value);
3499 	value &= ~mask;
3500 	value |= s->old_nbctl;
3501 
3502 	amd64_write_pci_cfg(F3, NBCTL, value);
3503 
3504 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3505 	if (!s->flags.nb_ecc_prev) {
3506 		amd64_read_pci_cfg(F3, NBCFG, &value);
3507 		value &= ~NBCFG_ECC_ENABLE;
3508 		amd64_write_pci_cfg(F3, NBCFG, value);
3509 	}
3510 
3511 	/* restore the NB Enable MCGCTL bit */
3512 	if (toggle_ecc_err_reporting(s, nid, OFF))
3513 		amd64_warn("Error restoring NB MCGCTL settings!\n");
3514 }
3515 
3516 static bool dct_ecc_enabled(struct amd64_pvt *pvt)
3517 {
3518 	u16 nid = pvt->mc_node_id;
3519 	bool nb_mce_en = false;
3520 	u8 ecc_en = 0;
3521 	u32 value;
3522 
3523 	amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3524 
3525 	ecc_en = !!(value & NBCFG_ECC_ENABLE);
3526 
3527 	nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3528 	if (!nb_mce_en)
3529 		edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3530 			 MSR_IA32_MCG_CTL, nid);
3531 
3532 	edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
3533 
3534 	if (!ecc_en || !nb_mce_en)
3535 		return false;
3536 	else
3537 		return true;
3538 }
3539 
3540 static bool umc_ecc_enabled(struct amd64_pvt *pvt)
3541 {
3542 	u8 umc_en_mask = 0, ecc_en_mask = 0;
3543 	u16 nid = pvt->mc_node_id;
3544 	struct amd64_umc *umc;
3545 	u8 ecc_en = 0, i;
3546 
3547 	for_each_umc(i) {
3548 		umc = &pvt->umc[i];
3549 
3550 		/* Only check enabled UMCs. */
3551 		if (!(umc->sdp_ctrl & UMC_SDP_INIT))
3552 			continue;
3553 
3554 		umc_en_mask |= BIT(i);
3555 
3556 		if (umc->umc_cap_hi & UMC_ECC_ENABLED)
3557 			ecc_en_mask |= BIT(i);
3558 	}
3559 
3560 	/* Check whether at least one UMC is enabled: */
3561 	if (umc_en_mask)
3562 		ecc_en = umc_en_mask == ecc_en_mask;
3563 	else
3564 		edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3565 
3566 	edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
3567 
3568 	if (!ecc_en)
3569 		return false;
3570 	else
3571 		return true;
3572 }
3573 
3574 static inline void
3575 umc_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3576 {
3577 	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3578 
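	/*
	 * Start out assuming every capability and AND in what each populated
	 * UMC reports so that the result is the common denominator across all
	 * UMCs on this node.
	 */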
3579 	for_each_umc(i) {
3580 		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3581 			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3582 			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3583 
3584 			dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3585 			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3586 		}
3587 	}
3588 
3589 	/* Set chipkill only if ECC is enabled: */
3590 	if (ecc_en) {
3591 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3592 
3593 		if (!cpk_en)
3594 			return;
3595 
3596 		if (dev_x4)
3597 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3598 		else if (dev_x16)
3599 			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3600 		else
3601 			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3602 	}
3603 }
3604 
3605 static void dct_setup_mci_misc_attrs(struct mem_ctl_info *mci)
3606 {
3607 	struct amd64_pvt *pvt = mci->pvt_info;
3608 
3609 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3610 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
3611 
3612 	if (pvt->nbcap & NBCAP_SECDED)
3613 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3614 
3615 	if (pvt->nbcap & NBCAP_CHIPKILL)
3616 		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3617 
3618 	mci->edac_cap		= dct_determine_edac_cap(pvt);
3619 	mci->mod_name		= EDAC_MOD_STR;
3620 	mci->ctl_name		= pvt->ctl_name;
3621 	mci->dev_name		= pci_name(pvt->F3);
3622 	mci->ctl_page_to_phys	= NULL;
3623 
3624 	/* memory scrubber interface */
3625 	mci->set_sdram_scrub_rate = set_scrub_rate;
3626 	mci->get_sdram_scrub_rate = get_scrub_rate;
3627 
3628 	dct_init_csrows(mci);
3629 }
3630 
3631 static void umc_setup_mci_misc_attrs(struct mem_ctl_info *mci)
3632 {
3633 	struct amd64_pvt *pvt = mci->pvt_info;
3634 
3635 	mci->mtype_cap		= MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
3636 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
3637 
3638 	umc_determine_edac_ctl_cap(mci, pvt);
3639 
3640 	mci->edac_cap		= umc_determine_edac_cap(pvt);
3641 	mci->mod_name		= EDAC_MOD_STR;
3642 	mci->ctl_name		= pvt->ctl_name;
3643 	mci->dev_name		= pci_name(pvt->F3);
3644 	mci->ctl_page_to_phys	= NULL;
3645 
3646 	umc_init_csrows(mci);
3647 }
3648 
3649 static int dct_hw_info_get(struct amd64_pvt *pvt)
3650 {
3651 	int ret = reserve_mc_sibling_devs(pvt, pvt->f1_id, pvt->f2_id);
3652 
3653 	if (ret)
3654 		return ret;
3655 
3656 	dct_prep_chip_selects(pvt);
3657 	dct_read_base_mask(pvt);
3658 	dct_read_mc_regs(pvt);
3659 	dct_determine_memory_type(pvt);
3660 
3661 	return 0;
3662 }
3663 
3664 static int umc_hw_info_get(struct amd64_pvt *pvt)
3665 {
3666 	pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3667 	if (!pvt->umc)
3668 		return -ENOMEM;
3669 
3670 	umc_prep_chip_selects(pvt);
3671 	umc_read_base_mask(pvt);
3672 	umc_read_mc_regs(pvt);
3673 	umc_determine_memory_type(pvt);
3674 
3675 	return 0;
3676 }
3677 
3678 static void hw_info_put(struct amd64_pvt *pvt)
3679 {
3680 	pci_dev_put(pvt->F1);
3681 	pci_dev_put(pvt->F2);
3682 	kfree(pvt->umc);
3683 }
3684 
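/*
 * Family/feature dispatch tables. per_family_init() picks one of these
 * groups and the rest of the driver calls through pvt->ops (e.g.
 * pvt->ops->hw_info_get(pvt) in probe_one_instance()), keeping family
 * differences out of the common paths.
 */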
3685 static struct low_ops umc_ops = {
3686 	.hw_info_get			= umc_hw_info_get,
3687 	.ecc_enabled			= umc_ecc_enabled,
3688 	.setup_mci_misc_attrs		= umc_setup_mci_misc_attrs,
3689 	.dump_misc_regs			= umc_dump_misc_regs,
3690 	.get_err_info			= umc_get_err_info,
3691 };
3692 
3693 /* Use Family 16h versions for defaults and adjust as needed below. */
3694 static struct low_ops dct_ops = {
3695 	.map_sysaddr_to_csrow		= f1x_map_sysaddr_to_csrow,
3696 	.dbam_to_cs			= f16_dbam_to_chip_select,
3697 	.hw_info_get			= dct_hw_info_get,
3698 	.ecc_enabled			= dct_ecc_enabled,
3699 	.setup_mci_misc_attrs		= dct_setup_mci_misc_attrs,
3700 	.dump_misc_regs			= dct_dump_misc_regs,
3701 };
3702 
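/*
 * Fill in family/model specifics: control name, F1/F2 PCI device IDs for
 * pre-Zen parts, the number of memory controllers, and any per-model op
 * overrides. Supporting a new UMC-based model range is typically just
 * another case label; a hypothetical, illustration-only addition might
 * look like:
 *
 *	case 0xb0 ... 0xbf:
 *		pvt->ctl_name	= "F19h_MB0h";
 *		pvt->max_mcs	= 12;
 *		break;
 */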
3703 static int per_family_init(struct amd64_pvt *pvt)
3704 {
3705 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
3706 	pvt->stepping	= boot_cpu_data.x86_stepping;
3707 	pvt->model	= boot_cpu_data.x86_model;
3708 	pvt->fam	= boot_cpu_data.x86;
3709 	pvt->max_mcs	= 2;
3710 
3711 	/*
3712 	 * Decide on which ops group to use here and do any family/model
3713 	 * overrides below.
3714 	 */
3715 	if (pvt->fam >= 0x17)
3716 		pvt->ops = &umc_ops;
3717 	else
3718 		pvt->ops = &dct_ops;
3719 
3720 	switch (pvt->fam) {
3721 	case 0xf:
3722 		pvt->ctl_name				= (pvt->ext_model >= K8_REV_F) ?
3723 							  "K8 revF or later" : "K8 revE or earlier";
3724 		pvt->f1_id				= PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
3725 		pvt->f2_id				= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
3726 		pvt->ops->map_sysaddr_to_csrow		= k8_map_sysaddr_to_csrow;
3727 		pvt->ops->dbam_to_cs			= k8_dbam_to_chip_select;
3728 		break;
3729 
3730 	case 0x10:
3731 		pvt->ctl_name				= "F10h";
3732 		pvt->f1_id				= PCI_DEVICE_ID_AMD_10H_NB_MAP;
3733 		pvt->f2_id				= PCI_DEVICE_ID_AMD_10H_NB_DRAM;
3734 		pvt->ops->dbam_to_cs			= f10_dbam_to_chip_select;
3735 		break;
3736 
3737 	case 0x15:
3738 		switch (pvt->model) {
3739 		case 0x30:
3740 			pvt->ctl_name			= "F15h_M30h";
3741 			pvt->f1_id			= PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
3742 			pvt->f2_id			= PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
3743 			break;
3744 		case 0x60:
3745 			pvt->ctl_name			= "F15h_M60h";
3746 			pvt->f1_id			= PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
3747 			pvt->f2_id			= PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
3748 			pvt->ops->dbam_to_cs		= f15_m60h_dbam_to_chip_select;
3749 			break;
3750 		case 0x13:
3751 			/* Richland is a client-only part; not supported. */
3752 			return -ENODEV;
3753 		default:
3754 			pvt->ctl_name			= "F15h";
3755 			pvt->f1_id			= PCI_DEVICE_ID_AMD_15H_NB_F1;
3756 			pvt->f2_id			= PCI_DEVICE_ID_AMD_15H_NB_F2;
3757 			pvt->ops->dbam_to_cs		= f15_dbam_to_chip_select;
3758 			break;
3759 		}
3760 		break;
3761 
3762 	case 0x16:
3763 		switch (pvt->model) {
3764 		case 0x30:
3765 			pvt->ctl_name			= "F16h_M30h";
3766 			pvt->f1_id			= PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
3767 			pvt->f2_id			= PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
3768 			break;
3769 		default:
3770 			pvt->ctl_name			= "F16h";
3771 			pvt->f1_id			= PCI_DEVICE_ID_AMD_16H_NB_F1;
3772 			pvt->f2_id			= PCI_DEVICE_ID_AMD_16H_NB_F2;
3773 			break;
3774 		}
3775 		break;
3776 
3777 	case 0x17:
3778 		switch (pvt->model) {
3779 		case 0x10 ... 0x2f:
3780 			pvt->ctl_name			= "F17h_M10h";
3781 			break;
3782 		case 0x30 ... 0x3f:
3783 			pvt->ctl_name			= "F17h_M30h";
3784 			pvt->max_mcs			= 8;
3785 			break;
3786 		case 0x60 ... 0x6f:
3787 			pvt->ctl_name			= "F17h_M60h";
3788 			break;
3789 		case 0x70 ... 0x7f:
3790 			pvt->ctl_name			= "F17h_M70h";
3791 			break;
3792 		default:
3793 			pvt->ctl_name			= "F17h";
3794 			break;
3795 		}
3796 		break;
3797 
3798 	case 0x18:
3799 		pvt->ctl_name				= "F18h";
3800 		break;
3801 
3802 	case 0x19:
3803 		switch (pvt->model) {
3804 		case 0x00 ... 0x0f:
3805 			pvt->ctl_name			= "F19h";
3806 			pvt->max_mcs			= 8;
3807 			break;
3808 		case 0x10 ... 0x1f:
3809 			pvt->ctl_name			= "F19h_M10h";
3810 			pvt->max_mcs			= 12;
3811 			pvt->flags.zn_regs_v2		= 1;
3812 			break;
3813 		case 0x20 ... 0x2f:
3814 			pvt->ctl_name			= "F19h_M20h";
3815 			break;
3816 		case 0x50 ... 0x5f:
3817 			pvt->ctl_name			= "F19h_M50h";
3818 			break;
3819 		case 0xa0 ... 0xaf:
3820 			pvt->ctl_name			= "F19h_MA0h";
3821 			pvt->max_mcs			= 12;
3822 			pvt->flags.zn_regs_v2		= 1;
3823 			break;
3824 		}
3825 		break;
3826 
3827 	default:
3828 		amd64_err("Unsupported family!\n");
3829 		return -ENODEV;
3830 	}
3831 
3832 	return 0;
3833 }
3834 
3835 static const struct attribute_group *amd64_edac_attr_groups[] = {
3836 #ifdef CONFIG_EDAC_DEBUG
3837 	&dbg_group,
3838 	&inj_group,
3839 #endif
3840 	NULL
3841 };
3842 
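/*
 * Register one memory controller instance with the EDAC core using a
 * two-dimensional layout: layer 0 is the chip selects (treated as virtual
 * csrows) and layer 1 is the channels, sized by max_mcs.
 */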
3843 static int init_one_instance(struct amd64_pvt *pvt)
3844 {
3845 	struct mem_ctl_info *mci = NULL;
3846 	struct edac_mc_layer layers[2];
3847 	int ret = -ENOMEM;
3848 
3849 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3850 	layers[0].size = pvt->csels[0].b_cnt;
3851 	layers[0].is_virt_csrow = true;
3852 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
3853 	layers[1].size = pvt->max_mcs;
3854 	layers[1].is_virt_csrow = false;
3855 
3856 	mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
3857 	if (!mci)
3858 		return ret;
3859 
3860 	mci->pvt_info = pvt;
3861 	mci->pdev = &pvt->F3->dev;
3862 
3863 	pvt->ops->setup_mci_misc_attrs(mci);
3864 
3865 	ret = -ENODEV;
3866 	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3867 		edac_dbg(1, "failed edac_mc_add_mc()\n");
3868 		edac_mc_free(mci);
3869 		return ret;
3870 	}
3871 
3872 	return 0;
3873 }
3874 
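/* True if at least one chip select on any controller is enabled. */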
3875 static bool instance_has_memory(struct amd64_pvt *pvt)
3876 {
3877 	bool cs_enabled = false;
3878 	int cs = 0, dct = 0;
3879 
3880 	for (dct = 0; dct < pvt->max_mcs; dct++) {
3881 		for_each_chip_select(cs, dct, pvt)
3882 			cs_enabled |= csrow_enabled(cs, dct, pvt);
3883 	}
3884 
3885 	return cs_enabled;
3886 }
3887 
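/*
 * Per-node probe: allocate the ECC settings and driver-private data, run the
 * family setup and hardware discovery, then bail out early if the node has
 * no DIMMs or DRAM ECC is disabled (unless the pre-Zen ecc_enable_override
 * path applies). On success the instance is registered via
 * init_one_instance().
 */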
3888 static int probe_one_instance(unsigned int nid)
3889 {
3890 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3891 	struct amd64_pvt *pvt = NULL;
3892 	struct ecc_settings *s;
3893 	int ret;
3894 
3895 	ret = -ENOMEM;
3896 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3897 	if (!s)
3898 		goto err_out;
3899 
3900 	ecc_stngs[nid] = s;
3901 
3902 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3903 	if (!pvt)
3904 		goto err_settings;
3905 
3906 	pvt->mc_node_id	= nid;
3907 	pvt->F3 = F3;
3908 
3909 	ret = per_family_init(pvt);
3910 	if (ret < 0)
3911 		goto err_enable;
3912 
3913 	ret = pvt->ops->hw_info_get(pvt);
3914 	if (ret < 0)
3915 		goto err_enable;
3916 
3917 	ret = 0;
3918 	if (!instance_has_memory(pvt)) {
3919 		amd64_info("Node %d: No DIMMs detected.\n", nid);
3920 		goto err_enable;
3921 	}
3922 
3923 	if (!pvt->ops->ecc_enabled(pvt)) {
3924 		ret = -ENODEV;
3925 
3926 		if (!ecc_enable_override)
3927 			goto err_enable;
3928 
3929 		if (boot_cpu_data.x86 >= 0x17) {
3930 			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
3931 			goto err_enable;
3932 		} else
3933 			amd64_warn("Forcing ECC on!\n");
3934 
3935 		if (!enable_ecc_error_reporting(s, nid, F3))
3936 			goto err_enable;
3937 	}
3938 
3939 	ret = init_one_instance(pvt);
3940 	if (ret < 0) {
3941 		amd64_err("Error probing instance: %d\n", nid);
3942 
3943 		if (boot_cpu_data.x86 < 0x17)
3944 			restore_ecc_error_reporting(s, nid, F3);
3945 
3946 		goto err_enable;
3947 	}
3948 
3949 	amd64_info("%s detected (node %d).\n", pvt->ctl_name, pvt->mc_node_id);
3950 
3951 	/* Display and decode various registers for debug purposes. */
3952 	pvt->ops->dump_misc_regs(pvt);
3953 
3954 	return ret;
3955 
3956 err_enable:
3957 	hw_info_put(pvt);
3958 	kfree(pvt);
3959 
3960 err_settings:
3961 	kfree(s);
3962 	ecc_stngs[nid] = NULL;
3963 
3964 err_out:
3965 	return ret;
3966 }
3967 
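/*
 * Teardown counterpart of probe_one_instance(): unregister the MC from the
 * EDAC core, restore the original ECC settings and free the per-node
 * resources.
 */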
3968 static void remove_one_instance(unsigned int nid)
3969 {
3970 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3971 	struct ecc_settings *s = ecc_stngs[nid];
3972 	struct mem_ctl_info *mci;
3973 	struct amd64_pvt *pvt;
3974 
3975 	/* Remove from EDAC CORE tracking list */
3976 	mci = edac_mc_del_mc(&F3->dev);
3977 	if (!mci)
3978 		return;
3979 
3980 	pvt = mci->pvt_info;
3981 
3982 	restore_ecc_error_reporting(s, nid, F3);
3983 
3984 	kfree(ecc_stngs[nid]);
3985 	ecc_stngs[nid] = NULL;
3986 
3987 	/* Free the EDAC CORE resources */
3988 	mci->pvt_info = NULL;
3989 
3990 	hw_info_put(pvt);
3991 	kfree(pvt);
3992 	edac_mc_free(mci);
3993 }
3994 
3995 static void setup_pci_device(void)
3996 {
3997 	if (pci_ctl)
3998 		return;
3999 
4000 	pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
4001 	if (!pci_ctl) {
4002 		pr_warn("%s(): Unable to create PCI control\n", __func__);
4003 		pr_warn("%s(): PCI error reporting via EDAC will not be available\n", __func__);
4004 	}
4005 }
4006 
4007 static const struct x86_cpu_id amd64_cpuids[] = {
4008 	X86_MATCH_VENDOR_FAM(AMD,	0x0F, NULL),
4009 	X86_MATCH_VENDOR_FAM(AMD,	0x10, NULL),
4010 	X86_MATCH_VENDOR_FAM(AMD,	0x15, NULL),
4011 	X86_MATCH_VENDOR_FAM(AMD,	0x16, NULL),
4012 	X86_MATCH_VENDOR_FAM(AMD,	0x17, NULL),
4013 	X86_MATCH_VENDOR_FAM(HYGON,	0x18, NULL),
4014 	X86_MATCH_VENDOR_FAM(AMD,	0x19, NULL),
4015 	{ }
4016 };
4017 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
4018 
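/*
 * Module init: make sure no other EDAC driver (or GHES) owns the platform,
 * then probe one instance per AMD northbridge/node and hook up the MCE
 * decoder (decode_umc_error for family 0x17+, decode_bus_error plus the
 * generic PCI control otherwise).
 */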
4019 static int __init amd64_edac_init(void)
4020 {
4021 	const char *owner;
4022 	int err = -ENODEV;
4023 	int i;
4024 
4025 	if (ghes_get_devices())
4026 		return -EBUSY;
4027 
4028 	owner = edac_get_owner();
4029 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
4030 		return -EBUSY;
4031 
4032 	if (!x86_match_cpu(amd64_cpuids))
4033 		return -ENODEV;
4034 
4035 	if (!amd_nb_num())
4036 		return -ENODEV;
4037 
4038 	opstate_init();
4039 
4040 	err = -ENOMEM;
4041 	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
4042 	if (!ecc_stngs)
4043 		goto err_free;
4044 
4045 	msrs = msrs_alloc();
4046 	if (!msrs)
4047 		goto err_free;
4048 
4049 	for (i = 0; i < amd_nb_num(); i++) {
4050 		err = probe_one_instance(i);
4051 		if (err) {
4052 			/* unwind properly */
4053 			while (--i >= 0)
4054 				remove_one_instance(i);
4055 
4056 			goto err_pci;
4057 		}
4058 	}
4059 
4060 	if (!edac_has_mcs()) {
4061 		err = -ENODEV;
4062 		goto err_pci;
4063 	}
4064 
4065 	/* Register the error decoder with the EDAC MCE handling. */
4066 	if (boot_cpu_data.x86 >= 0x17) {
4067 		amd_register_ecc_decoder(decode_umc_error);
4068 	} else {
4069 		amd_register_ecc_decoder(decode_bus_error);
4070 		setup_pci_device();
4071 	}
4072 
4073 #ifdef CONFIG_X86_32
4074 	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
4075 #endif
4076 
4077 	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
4078 
4079 	return 0;
4080 
4081 err_pci:
4082 	pci_ctl_dev = NULL;
4083 
4084 	msrs_free(msrs);
4085 	msrs = NULL;
4086 
4087 err_free:
4088 	kfree(ecc_stngs);
4089 	ecc_stngs = NULL;
4090 
4091 	return err;
4092 }
4093 
4094 static void __exit amd64_edac_exit(void)
4095 {
4096 	int i;
4097 
4098 	if (pci_ctl)
4099 		edac_pci_release_generic_ctl(pci_ctl);
4100 
4101 	/* unregister from EDAC MCE */
4102 	if (boot_cpu_data.x86 >= 0x17)
4103 		amd_unregister_ecc_decoder(decode_umc_error);
4104 	else
4105 		amd_unregister_ecc_decoder(decode_bus_error);
4106 
4107 	for (i = 0; i < amd_nb_num(); i++)
4108 		remove_one_instance(i);
4109 
4110 	kfree(ecc_stngs);
4111 	ecc_stngs = NULL;
4112 
4113 	pci_ctl_dev = NULL;
4114 
4115 	msrs_free(msrs);
4116 	msrs = NULL;
4117 }
4118 
4119 module_init(amd64_edac_init);
4120 module_exit(amd64_edac_exit);
4121 
4122 MODULE_LICENSE("GPL");
4123 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, Dave Peterson, Thayne Harbaugh; AMD");
4124 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - " EDAC_AMD64_VERSION);
4125 
4126 module_param(edac_op_state, int, 0444);
4127 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
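/*
 * Example usage, shown for illustration only: the reporting mode and the ECC
 * override can be set at module load time, e.g.
 *
 *	modprobe amd64_edac edac_op_state=1 ecc_enable_override=1
 *
 * Note that the ECC override is refused on family 0x17 and later; enable ECC
 * in BIOS instead.
 */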
4128