xref: /linux/drivers/edac/amd64_edac.c (revision 26fbb4c8c7c3ee9a4c3b4de555a8587b5a19154e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "amd64_edac.h"
3 #include <asm/amd_nb.h>
4 
5 static struct edac_pci_ctl_info *pci_ctl;
6 
7 /*
8  * Set by command line parameter. If BIOS has enabled the ECC, this override is
9  * cleared to prevent this driver from re-enabling the hardware.
10  */
11 static int ecc_enable_override;
12 module_param(ecc_enable_override, int, 0644);
13 
14 static struct msr __percpu *msrs;
15 
16 static struct amd64_family_type *fam_type;
17 
18 /* Per-node stuff */
19 static struct ecc_settings **ecc_stngs;
20 
21 /* Device for the PCI component */
22 static struct device *pci_ctl_dev;
23 
24 /*
25  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
26  * bandwidth to a valid bit pattern. The 'set' operation finds the
27  * 'matching or higher' value.
28  *
29  * FIXME: Produce a better mapping/linearisation.
30  */
31 static const struct scrubrate {
32        u32 scrubval;           /* bit pattern for scrub rate */
33        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
34 } scrubrates[] = {
35 	{ 0x01, 1600000000UL},
36 	{ 0x02, 800000000UL},
37 	{ 0x03, 400000000UL},
38 	{ 0x04, 200000000UL},
39 	{ 0x05, 100000000UL},
40 	{ 0x06, 50000000UL},
41 	{ 0x07, 25000000UL},
42 	{ 0x08, 12284069UL},
43 	{ 0x09, 6274509UL},
44 	{ 0x0A, 3121951UL},
45 	{ 0x0B, 1560975UL},
46 	{ 0x0C, 781440UL},
47 	{ 0x0D, 390720UL},
48 	{ 0x0E, 195300UL},
49 	{ 0x0F, 97650UL},
50 	{ 0x10, 48854UL},
51 	{ 0x11, 24427UL},
52 	{ 0x12, 12213UL},
53 	{ 0x13, 6101UL},
54 	{ 0x14, 3051UL},
55 	{ 0x15, 1523UL},
56 	{ 0x16, 761UL},
57 	{ 0x00, 0UL},        /* scrubbing off */
58 };
59 
60 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
61 			       u32 *val, const char *func)
62 {
63 	int err = 0;
64 
65 	err = pci_read_config_dword(pdev, offset, val);
66 	if (err)
67 		amd64_warn("%s: error reading F%dx%03x.\n",
68 			   func, PCI_FUNC(pdev->devfn), offset);
69 
70 	return err;
71 }
72 
73 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
74 				u32 val, const char *func)
75 {
76 	int err = 0;
77 
78 	err = pci_write_config_dword(pdev, offset, val);
79 	if (err)
80 		amd64_warn("%s: error writing to F%dx%03x.\n",
81 			   func, PCI_FUNC(pdev->devfn), offset);
82 
83 	return err;
84 }
85 
86 /*
87  * Select DCT to which PCI cfg accesses are routed
88  */
89 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
90 {
91 	u32 reg = 0;
92 
93 	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
94 	reg &= (pvt->model == 0x30) ? ~3 : ~1;
95 	reg |= dct;
96 	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
97 }
98 
99 /*
100  *
101  * Depending on the family, F2 DCT reads need special handling:
102  *
103  * K8: has a single DCT only and no address offsets >= 0x100
104  *
105  * F10h: each DCT has its own set of regs
106  *	DCT0 -> F2x040..
107  *	DCT1 -> F2x140..
108  *
109  * F16h: has only 1 DCT
110  *
111  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
112  */
113 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
114 					 int offset, u32 *val)
115 {
116 	switch (pvt->fam) {
117 	case 0xf:
118 		if (dct || offset >= 0x100)
119 			return -EINVAL;
120 		break;
121 
122 	case 0x10:
123 		if (dct) {
124 			/*
125 			 * Note: If ganging is enabled, barring the regs
126 			 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
127 			 * return 0. (cf. Section 2.8.1 F10h BKDG)
128 			 */
129 			if (dct_ganging_enabled(pvt))
130 				return 0;
131 
132 			offset += 0x100;
133 		}
134 		break;
135 
136 	case 0x15:
137 		/*
138 		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
139 		 * We should select which DCT we access using F1x10C[DctCfgSel]
140 		 */
141 		dct = (dct && pvt->model == 0x30) ? 3 : dct;
142 		f15h_select_dct(pvt, dct);
143 		break;
144 
145 	case 0x16:
146 		if (dct)
147 			return -EINVAL;
148 		break;
149 
150 	default:
151 		break;
152 	}
153 	return amd64_read_pci_cfg(pvt->F2, offset, val);
154 }
155 
156 /*
157  * Memory scrubber control interface. For K8, memory scrubbing is handled by
158  * hardware and can involve L2 cache, dcache as well as the main memory. With
159  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
160  * functionality.
161  *
162  * This causes the "units" for the scrubbing speed to vary from 64-byte blocks
163  * (DRAM) to cache lines. This is nasty, so we will use bandwidth in
164  * bytes/sec for the setting.
165  *
166  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
167  * other archs, we might not have access to the caches directly.
168  */
169 
170 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
171 {
172 	/*
173 	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
174 	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
175 	 * as 0x0, scrubval 0x6 as 0x1, etc.
176 	 */
177 	if (scrubval >= 0x5 && scrubval <= 0x14) {
178 		scrubval -= 0x5;
179 		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
180 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
181 	} else {
182 		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
183 	}
184 }
185 /*
186  * Scan the scrub rate mapping table for a close or matching bandwidth value to
187  * issue. If the requested rate is too big, use the last (maximum) value found.
188  */
189 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
190 {
191 	u32 scrubval;
192 	int i;
193 
194 	/*
195 	 * Map the configured rate (new_bw) to a value specific to the AMD64
196 	 * memory controller and apply it to the register. Search for the first
197 	 * bandwidth entry that is greater than or equal to the requested setting
198 	 * and program that. If at the last entry, turn off DRAM scrubbing.
199 	 *
200 	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
201 	 * by falling back to the last element in scrubrates[].
202 	 */
203 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
204 		/*
205 		 * skip scrub rates which aren't recommended
206 		 * (see F10 BKDG, F3x58)
207 		 */
208 		if (scrubrates[i].scrubval < min_rate)
209 			continue;
210 
211 		if (scrubrates[i].bandwidth <= new_bw)
212 			break;
213 	}
214 
215 	scrubval = scrubrates[i].scrubval;
216 
217 	if (pvt->umc) {
218 		__f17h_set_scrubval(pvt, scrubval);
219 	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
220 		f15h_select_dct(pvt, 0);
221 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
222 		f15h_select_dct(pvt, 1);
223 		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
224 	} else {
225 		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
226 	}
227 
228 	if (scrubval)
229 		return scrubrates[i].bandwidth;
230 
231 	return 0;
232 }
233 
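/*
 * Set the DRAM scrub rate. The minimum allowed scrub value depends on the
 * family: 0x0 on K8, 0x6 on F15h M60h, 0x5 otherwise. F15h models < 0x10
 * need DCT0 selected first (erratum #505).
 */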
234 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
235 {
236 	struct amd64_pvt *pvt = mci->pvt_info;
237 	u32 min_scrubrate = 0x5;
238 
239 	if (pvt->fam == 0xf)
240 		min_scrubrate = 0x0;
241 
242 	if (pvt->fam == 0x15) {
243 		/* Erratum #505 */
244 		if (pvt->model < 0x10)
245 			f15h_select_dct(pvt, 0);
246 
247 		if (pvt->model == 0x60)
248 			min_scrubrate = 0x6;
249 	}
250 	return __set_scrub_rate(pvt, bw, min_scrubrate);
251 }
252 
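/*
 * Read the current scrub value from the hardware and translate it back to a
 * bandwidth in bytes/sec via scrubrates[]. Returns -EINVAL if the value read
 * does not match any table entry.
 */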
253 static int get_scrub_rate(struct mem_ctl_info *mci)
254 {
255 	struct amd64_pvt *pvt = mci->pvt_info;
256 	int i, retval = -EINVAL;
257 	u32 scrubval = 0;
258 
259 	if (pvt->umc) {
260 		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
261 		if (scrubval & BIT(0)) {
262 			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
263 			scrubval &= 0xF;
264 			scrubval += 0x5;
265 		} else {
266 			scrubval = 0;
267 		}
268 	} else if (pvt->fam == 0x15) {
269 		/* Erratum #505 */
270 		if (pvt->model < 0x10)
271 			f15h_select_dct(pvt, 0);
272 
273 		if (pvt->model == 0x60)
274 			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
275 		else
276 			amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
277 	} else {
278 		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
279 	}
280 
281 	scrubval = scrubval & 0x001F;
282 
283 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
284 		if (scrubrates[i].scrubval == scrubval) {
285 			retval = scrubrates[i].bandwidth;
286 			break;
287 		}
288 	}
289 	return retval;
290 }
291 
292 /*
293  * returns true if the SysAddr given by sys_addr matches the
294  * DRAM base/limit associated with node_id
295  */
296 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
297 {
298 	u64 addr;
299 
300 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
301 	 * all ones if the most significant implemented address bit is 1.
302 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
303 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
304 	 * Application Programming.
305 	 */
306 	addr = sys_addr & 0x000000ffffffffffull;
307 
308 	return ((addr >= get_dram_base(pvt, nid)) &&
309 		(addr <= get_dram_limit(pvt, nid)));
310 }
311 
312 /*
313  * Attempt to map a SysAddr to a node. On success, return a pointer to the
314  * mem_ctl_info structure for the node that the SysAddr maps to.
315  *
316  * On failure, return NULL.
317  */
318 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
319 						u64 sys_addr)
320 {
321 	struct amd64_pvt *pvt;
322 	u8 node_id;
323 	u32 intlv_en, bits;
324 
325 	/*
326 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
327 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
328 	 */
329 	pvt = mci->pvt_info;
330 
331 	/*
332 	 * The value of this field should be the same for all DRAM Base
333 	 * registers.  Therefore we arbitrarily choose to read it from the
334 	 * register for node 0.
335 	 */
336 	intlv_en = dram_intlv_en(pvt, 0);
337 
338 	if (intlv_en == 0) {
339 		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
340 			if (base_limit_match(pvt, sys_addr, node_id))
341 				goto found;
342 		}
343 		goto err_no_match;
344 	}
345 
346 	if (unlikely((intlv_en != 0x01) &&
347 		     (intlv_en != 0x03) &&
348 		     (intlv_en != 0x07))) {
349 		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
350 		return NULL;
351 	}
352 
353 	bits = (((u32) sys_addr) >> 12) & intlv_en;
354 
355 	for (node_id = 0; ; ) {
356 		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
357 			break;	/* intlv_sel field matches */
358 
359 		if (++node_id >= DRAM_RANGES)
360 			goto err_no_match;
361 	}
362 
363 	/* sanity test for sys_addr */
364 	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
365 		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
366 			   "range for node %d with node interleaving enabled.\n",
367 			   __func__, sys_addr, node_id);
368 		return NULL;
369 	}
370 
371 found:
372 	return edac_mc_find((int)node_id);
373 
374 err_no_match:
375 	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
376 		 (unsigned long)sys_addr);
377 
378 	return NULL;
379 }
380 
381 /*
382  * compute the CS base address of the @csrow on the DRAM controller @dct.
383  * For details see F2x[5C:40] in the processor's BKDG
384  */
385 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
386 				 u64 *base, u64 *mask)
387 {
388 	u64 csbase, csmask, base_bits, mask_bits;
389 	u8 addr_shift;
390 
391 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
392 		csbase		= pvt->csels[dct].csbases[csrow];
393 		csmask		= pvt->csels[dct].csmasks[csrow];
394 		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
395 		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
396 		addr_shift	= 4;
397 
398 	/*
399 	 * F16h and F15h, models 30h and later need two addr_shift values:
400 	 * 8 for high and 6 for low (cf. F16h BKDG).
401 	 */
402 	} else if (pvt->fam == 0x16 ||
403 		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
404 		csbase          = pvt->csels[dct].csbases[csrow];
405 		csmask          = pvt->csels[dct].csmasks[csrow >> 1];
406 
407 		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
408 		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;
409 
410 		*mask = ~0ULL;
411 		/* poke holes for the csmask */
412 		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
413 			   (GENMASK_ULL(30, 19) << 8));
414 
415 		*mask |= (csmask & GENMASK_ULL(15, 5))  << 6;
416 		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
417 
418 		return;
419 	} else {
420 		csbase		= pvt->csels[dct].csbases[csrow];
421 		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
422 		addr_shift	= 8;
423 
424 		if (pvt->fam == 0x15)
425 			base_bits = mask_bits =
426 				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
427 		else
428 			base_bits = mask_bits =
429 				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
430 	}
431 
432 	*base  = (csbase & base_bits) << addr_shift;
433 
434 	*mask  = ~0ULL;
435 	/* poke holes for the csmask */
436 	*mask &= ~(mask_bits << addr_shift);
437 	/* OR them in */
438 	*mask |= (csmask & mask_bits) << addr_shift;
439 }
440 
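/*
 * Iterators over a DCT's/UMC's chip-select base and mask registers, and over
 * all UMCs on the node (fam_type->max_mcs).
 */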
441 #define for_each_chip_select(i, dct, pvt) \
442 	for (i = 0; i < pvt->csels[dct].b_cnt; i++)
443 
444 #define chip_select_base(i, dct, pvt) \
445 	pvt->csels[dct].csbases[i]
446 
447 #define for_each_chip_select_mask(i, dct, pvt) \
448 	for (i = 0; i < pvt->csels[dct].m_cnt; i++)
449 
450 #define for_each_umc(i) \
451 	for (i = 0; i < fam_type->max_mcs; i++)
452 
453 /*
454  * @input_addr is an InputAddr associated with the node given by mci. Return the
455  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
456  */
457 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
458 {
459 	struct amd64_pvt *pvt;
460 	int csrow;
461 	u64 base, mask;
462 
463 	pvt = mci->pvt_info;
464 
465 	for_each_chip_select(csrow, 0, pvt) {
466 		if (!csrow_enabled(csrow, 0, pvt))
467 			continue;
468 
469 		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
470 
471 		mask = ~mask;
472 
473 		if ((input_addr & mask) == (base & mask)) {
474 			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
475 				 (unsigned long)input_addr, csrow,
476 				 pvt->mc_node_id);
477 
478 			return csrow;
479 		}
480 	}
481 	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
482 		 (unsigned long)input_addr, pvt->mc_node_id);
483 
484 	return -1;
485 }
486 
487 /*
488  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
489  * for the node represented by mci. Info is passed back in *hole_base,
490  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
491  * info is invalid. Info may be invalid for either of the following reasons:
492  *
493  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
494  *   Address Register does not exist.
495  *
496  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
497  *   indicating that its contents are not valid.
498  *
499  * The values passed back in *hole_base, *hole_offset, and *hole_size are
500  * complete 32-bit values despite the fact that the bitfields in the DHAR
501  * only represent bits 31-24 of the base and offset values.
502  */
503 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
504 			     u64 *hole_offset, u64 *hole_size)
505 {
506 	struct amd64_pvt *pvt = mci->pvt_info;
507 
508 	/* only revE and later have the DRAM Hole Address Register */
509 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
510 		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
511 			 pvt->ext_model, pvt->mc_node_id);
512 		return 1;
513 	}
514 
515 	/* valid for Fam10h and above */
516 	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
517 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
518 		return 1;
519 	}
520 
521 	if (!dhar_valid(pvt)) {
522 		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
523 			 pvt->mc_node_id);
524 		return 1;
525 	}
526 
527 	/* This node has Memory Hoisting */
528 
529 	/* +------------------+--------------------+--------------------+-----
530 	 * | memory           | DRAM hole          | relocated          |
531 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
532 	 * |                  |                    | DRAM hole          |
533 	 * |                  |                    | [0x100000000,      |
534 	 * |                  |                    |  (0x100000000+     |
535 	 * |                  |                    |   (0xffffffff-x))] |
536 	 * +------------------+--------------------+--------------------+-----
537 	 *
538 	 * Above is a diagram of physical memory showing the DRAM hole and the
539 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
540 	 * starts at address x (the base address) and extends through address
541 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
542 	 * addresses in the hole so that they start at 0x100000000.
543 	 */
544 
545 	*hole_base = dhar_base(pvt);
546 	*hole_size = (1ULL << 32) - *hole_base;
547 
548 	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
549 					: k8_dhar_offset(pvt);
550 
551 	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
552 		 pvt->mc_node_id, (unsigned long)*hole_base,
553 		 (unsigned long)*hole_offset, (unsigned long)*hole_size);
554 
555 	return 0;
556 }
557 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
558 
559 /*
560  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
561  * assumed that sys_addr maps to the node given by mci.
562  *
563  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
564  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
565  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
566  * then it is also involved in translating a SysAddr to a DramAddr. Sections
567  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
568  * These parts of the documentation are unclear. I interpret them as follows:
569  *
570  * When node n receives a SysAddr, it processes the SysAddr as follows:
571  *
572  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
573  *    Limit registers for node n. If the SysAddr is not within the range
574  *    specified by the base and limit values, then node n ignores the Sysaddr
575  *    (since it does not map to node n). Otherwise continue to step 2 below.
576  *
577  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
578  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
579  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
580  *    hole. If not, skip to step 3 below. Else get the value of the
581  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
582  *    offset defined by this value from the SysAddr.
583  *
584  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
585  *    Base register for node n. To obtain the DramAddr, subtract the base
586  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
587  */
588 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
589 {
590 	struct amd64_pvt *pvt = mci->pvt_info;
591 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
592 	int ret;
593 
594 	dram_base = get_dram_base(pvt, pvt->mc_node_id);
595 
596 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
597 				      &hole_size);
598 	if (!ret) {
599 		if ((sys_addr >= (1ULL << 32)) &&
600 		    (sys_addr < ((1ULL << 32) + hole_size))) {
601 			/* use DHAR to translate SysAddr to DramAddr */
602 			dram_addr = sys_addr - hole_offset;
603 
604 			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
605 				 (unsigned long)sys_addr,
606 				 (unsigned long)dram_addr);
607 
608 			return dram_addr;
609 		}
610 	}
611 
612 	/*
613 	 * Translate the SysAddr to a DramAddr as shown near the start of
614 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
615 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
616 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
617 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
618 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
619 	 * Programmer's Manual Volume 1 Application Programming.
620 	 */
621 	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
622 
623 	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
624 		 (unsigned long)sys_addr, (unsigned long)dram_addr);
625 	return dram_addr;
626 }
627 
628 /*
629  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
630  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
631  * for node interleaving.
632  */
633 static int num_node_interleave_bits(unsigned intlv_en)
634 {
635 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
636 	int n;
637 
638 	BUG_ON(intlv_en > 7);
639 	n = intlv_shift_table[intlv_en];
640 	return n;
641 }
642 
643 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
644 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
645 {
646 	struct amd64_pvt *pvt;
647 	int intlv_shift;
648 	u64 input_addr;
649 
650 	pvt = mci->pvt_info;
651 
652 	/*
653 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
654 	 * concerning translating a DramAddr to an InputAddr.
655 	 */
656 	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
657 	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
658 		      (dram_addr & 0xfff);
659 
660 	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
661 		 intlv_shift, (unsigned long)dram_addr,
662 		 (unsigned long)input_addr);
663 
664 	return input_addr;
665 }
666 
667 /*
668  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
669  * assumed that @sys_addr maps to the node given by mci.
670  */
671 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
672 {
673 	u64 input_addr;
674 
675 	input_addr =
676 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
677 
678 	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
679 		 (unsigned long)sys_addr, (unsigned long)input_addr);
680 
681 	return input_addr;
682 }
683 
684 /* Map the Error address to a PAGE and PAGE OFFSET. */
685 static inline void error_address_to_page_and_offset(u64 error_address,
686 						    struct err_info *err)
687 {
688 	err->page = (u32) (error_address >> PAGE_SHIFT);
689 	err->offset = ((u32) error_address) & ~PAGE_MASK;
690 }
691 
692 /*
693  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
694  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
695  * of a node that detected an ECC memory error.  mci represents the node that
696  * the error address maps to (possibly different from the node that detected
697  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
698  * error.
699  */
700 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
701 {
702 	int csrow;
703 
704 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
705 
706 	if (csrow == -1)
707 		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
708 				  "address 0x%lx\n", (unsigned long)sys_addr);
709 	return csrow;
710 }
711 
712 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
713 
714 /*
715  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
716  * are ECC capable.
717  */
718 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
719 {
720 	unsigned long edac_cap = EDAC_FLAG_NONE;
721 	u8 bit;
722 
723 	if (pvt->umc) {
724 		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
725 
726 		for_each_umc(i) {
727 			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
728 				continue;
729 
730 			umc_en_mask |= BIT(i);
731 
732 			/* UMC Configuration bit 12 (DimmEccEn) */
733 			if (pvt->umc[i].umc_cfg & BIT(12))
734 				dimm_ecc_en_mask |= BIT(i);
735 		}
736 
737 		if (umc_en_mask == dimm_ecc_en_mask)
738 			edac_cap = EDAC_FLAG_SECDED;
739 	} else {
740 		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
741 			? 19
742 			: 17;
743 
744 		if (pvt->dclr0 & BIT(bit))
745 			edac_cap = EDAC_FLAG_SECDED;
746 	}
747 
748 	return edac_cap;
749 }
750 
751 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
752 
753 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
754 {
755 	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
756 
757 	if (pvt->dram_type == MEM_LRDDR3) {
758 		u32 dcsm = pvt->csels[chan].csmasks[0];
759 		/*
760 		 * It's assumed all LRDIMMs in a DCT are going to be of the
761 		 * same 'type' until proven otherwise. So, use a cs
762 		 * value of '0' here to get the dcsm value.
763 		 */
764 		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
765 	}
766 
767 	edac_dbg(1, "All DIMMs support ECC:%s\n",
768 		    (dclr & BIT(19)) ? "yes" : "no");
769 
771 	edac_dbg(1, "  PAR/ERR parity: %s\n",
772 		 (dclr & BIT(8)) ?  "enabled" : "disabled");
773 
774 	if (pvt->fam == 0x10)
775 		edac_dbg(1, "  DCT 128bit mode width: %s\n",
776 			 (dclr & BIT(11)) ?  "128b" : "64b");
777 
778 	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
779 		 (dclr & BIT(12)) ?  "yes" : "no",
780 		 (dclr & BIT(13)) ?  "yes" : "no",
781 		 (dclr & BIT(14)) ?  "yes" : "no",
782 		 (dclr & BIT(15)) ?  "yes" : "no");
783 }
784 
785 #define CS_EVEN_PRIMARY		BIT(0)
786 #define CS_ODD_PRIMARY		BIT(1)
787 #define CS_EVEN_SECONDARY	BIT(2)
788 #define CS_ODD_SECONDARY	BIT(3)
789 
790 #define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
791 #define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)
792 
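/*
 * Build a CS_* bitmask describing which chip selects (even/odd,
 * primary/secondary) are enabled for @dimm on UMC @ctrl.
 */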
793 static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
794 {
795 	int cs_mode = 0;
796 
797 	if (csrow_enabled(2 * dimm, ctrl, pvt))
798 		cs_mode |= CS_EVEN_PRIMARY;
799 
800 	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
801 		cs_mode |= CS_ODD_PRIMARY;
802 
803 	/* Asymmetric dual-rank DIMM support. */
804 	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
805 		cs_mode |= CS_ODD_SECONDARY;
806 
807 	return cs_mode;
808 }
809 
810 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
811 {
812 	int dimm, size0, size1, cs0, cs1, cs_mode;
813 
814 	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
815 
816 	for (dimm = 0; dimm < 2; dimm++) {
817 		cs0 = dimm * 2;
818 		cs1 = dimm * 2 + 1;
819 
820 		cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
821 
822 		size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
823 		size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
824 
825 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
826 				cs0,	size0,
827 				cs1,	size1);
828 	}
829 }
830 
831 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
832 {
833 	struct amd64_umc *umc;
834 	u32 i, tmp, umc_base;
835 
836 	for_each_umc(i) {
837 		umc_base = get_umc_base(i);
838 		umc = &pvt->umc[i];
839 
840 		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
841 		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
842 		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
843 		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
844 
845 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
846 		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
847 
848 		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
849 		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
850 		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
851 
852 		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
853 				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
854 				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
855 		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
856 				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
857 		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
858 				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
859 		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
860 				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
861 
862 		if (pvt->dram_type == MEM_LRDDR4) {
863 			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
864 			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
865 					i, 1 << ((tmp >> 4) & 0x3));
866 		}
867 
868 		debug_display_dimm_sizes_df(pvt, i);
869 	}
870 
871 	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
872 		 pvt->dhar, dhar_base(pvt));
873 }
874 
875 /* Display and decode various NB registers for debug purposes. */
876 static void __dump_misc_regs(struct amd64_pvt *pvt)
877 {
878 	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
879 
880 	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
881 		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
882 
883 	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
884 		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
885 		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
886 
887 	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
888 
889 	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
890 
891 	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
892 		 pvt->dhar, dhar_base(pvt),
893 		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
894 				   : f10_dhar_offset(pvt));
895 
896 	debug_display_dimm_sizes(pvt, 0);
897 
898 	/* everything below this point is Fam10h and above */
899 	if (pvt->fam == 0xf)
900 		return;
901 
902 	debug_display_dimm_sizes(pvt, 1);
903 
904 	/* Only if NOT ganged does dclr1 have valid info */
905 	if (!dct_ganging_enabled(pvt))
906 		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
907 }
908 
909 /* Display and decode various NB registers for debug purposes. */
910 static void dump_misc_regs(struct amd64_pvt *pvt)
911 {
912 	if (pvt->umc)
913 		__dump_misc_regs_df(pvt);
914 	else
915 		__dump_misc_regs(pvt);
916 
917 	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
918 
919 	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
920 }
921 
922 /*
923  * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
924  */
925 static void prep_chip_selects(struct amd64_pvt *pvt)
926 {
927 	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
928 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
929 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
930 	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
931 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
932 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
933 	} else if (pvt->fam >= 0x17) {
934 		int umc;
935 
936 		for_each_umc(umc) {
937 			pvt->csels[umc].b_cnt = 4;
938 			pvt->csels[umc].m_cnt = 2;
939 		}
940 
941 	} else {
942 		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
943 		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
944 	}
945 }
946 
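/*
 * Read the primary and secondary chip-select base/mask registers of each UMC
 * via SMN and cache them in pvt->csels[].
 */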
947 static void read_umc_base_mask(struct amd64_pvt *pvt)
948 {
949 	u32 umc_base_reg, umc_base_reg_sec;
950 	u32 umc_mask_reg, umc_mask_reg_sec;
951 	u32 base_reg, base_reg_sec;
952 	u32 mask_reg, mask_reg_sec;
953 	u32 *base, *base_sec;
954 	u32 *mask, *mask_sec;
955 	int cs, umc;
956 
957 	for_each_umc(umc) {
958 		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
959 		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
960 
961 		for_each_chip_select(cs, umc, pvt) {
962 			base = &pvt->csels[umc].csbases[cs];
963 			base_sec = &pvt->csels[umc].csbases_sec[cs];
964 
965 			base_reg = umc_base_reg + (cs * 4);
966 			base_reg_sec = umc_base_reg_sec + (cs * 4);
967 
968 			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
969 				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
970 					 umc, cs, *base, base_reg);
971 
972 			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
973 				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
974 					 umc, cs, *base_sec, base_reg_sec);
975 		}
976 
977 		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
978 		umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
979 
980 		for_each_chip_select_mask(cs, umc, pvt) {
981 			mask = &pvt->csels[umc].csmasks[cs];
982 			mask_sec = &pvt->csels[umc].csmasks_sec[cs];
983 
984 			mask_reg = umc_mask_reg + (cs * 4);
985 			mask_reg_sec = umc_mask_reg_sec + (cs * 4);
986 
987 			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
988 				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
989 					 umc, cs, *mask, mask_reg);
990 
991 			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
992 				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
993 					 umc, cs, *mask_sec, mask_reg_sec);
994 		}
995 	}
996 }
997 
998 /*
999  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
1000  */
1001 static void read_dct_base_mask(struct amd64_pvt *pvt)
1002 {
1003 	int cs;
1004 
1005 	prep_chip_selects(pvt);
1006 
1007 	if (pvt->umc)
1008 		return read_umc_base_mask(pvt);
1009 
1010 	for_each_chip_select(cs, 0, pvt) {
1011 		int reg0   = DCSB0 + (cs * 4);
1012 		int reg1   = DCSB1 + (cs * 4);
1013 		u32 *base0 = &pvt->csels[0].csbases[cs];
1014 		u32 *base1 = &pvt->csels[1].csbases[cs];
1015 
1016 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1017 			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
1018 				 cs, *base0, reg0);
1019 
1020 		if (pvt->fam == 0xf)
1021 			continue;
1022 
1023 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1024 			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
1025 				 cs, *base1, (pvt->fam == 0x10) ? reg1
1026 							: reg0);
1027 	}
1028 
1029 	for_each_chip_select_mask(cs, 0, pvt) {
1030 		int reg0   = DCSM0 + (cs * 4);
1031 		int reg1   = DCSM1 + (cs * 4);
1032 		u32 *mask0 = &pvt->csels[0].csmasks[cs];
1033 		u32 *mask1 = &pvt->csels[1].csmasks[cs];
1034 
1035 		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1036 			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
1037 				 cs, *mask0, reg0);
1038 
1039 		if (pvt->fam == 0xf)
1040 			continue;
1041 
1042 		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1043 			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
1044 				 cs, *mask1, (pvt->fam == 0x10) ? reg1
1045 							: reg0);
1046 	}
1047 }
1048 
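/*
 * Determine the DRAM type (DDR/DDR2/DDR3/DDR4 and (L)RDIMM variants) from the
 * UMC DIMM configuration bits when UMCs are present, otherwise from the
 * family-specific DCLR/DCHR/DRAM control bits.
 */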
1049 static void determine_memory_type(struct amd64_pvt *pvt)
1050 {
1051 	u32 dram_ctrl, dcsm;
1052 
1053 	if (pvt->umc) {
1054 		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1055 			pvt->dram_type = MEM_LRDDR4;
1056 		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1057 			pvt->dram_type = MEM_RDDR4;
1058 		else
1059 			pvt->dram_type = MEM_DDR4;
1060 		return;
1061 	}
1062 
1063 	switch (pvt->fam) {
1064 	case 0xf:
1065 		if (pvt->ext_model >= K8_REV_F)
1066 			goto ddr3;
1067 
1068 		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1069 		return;
1070 
1071 	case 0x10:
1072 		if (pvt->dchr0 & DDR3_MODE)
1073 			goto ddr3;
1074 
1075 		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1076 		return;
1077 
1078 	case 0x15:
1079 		if (pvt->model < 0x60)
1080 			goto ddr3;
1081 
1082 		/*
1083 		 * Model 0x60 needs special handling:
1084 		 *
1085 		 * We use a Chip Select value of '0' to obtain dcsm.
1086 		 * Theoretically, it is possible to populate LRDIMMs of different
1087 		 * 'Rank' value on a DCT. But this is not the common case. So,
1088 		 * it's reasonable to assume all DIMMs are going to be of the
1089 		 * same 'type' until proven otherwise.
1090 		 */
1091 		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1092 		dcsm = pvt->csels[0].csmasks[0];
1093 
1094 		if (((dram_ctrl >> 8) & 0x7) == 0x2)
1095 			pvt->dram_type = MEM_DDR4;
1096 		else if (pvt->dclr0 & BIT(16))
1097 			pvt->dram_type = MEM_DDR3;
1098 		else if (dcsm & 0x3)
1099 			pvt->dram_type = MEM_LRDDR3;
1100 		else
1101 			pvt->dram_type = MEM_RDDR3;
1102 
1103 		return;
1104 
1105 	case 0x16:
1106 		goto ddr3;
1107 
1108 	default:
1109 		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1110 		pvt->dram_type = MEM_EMPTY;
1111 	}
1112 	return;
1113 
1114 ddr3:
1115 	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1116 }
1117 
1118 /* Get the number of DCT channels the memory controller is using. */
1119 static int k8_early_channel_count(struct amd64_pvt *pvt)
1120 {
1121 	int flag;
1122 
1123 	if (pvt->ext_model >= K8_REV_F)
1124 		/* RevF (NPT) and later */
1125 		flag = pvt->dclr0 & WIDTH_128;
1126 	else
1127 		/* RevE and earlier */
1128 		flag = pvt->dclr0 & REVE_WIDTH_128;
1129 
1130 	/* not used */
1131 	pvt->dclr1 = 0;
1132 
1133 	return (flag) ? 2 : 1;
1134 }
1135 
1136 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1137 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1138 {
1139 	u16 mce_nid = topology_die_id(m->extcpu);
1140 	struct mem_ctl_info *mci;
1141 	u8 start_bit = 1;
1142 	u8 end_bit   = 47;
1143 	u64 addr;
1144 
1145 	mci = edac_mc_find(mce_nid);
1146 	if (!mci)
1147 		return 0;
1148 
1149 	pvt = mci->pvt_info;
1150 
1151 	if (pvt->fam == 0xf) {
1152 		start_bit = 3;
1153 		end_bit   = 39;
1154 	}
1155 
1156 	addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1157 
1158 	/*
1159 	 * Erratum 637 workaround
1160 	 */
1161 	if (pvt->fam == 0x15) {
1162 		u64 cc6_base, tmp_addr;
1163 		u32 tmp;
1164 		u8 intlv_en;
1165 
1166 		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1167 			return addr;
1168 
1169 
1170 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1171 		intlv_en = tmp >> 21 & 0x7;
1172 
1173 		/* add [47:27] + 3 trailing bits */
1174 		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;
1175 
1176 		/* reverse and add DramIntlvEn */
1177 		cc6_base |= intlv_en ^ 0x7;
1178 
1179 		/* pin at [47:24] */
1180 		cc6_base <<= 24;
1181 
1182 		if (!intlv_en)
1183 			return cc6_base | (addr & GENMASK_ULL(23, 0));
1184 
1185 		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1186 
1187 							/* faster log2 */
1188 		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1189 
1190 		/* OR DramIntlvSel into bits [14:12] */
1191 		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1192 
1193 		/* add remaining [11:0] bits from original MC4_ADDR */
1194 		tmp_addr |= addr & GENMASK_ULL(11, 0);
1195 
1196 		return cc6_base | tmp_addr;
1197 	}
1198 
1199 	return addr;
1200 }
1201 
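/*
 * Return the PCI device with the given vendor/device ID that sits on the same
 * bus and slot as @related, i.e. another function of the same northbridge
 * device. The caller drops the reference with pci_dev_put().
 */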
1202 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1203 						unsigned int device,
1204 						struct pci_dev *related)
1205 {
1206 	struct pci_dev *dev = NULL;
1207 
1208 	while ((dev = pci_get_device(vendor, device, dev))) {
1209 		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1210 		    (dev->bus->number == related->bus->number) &&
1211 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1212 			break;
1213 	}
1214 
1215 	return dev;
1216 }
1217 
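/*
 * Read the DRAM base/limit register pair for @range. On F15h, also merge in
 * the CC6 save area by reading the destination node's local node limit
 * register.
 */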
1218 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1219 {
1220 	struct amd_northbridge *nb;
1221 	struct pci_dev *f1 = NULL;
1222 	unsigned int pci_func;
1223 	int off = range << 3;
1224 	u32 llim;
1225 
1226 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
1227 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1228 
1229 	if (pvt->fam == 0xf)
1230 		return;
1231 
1232 	if (!dram_rw(pvt, range))
1233 		return;
1234 
1235 	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
1236 	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1237 
1238 	/* F15h: factor in CC6 save area by reading dst node's limit reg */
1239 	if (pvt->fam != 0x15)
1240 		return;
1241 
1242 	nb = node_to_amd_nb(dram_dst_node(pvt, range));
1243 	if (WARN_ON(!nb))
1244 		return;
1245 
1246 	if (pvt->model == 0x60)
1247 		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1248 	else if (pvt->model == 0x30)
1249 		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1250 	else
1251 		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1252 
1253 	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1254 	if (WARN_ON(!f1))
1255 		return;
1256 
1257 	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1258 
1259 	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1260 
1261 				    /* {[39:27],111b} */
1262 	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1263 
1264 	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1265 
1266 				    /* [47:40] */
1267 	pvt->ranges[range].lim.hi |= llim >> 13;
1268 
1269 	pci_dev_put(f1);
1270 }
1271 
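/*
 * K8 error address decoding: find the node owning @sys_addr, map it to a
 * csrow and, with ChipKill enabled, use the ECC syndrome to pick the channel.
 */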
1272 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1273 				    struct err_info *err)
1274 {
1275 	struct amd64_pvt *pvt = mci->pvt_info;
1276 
1277 	error_address_to_page_and_offset(sys_addr, err);
1278 
1279 	/*
1280 	 * Find out which node the error address belongs to. This may be
1281 	 * different from the node that detected the error.
1282 	 */
1283 	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1284 	if (!err->src_mci) {
1285 		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1286 			     (unsigned long)sys_addr);
1287 		err->err_code = ERR_NODE;
1288 		return;
1289 	}
1290 
1291 	/* Now map the sys_addr to a CSROW */
1292 	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1293 	if (err->csrow < 0) {
1294 		err->err_code = ERR_CSROW;
1295 		return;
1296 	}
1297 
1298 	/* CHIPKILL enabled */
1299 	if (pvt->nbcfg & NBCFG_CHIPKILL) {
1300 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1301 		if (err->channel < 0) {
1302 			/*
1303 			 * Syndrome didn't map, so we don't know which of the
1304 			 * 2 DIMMs is in error. So we need to ID 'both' of them
1305 			 * as suspect.
1306 			 */
1307 			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1308 				      "possible error reporting race\n",
1309 				      err->syndrome);
1310 			err->err_code = ERR_CHANNEL;
1311 			return;
1312 		}
1313 	} else {
1314 		/*
1315 		 * non-chipkill ecc mode
1316 		 *
1317 		 * The k8 documentation is unclear about how to determine the
1318 		 * channel number when using non-chipkill memory.  This method
1319 		 * was obtained from email communication with someone at AMD.
1320 		 * (Wish the email was placed in this comment - norsk)
1321 		 */
1322 		err->channel = ((sys_addr & BIT(3)) != 0);
1323 	}
1324 }
1325 
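/*
 * Map a DBAM cs_mode encoding to a DDR2 chip-select size in MB; the size
 * doubles when the DCT runs in 128-bit mode.
 */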
1326 static int ddr2_cs_size(unsigned i, bool dct_width)
1327 {
1328 	unsigned shift = 0;
1329 
1330 	if (i <= 2)
1331 		shift = i;
1332 	else if (!(i & 0x1))
1333 		shift = i >> 1;
1334 	else
1335 		shift = (i + 1) >> 1;
1336 
1337 	return 128 << (shift + !!dct_width);
1338 }
1339 
1340 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1341 				  unsigned cs_mode, int cs_mask_nr)
1342 {
1343 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1344 
1345 	if (pvt->ext_model >= K8_REV_F) {
1346 		WARN_ON(cs_mode > 11);
1347 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1348 	}
1349 	else if (pvt->ext_model >= K8_REV_D) {
1350 		unsigned diff;
1351 		WARN_ON(cs_mode > 10);
1352 
1353 		/*
1354 		 * the below calculation, besides trying to win an obfuscated C
1355 		 * contest, maps cs_mode values to DIMM chip select sizes. The
1356 		 * mappings are:
1357 		 *
1358 		 * cs_mode	CS size (MB)
1359 		 * =======	============
1360 		 * 0		32
1361 		 * 1		64
1362 		 * 2		128
1363 		 * 3		128
1364 		 * 4		256
1365 		 * 5		512
1366 		 * 6		256
1367 		 * 7		512
1368 		 * 8		1024
1369 		 * 9		1024
1370 		 * 10		2048
1371 		 *
1372 		 * Basically, it calculates a value with which to shift the
1373 		 * smallest CS size of 32MB.
1374 		 *
1375 		 * ddr[23]_cs_size have a similar purpose.
1376 		 */
1377 		diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1378 
1379 		return 32 << (cs_mode - diff);
1380 	}
1381 	else {
1382 		WARN_ON(cs_mode > 6);
1383 		return 32 << cs_mode;
1384 	}
1385 }
1386 
1387 /*
1388  * Get the number of DCT channels in use.
1389  *
1390  * Return:
1391  *	number of Memory Channels in operation
1392  * Pass back:
1393  *	contents of the DCL0_LOW register
1394  */
1395 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1396 {
1397 	int i, j, channels = 0;
1398 
1399 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1400 	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1401 		return 2;
1402 
1403 	/*
1404 	 * Need to check if in unganged mode: in that case, there are 2 channels,
1405 	 * but they are not in 128 bit mode and thus the above 'dclr0' status
1406 	 * bit will be OFF.
1407 	 *
1408 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1409 	 * their CSEnable bit on. If so, then SINGLE DIMM case.
1410 	 */
1411 	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1412 
1413 	/*
1414 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1415 	 * is more than just one DIMM present in unganged mode. Need to check
1416 	 * both controllers since DIMMs can be placed in either one.
1417 	 */
1418 	for (i = 0; i < 2; i++) {
1419 		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1420 
1421 		for (j = 0; j < 4; j++) {
1422 			if (DBAM_DIMM(j, dbam) > 0) {
1423 				channels++;
1424 				break;
1425 			}
1426 		}
1427 	}
1428 
1429 	if (channels > 2)
1430 		channels = 2;
1431 
1432 	amd64_info("MCT channel count: %d\n", channels);
1433 
1434 	return channels;
1435 }
1436 
1437 static int f17_early_channel_count(struct amd64_pvt *pvt)
1438 {
1439 	int i, channels = 0;
1440 
1441 	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
1442 	for_each_umc(i)
1443 		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1444 
1445 	amd64_info("MCT channel count: %d\n", channels);
1446 
1447 	return channels;
1448 }
1449 
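/*
 * Same mapping for DDR3; -1 denotes a reserved cs_mode encoding.
 * ddr3_lrdimm_cs_size() and ddr4_cs_size() below do the same for LRDIMM DDR3
 * and DDR4.
 */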
1450 static int ddr3_cs_size(unsigned i, bool dct_width)
1451 {
1452 	unsigned shift = 0;
1453 	int cs_size = 0;
1454 
1455 	if (i == 0 || i == 3 || i == 4)
1456 		cs_size = -1;
1457 	else if (i <= 2)
1458 		shift = i;
1459 	else if (i == 12)
1460 		shift = 7;
1461 	else if (!(i & 0x1))
1462 		shift = i >> 1;
1463 	else
1464 		shift = (i + 1) >> 1;
1465 
1466 	if (cs_size != -1)
1467 		cs_size = (128 * (1 << !!dct_width)) << shift;
1468 
1469 	return cs_size;
1470 }
1471 
1472 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1473 {
1474 	unsigned shift = 0;
1475 	int cs_size = 0;
1476 
1477 	if (i < 4 || i == 6)
1478 		cs_size = -1;
1479 	else if (i == 12)
1480 		shift = 7;
1481 	else if (!(i & 0x1))
1482 		shift = i >> 1;
1483 	else
1484 		shift = (i + 1) >> 1;
1485 
1486 	if (cs_size != -1)
1487 		cs_size = rank_multiply * (128 << shift);
1488 
1489 	return cs_size;
1490 }
1491 
1492 static int ddr4_cs_size(unsigned i)
1493 {
1494 	int cs_size = 0;
1495 
1496 	if (i == 0)
1497 		cs_size = -1;
1498 	else if (i == 1)
1499 		cs_size = 1024;
1500 	else
1501 		/* Min cs_size = 1G */
1502 		cs_size = 1024 * (1 << (i >> 1));
1503 
1504 	return cs_size;
1505 }
1506 
1507 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1508 				   unsigned cs_mode, int cs_mask_nr)
1509 {
1510 	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1511 
1512 	WARN_ON(cs_mode > 11);
1513 
1514 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1515 		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1516 	else
1517 		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1518 }
1519 
1520 /*
1521  * F15h supports only 64bit DCT interfaces
1522  */
1523 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1524 				   unsigned cs_mode, int cs_mask_nr)
1525 {
1526 	WARN_ON(cs_mode > 12);
1527 
1528 	return ddr3_cs_size(cs_mode, false);
1529 }
1530 
1531 /* F15h M60h supports DDR4 mapping as well. */
1532 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1533 					unsigned cs_mode, int cs_mask_nr)
1534 {
1535 	int cs_size;
1536 	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1537 
1538 	WARN_ON(cs_mode > 12);
1539 
1540 	if (pvt->dram_type == MEM_DDR4) {
1541 		if (cs_mode > 9)
1542 			return -1;
1543 
1544 		cs_size = ddr4_cs_size(cs_mode);
1545 	} else if (pvt->dram_type == MEM_LRDDR3) {
1546 		unsigned rank_multiply = dcsm & 0xf;
1547 
1548 		if (rank_multiply == 3)
1549 			rank_multiply = 4;
1550 		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1551 	} else {
1552 		/* Minimum cs size is 512MB for F15h M60h */
1553 		if (cs_mode == 0x1)
1554 			return -1;
1555 
1556 		cs_size = ddr3_cs_size(cs_mode, false);
1557 	}
1558 
1559 	return cs_size;
1560 }
1561 
1562 /*
1563  * F16h and F15h model 30h have only limited cs_modes.
1564  */
1565 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1566 				unsigned cs_mode, int cs_mask_nr)
1567 {
1568 	WARN_ON(cs_mode > 12);
1569 
1570 	if (cs_mode == 6 || cs_mode == 8 ||
1571 	    cs_mode == 9 || cs_mode == 12)
1572 		return -1;
1573 	else
1574 		return ddr3_cs_size(cs_mode, false);
1575 }
1576 
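/*
 * Derive the chip-select size in MB from the (de-interleaved) address mask of
 * the DIMM that @csrow_nr belongs to on UMC @umc.
 */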
1577 static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1578 				    unsigned int cs_mode, int csrow_nr)
1579 {
1580 	u32 addr_mask_orig, addr_mask_deinterleaved;
1581 	u32 msb, weight, num_zero_bits;
1582 	int dimm, size = 0;
1583 
1584 	/* No Chip Selects are enabled. */
1585 	if (!cs_mode)
1586 		return size;
1587 
1588 	/* Requested size of an even CS but none are enabled. */
1589 	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1590 		return size;
1591 
1592 	/* Requested size of an odd CS but none are enabled. */
1593 	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1594 		return size;
1595 
1596 	/*
1597 	 * There is one mask per DIMM, and two Chip Selects per DIMM.
1598 	 *	CS0 and CS1 -> DIMM0
1599 	 *	CS2 and CS3 -> DIMM1
1600 	 */
1601 	dimm = csrow_nr >> 1;
1602 
1603 	/* Asymmetric dual-rank DIMM support. */
1604 	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1605 		addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
1606 	else
1607 		addr_mask_orig = pvt->csels[umc].csmasks[dimm];
1608 
1609 	/*
1610 	 * The number of zero bits in the mask is equal to the number of bits
1611 	 * in a full mask minus the number of bits in the current mask.
1612 	 *
1613 	 * The MSB is the number of bits in the full mask because BIT[0] is
1614 	 * always 0.
1615 	 */
1616 	msb = fls(addr_mask_orig) - 1;
1617 	weight = hweight_long(addr_mask_orig);
1618 	num_zero_bits = msb - weight;
1619 
1620 	/* Take the number of zero bits off from the top of the mask. */
1621 	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
1622 
1623 	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1624 	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
1625 	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1626 
1627 	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
1628 	size = (addr_mask_deinterleaved >> 2) + 1;
1629 
1630 	/* Return size in MBs. */
1631 	return size >> 10;
1632 }
1633 
1634 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1635 {
1636 
1637 	if (pvt->fam == 0xf)
1638 		return;
1639 
1640 	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1641 		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1642 			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1643 
1644 		edac_dbg(0, "  DCTs operate in %s mode\n",
1645 			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1646 
1647 		if (!dct_ganging_enabled(pvt))
1648 			edac_dbg(0, "  Address range split per DCT: %s\n",
1649 				 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1650 
1651 		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1652 			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1653 			 (dct_memory_cleared(pvt) ? "yes" : "no"));
1654 
1655 		edac_dbg(0, "  channel interleave: %s, "
1656 			 "interleave bits selector: 0x%x\n",
1657 			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1658 			 dct_sel_interleave_addr(pvt));
1659 	}
1660 
1661 	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1662 }
1663 
1664 /*
1665  * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1666  * 2.10.12 Memory Interleaving Modes).
1667  */
1668 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1669 				     u8 intlv_en, int num_dcts_intlv,
1670 				     u32 dct_sel)
1671 {
1672 	u8 channel = 0;
1673 	u8 select;
1674 
1675 	if (!(intlv_en))
1676 		return (u8)(dct_sel);
1677 
1678 	if (num_dcts_intlv == 2) {
1679 		select = (sys_addr >> 8) & 0x3;
1680 		channel = select ? 0x3 : 0;
1681 	} else if (num_dcts_intlv == 4) {
1682 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1683 		switch (intlv_addr) {
1684 		case 0x4:
1685 			channel = (sys_addr >> 8) & 0x3;
1686 			break;
1687 		case 0x5:
1688 			channel = (sys_addr >> 9) & 0x3;
1689 			break;
1690 		}
1691 	}
1692 	return channel;
1693 }
1694 
1695 /*
1696  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1697  * Interleaving Modes.
1698  */
1699 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1700 				bool hi_range_sel, u8 intlv_en)
1701 {
1702 	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1703 
1704 	if (dct_ganging_enabled(pvt))
1705 		return 0;
1706 
1707 	if (hi_range_sel)
1708 		return dct_sel_high;
1709 
1710 	/*
1711 	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1712 	 */
1713 	if (dct_interleave_enabled(pvt)) {
1714 		u8 intlv_addr = dct_sel_interleave_addr(pvt);
1715 
1716 		/* return DCT select function: 0=DCT0, 1=DCT1 */
1717 		if (!intlv_addr)
1718 			return sys_addr >> 6 & 1;
1719 
1720 		if (intlv_addr & 0x2) {
1721 			u8 shift = intlv_addr & 0x1 ? 9 : 6;
1722 			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1723 
1724 			return ((sys_addr >> shift) & 1) ^ temp;
1725 		}
1726 
1727 		if (intlv_addr & 0x4) {
1728 			u8 shift = intlv_addr & 0x1 ? 9 : 8;
1729 
1730 			return (sys_addr >> shift) & 1;
1731 		}
1732 
1733 		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1734 	}
1735 
1736 	if (dct_high_range_enabled(pvt))
1737 		return ~dct_sel_high & 1;
1738 
1739 	return 0;
1740 }
1741 
1742 /* Convert the sys_addr to the normalized DCT address */
1743 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1744 				 u64 sys_addr, bool hi_rng,
1745 				 u32 dct_sel_base_addr)
1746 {
1747 	u64 chan_off;
1748 	u64 dram_base		= get_dram_base(pvt, range);
1749 	u64 hole_off		= f10_dhar_offset(pvt);
1750 	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1751 
1752 	if (hi_rng) {
1753 		/*
1754 		 * if
1755 		 * base address of high range is below 4Gb
1756 		 * (bits [47:27] at [31:11])
1757 		 * DRAM address space on this DCT is hoisted above 4Gb	&&
1758 		 * sys_addr > 4Gb
1759 		 *
1760 		 *	remove hole offset from sys_addr
1761 		 * else
1762 		 *	remove high range offset from sys_addr
1763 		 */
1764 		if ((!(dct_sel_base_addr >> 16) ||
1765 		     dct_sel_base_addr < dhar_base(pvt)) &&
1766 		    dhar_valid(pvt) &&
1767 		    (sys_addr >= BIT_64(32)))
1768 			chan_off = hole_off;
1769 		else
1770 			chan_off = dct_sel_base_off;
1771 	} else {
1772 		/*
1773 		 * if
1774 		 * we have a valid hole		&&
1775 		 * sys_addr > 4Gb
1776 		 *
1777 		 *	remove hole
1778 		 * else
1779 		 *	remove dram base to normalize to DCT address
1780 		 */
1781 		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1782 			chan_off = hole_off;
1783 		else
1784 			chan_off = dram_base;
1785 	}
1786 
1787 	return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
1788 }
1789 
1790 /*
1791  * checks if the csrow passed in is marked as SPARED, if so returns the new
1792  * spare row
1793  */
1794 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1795 {
1796 	int tmp_cs;
1797 
1798 	if (online_spare_swap_done(pvt, dct) &&
1799 	    csrow == online_spare_bad_dramcs(pvt, dct)) {
1800 
1801 		for_each_chip_select(tmp_cs, dct, pvt) {
1802 			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1803 				csrow = tmp_cs;
1804 				break;
1805 			}
1806 		}
1807 	}
1808 	return csrow;
1809 }
1810 
1811 /*
1812  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1813  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1814  *
1815  * Return:
1816  *	-EINVAL:  NOT FOUND
1817  *	0..csrow = Chip-Select Row
1818  */
1819 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1820 {
1821 	struct mem_ctl_info *mci;
1822 	struct amd64_pvt *pvt;
1823 	u64 cs_base, cs_mask;
1824 	int cs_found = -EINVAL;
1825 	int csrow;
1826 
1827 	mci = edac_mc_find(nid);
1828 	if (!mci)
1829 		return cs_found;
1830 
1831 	pvt = mci->pvt_info;
1832 
1833 	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1834 
1835 	for_each_chip_select(csrow, dct, pvt) {
1836 		if (!csrow_enabled(csrow, dct, pvt))
1837 			continue;
1838 
1839 		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1840 
1841 		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1842 			 csrow, cs_base, cs_mask);
1843 
1844 		cs_mask = ~cs_mask;
1845 
1846 		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1847 			 (in_addr & cs_mask), (cs_base & cs_mask));
1848 
1849 		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1850 			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1851 				cs_found =  csrow;
1852 				break;
1853 			}
1854 			cs_found = f10_process_possible_spare(pvt, dct, csrow);
1855 
1856 			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1857 			break;
1858 		}
1859 	}
1860 	return cs_found;
1861 }
1862 
1863 /*
1864  * See F2x10C. Non-interleaved graphics framebuffer memory below 16GB is
1865  * swapped with a region located at the bottom of memory so that the GPU can use
1866  * the interleaved region and thus two channels.
1867  */
1868 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1869 {
1870 	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1871 
1872 	if (pvt->fam == 0x10) {
1873 		/* only revC3 and revE have that feature */
1874 		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1875 			return sys_addr;
1876 	}
1877 
1878 	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1879 
1880 	if (!(swap_reg & 0x1))
1881 		return sys_addr;
1882 
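	/*
	 * swap_base, swap_limit and rgn_size are compared against
	 * sys_addr >> 27, i.e. they are expressed in 128MB granularity.
	 */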
1883 	swap_base	= (swap_reg >> 3) & 0x7f;
1884 	swap_limit	= (swap_reg >> 11) & 0x7f;
1885 	rgn_size	= (swap_reg >> 20) & 0x7f;
1886 	tmp_addr	= sys_addr >> 27;
1887 
1888 	if (!(sys_addr >> 34) &&
1889 	    (((tmp_addr >= swap_base) &&
1890 	     (tmp_addr <= swap_limit)) ||
1891 	     (tmp_addr < rgn_size)))
1892 		return sys_addr ^ (u64)swap_base << 27;
1893 
1894 	return sys_addr;
1895 }
1896 
1897 /* For a given @dram_range, check if @sys_addr falls within it. */
1898 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1899 				  u64 sys_addr, int *chan_sel)
1900 {
1901 	int cs_found = -EINVAL;
1902 	u64 chan_addr;
1903 	u32 dct_sel_base;
1904 	u8 channel;
1905 	bool high_range = false;
1906 
1907 	u8 node_id    = dram_dst_node(pvt, range);
1908 	u8 intlv_en   = dram_intlv_en(pvt, range);
1909 	u32 intlv_sel = dram_intlv_sel(pvt, range);
1910 
1911 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1912 		 range, sys_addr, get_dram_limit(pvt, range));
1913 
1914 	if (dhar_valid(pvt) &&
1915 	    dhar_base(pvt) <= sys_addr &&
1916 	    sys_addr < BIT_64(32)) {
1917 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1918 			    sys_addr);
1919 		return -EINVAL;
1920 	}
1921 
1922 	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1923 		return -EINVAL;
1924 
1925 	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1926 
1927 	dct_sel_base = dct_sel_baseaddr(pvt);
1928 
1929 	/*
1930 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1931 	 * select between DCT0 and DCT1.
1932 	 */
1933 	if (dct_high_range_enabled(pvt) &&
1934 	   !dct_ganging_enabled(pvt) &&
1935 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1936 		high_range = true;
1937 
1938 	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1939 
1940 	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1941 					  high_range, dct_sel_base);
1942 
1943 	/* Remove node interleaving, see F1x120 */
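	/*
	 * This squeezes the hweight8(intlv_en) interleave bit(s) out of the
	 * address just above bit 11, keeping the 4K page offset intact.
	 */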
1944 	if (intlv_en)
1945 		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1946 			    (chan_addr & 0xfff);
1947 
1948 	/* remove channel interleave */
1949 	if (dct_interleave_enabled(pvt) &&
1950 	   !dct_high_range_enabled(pvt) &&
1951 	   !dct_ganging_enabled(pvt)) {
1952 
1953 		if (dct_sel_interleave_addr(pvt) != 1) {
1954 			if (dct_sel_interleave_addr(pvt) == 0x3)
1955 				/* hash 9 */
1956 				chan_addr = ((chan_addr >> 10) << 9) |
1957 					     (chan_addr & 0x1ff);
1958 			else
1959 				/* A[6] or hash 6 */
1960 				chan_addr = ((chan_addr >> 7) << 6) |
1961 					     (chan_addr & 0x3f);
1962 		} else
1963 			/* A[12] */
1964 			chan_addr = ((chan_addr >> 13) << 12) |
1965 				     (chan_addr & 0xfff);
1966 	}
1967 
1968 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
1969 
1970 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1971 
1972 	if (cs_found >= 0)
1973 		*chan_sel = channel;
1974 
1975 	return cs_found;
1976 }
1977 
1978 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1979 					u64 sys_addr, int *chan_sel)
1980 {
1981 	int cs_found = -EINVAL;
1982 	int num_dcts_intlv = 0;
1983 	u64 chan_addr, chan_offset;
1984 	u64 dct_base, dct_limit;
1985 	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1986 	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1987 
1988 	u64 dhar_offset		= f10_dhar_offset(pvt);
1989 	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
1990 	u8 node_id		= dram_dst_node(pvt, range);
1991 	u8 intlv_en		= dram_intlv_en(pvt, range);
1992 
1993 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1994 	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1995 
1996 	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1997 	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);
1998 
1999 	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2000 		 range, sys_addr, get_dram_limit(pvt, range));
2001 
2002 	if (!(get_dram_base(pvt, range)  <= sys_addr) ||
2003 	    !(get_dram_limit(pvt, range) >= sys_addr))
2004 		return -EINVAL;
2005 
2006 	if (dhar_valid(pvt) &&
2007 	    dhar_base(pvt) <= sys_addr &&
2008 	    sys_addr < BIT_64(32)) {
2009 		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2010 			    sys_addr);
2011 		return -EINVAL;
2012 	}
2013 
2014 	/* Verify sys_addr is within DCT Range. */
2015 	dct_base = (u64) dct_sel_baseaddr(pvt);
2016 	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2017 
2018 	if (!(dct_cont_base_reg & BIT(0)) &&
2019 	    !(dct_base <= (sys_addr >> 27) &&
2020 	      dct_limit >= (sys_addr >> 27)))
2021 		return -EINVAL;
2022 
2023 	/* Verify the number of DCTs that participate in channel interleaving. */
2024 	num_dcts_intlv = (int) hweight8(intlv_en);
2025 
2026 	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2027 		return -EINVAL;
2028 
2029 	if (pvt->model >= 0x60)
2030 		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2031 	else
2032 		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2033 						     num_dcts_intlv, dct_sel);
2034 
2035 	/* Verify we stay within the MAX number of channels allowed */
2036 	if (channel > 3)
2037 		return -EINVAL;
2038 
2039 	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2040 
2041 	/* Get normalized DCT addr */
2042 	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2043 		chan_offset = dhar_offset;
2044 	else
2045 		chan_offset = dct_base << 27;
2046 
2047 	chan_addr = sys_addr - chan_offset;
2048 
2049 	/* remove channel interleave */
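	/*
	 * 2-way interleaving drops one address bit (A[8] for intlv_addr 0x4,
	 * A[9] for 0x5); 4-way interleaving drops two bits starting at the
	 * same position.
	 */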
2050 	if (num_dcts_intlv == 2) {
2051 		if (intlv_addr == 0x4)
2052 			chan_addr = ((chan_addr >> 9) << 8) |
2053 						(chan_addr & 0xff);
2054 		else if (intlv_addr == 0x5)
2055 			chan_addr = ((chan_addr >> 10) << 9) |
2056 						(chan_addr & 0x1ff);
2057 		else
2058 			return -EINVAL;
2059 
2060 	} else if (num_dcts_intlv == 4) {
2061 		if (intlv_addr == 0x4)
2062 			chan_addr = ((chan_addr >> 10) << 8) |
2063 							(chan_addr & 0xff);
2064 		else if (intlv_addr == 0x5)
2065 			chan_addr = ((chan_addr >> 11) << 9) |
2066 							(chan_addr & 0x1ff);
2067 		else
2068 			return -EINVAL;
2069 	}
2070 
2071 	if (dct_offset_en) {
2072 		amd64_read_pci_cfg(pvt->F1,
2073 				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
2074 				   &tmp);
2075 		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
2076 	}
2077 
2078 	f15h_select_dct(pvt, channel);
2079 
2080 	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
2081 
2082 	/*
2083 	 * Find Chip select:
2084 	 * if channel == 3, alias it to 1. This is because, on F15h M30h,
2085 	 * there is support for 4 DCTs, but only 2 are currently functional.
2086 	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2087 	 * pvt->csels[1], so we need to use '1' here to get the correct info.
2088 	 * Refer to F15h M30h BKDG Sections 2.10 and 2.10.3 for clarification.
2089 	 */
2090 	alias_channel =  (channel == 3) ? 1 : channel;
2091 
2092 	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2093 
2094 	if (cs_found >= 0)
2095 		*chan_sel = alias_channel;
2096 
2097 	return cs_found;
2098 }
2099 
2100 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2101 					u64 sys_addr,
2102 					int *chan_sel)
2103 {
2104 	int cs_found = -EINVAL;
2105 	unsigned range;
2106 
2107 	for (range = 0; range < DRAM_RANGES; range++) {
2108 		if (!dram_rw(pvt, range))
2109 			continue;
2110 
2111 		if (pvt->fam == 0x15 && pvt->model >= 0x30)
2112 			cs_found = f15_m30h_match_to_this_node(pvt, range,
2113 							       sys_addr,
2114 							       chan_sel);
2115 
2116 		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
2117 			 (get_dram_limit(pvt, range) >= sys_addr)) {
2118 			cs_found = f1x_match_to_this_node(pvt, range,
2119 							  sys_addr, chan_sel);
2120 			if (cs_found >= 0)
2121 				break;
2122 		}
2123 	}
2124 	return cs_found;
2125 }
2126 
2127 /*
2128  * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2129  * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2130  *
2131  * The @sys_addr is usually an error address received from the hardware
2132  * (MCX_ADDR).
2133  */
2134 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2135 				     struct err_info *err)
2136 {
2137 	struct amd64_pvt *pvt = mci->pvt_info;
2138 
2139 	error_address_to_page_and_offset(sys_addr, err);
2140 
2141 	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2142 	if (err->csrow < 0) {
2143 		err->err_code = ERR_CSROW;
2144 		return;
2145 	}
2146 
2147 	/*
2148 	 * We need the syndromes for channel detection only when we're
2149 	 * ganged. Otherwise @chan should already contain the channel at
2150 	 * this point.
2151 	 */
2152 	if (dct_ganging_enabled(pvt))
2153 		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2154 }
2155 
2156 /*
2157  * Debug routine to display the memory sizes of all logical DIMMs and their
2158  * CSROWs.
2159  */
2160 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2161 {
2162 	int dimm, size0, size1;
2163 	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2164 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
2165 
2166 	if (pvt->fam == 0xf) {
2167 		/* K8 revisions < revF are not supported yet */
2168 		if (pvt->ext_model < K8_REV_F)
2169 			return;
2170 		else
2171 			WARN_ON(ctrl != 0);
2172 	}
2173 
2174 	if (pvt->fam == 0x10) {
2175 		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2176 							   : pvt->dbam0;
2177 		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2178 				 pvt->csels[1].csbases :
2179 				 pvt->csels[0].csbases;
2180 	} else if (ctrl) {
2181 		dbam = pvt->dbam0;
2182 		dcsb = pvt->csels[1].csbases;
2183 	}
2184 	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2185 		 ctrl, dbam);
2186 
2187 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2188 
2189 	/* Dump memory sizes for DIMM and its CSROWs */
2190 	for (dimm = 0; dimm < 4; dimm++) {
2191 
2192 		size0 = 0;
2193 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2194 			/*
2195 			 * For F15h M60h, we need a multiplier for the LRDIMM
2196 			 * cs_size calculation. We pass the dimm value to the
2197 			 * dbam_to_cs mapper so we can find the multiplier from
2198 			 * the corresponding DCSM.
2199 			 */
2200 			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2201 						     DBAM_DIMM(dimm, dbam),
2202 						     dimm);
2203 
2204 		size1 = 0;
2205 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2206 			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2207 						     DBAM_DIMM(dimm, dbam),
2208 						     dimm);
2209 
2210 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2211 				dimm * 2,     size0,
2212 				dimm * 2 + 1, size1);
2213 	}
2214 }
2215 
2216 static struct amd64_family_type family_types[] = {
2217 	[K8_CPUS] = {
2218 		.ctl_name = "K8",
2219 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2220 		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2221 		.max_mcs = 2,
2222 		.ops = {
2223 			.early_channel_count	= k8_early_channel_count,
2224 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
2225 			.dbam_to_cs		= k8_dbam_to_chip_select,
2226 		}
2227 	},
2228 	[F10_CPUS] = {
2229 		.ctl_name = "F10h",
2230 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2231 		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2232 		.max_mcs = 2,
2233 		.ops = {
2234 			.early_channel_count	= f1x_early_channel_count,
2235 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2236 			.dbam_to_cs		= f10_dbam_to_chip_select,
2237 		}
2238 	},
2239 	[F15_CPUS] = {
2240 		.ctl_name = "F15h",
2241 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2242 		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2243 		.max_mcs = 2,
2244 		.ops = {
2245 			.early_channel_count	= f1x_early_channel_count,
2246 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2247 			.dbam_to_cs		= f15_dbam_to_chip_select,
2248 		}
2249 	},
2250 	[F15_M30H_CPUS] = {
2251 		.ctl_name = "F15h_M30h",
2252 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2253 		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2254 		.max_mcs = 2,
2255 		.ops = {
2256 			.early_channel_count	= f1x_early_channel_count,
2257 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2258 			.dbam_to_cs		= f16_dbam_to_chip_select,
2259 		}
2260 	},
2261 	[F15_M60H_CPUS] = {
2262 		.ctl_name = "F15h_M60h",
2263 		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2264 		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2265 		.max_mcs = 2,
2266 		.ops = {
2267 			.early_channel_count	= f1x_early_channel_count,
2268 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2269 			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
2270 		}
2271 	},
2272 	[F16_CPUS] = {
2273 		.ctl_name = "F16h",
2274 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2275 		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2276 		.max_mcs = 2,
2277 		.ops = {
2278 			.early_channel_count	= f1x_early_channel_count,
2279 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2280 			.dbam_to_cs		= f16_dbam_to_chip_select,
2281 		}
2282 	},
2283 	[F16_M30H_CPUS] = {
2284 		.ctl_name = "F16h_M30h",
2285 		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2286 		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2287 		.max_mcs = 2,
2288 		.ops = {
2289 			.early_channel_count	= f1x_early_channel_count,
2290 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
2291 			.dbam_to_cs		= f16_dbam_to_chip_select,
2292 		}
2293 	},
2294 	[F17_CPUS] = {
2295 		.ctl_name = "F17h",
2296 		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2297 		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2298 		.max_mcs = 2,
2299 		.ops = {
2300 			.early_channel_count	= f17_early_channel_count,
2301 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2302 		}
2303 	},
2304 	[F17_M10H_CPUS] = {
2305 		.ctl_name = "F17h_M10h",
2306 		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2307 		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2308 		.max_mcs = 2,
2309 		.ops = {
2310 			.early_channel_count	= f17_early_channel_count,
2311 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2312 		}
2313 	},
2314 	[F17_M30H_CPUS] = {
2315 		.ctl_name = "F17h_M30h",
2316 		.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2317 		.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2318 		.max_mcs = 8,
2319 		.ops = {
2320 			.early_channel_count	= f17_early_channel_count,
2321 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2322 		}
2323 	},
2324 	[F17_M60H_CPUS] = {
2325 		.ctl_name = "F17h_M60h",
2326 		.f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
2327 		.f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
2328 		.max_mcs = 2,
2329 		.ops = {
2330 			.early_channel_count	= f17_early_channel_count,
2331 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2332 		}
2333 	},
2334 	[F17_M70H_CPUS] = {
2335 		.ctl_name = "F17h_M70h",
2336 		.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
2337 		.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
2338 		.max_mcs = 2,
2339 		.ops = {
2340 			.early_channel_count	= f17_early_channel_count,
2341 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2342 		}
2343 	},
2344 	[F19_CPUS] = {
2345 		.ctl_name = "F19h",
2346 		.f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
2347 		.f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
2348 		.max_mcs = 8,
2349 		.ops = {
2350 			.early_channel_count	= f17_early_channel_count,
2351 			.dbam_to_cs		= f17_addr_mask_to_cs_size,
2352 		}
2353 	},
2354 };
2355 
2356 /*
2357  * These are tables of eigenvectors (one per line) which can be used for the
2358  * construction of the syndrome tables. The modified syndrome search algorithm
2359  * uses those to find the symbol in error and thus the DIMM.
2360  *
2361  * Algorithm courtesy of Ross LaFetra from AMD.
2362  */
2363 static const u16 x4_vectors[] = {
2364 	0x2f57, 0x1afe, 0x66cc, 0xdd88,
2365 	0x11eb, 0x3396, 0x7f4c, 0xeac8,
2366 	0x0001, 0x0002, 0x0004, 0x0008,
2367 	0x1013, 0x3032, 0x4044, 0x8088,
2368 	0x106b, 0x30d6, 0x70fc, 0xe0a8,
2369 	0x4857, 0xc4fe, 0x13cc, 0x3288,
2370 	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2371 	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2372 	0x15c1, 0x2a42, 0x89ac, 0x4758,
2373 	0x2b03, 0x1602, 0x4f0c, 0xca08,
2374 	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2375 	0x8ba7, 0x465e, 0x244c, 0x1cc8,
2376 	0x2b87, 0x164e, 0x642c, 0xdc18,
2377 	0x40b9, 0x80de, 0x1094, 0x20e8,
2378 	0x27db, 0x1eb6, 0x9dac, 0x7b58,
2379 	0x11c1, 0x2242, 0x84ac, 0x4c58,
2380 	0x1be5, 0x2d7a, 0x5e34, 0xa718,
2381 	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2382 	0x4c97, 0xc87e, 0x11fc, 0x33a8,
2383 	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2384 	0x16b3, 0x3d62, 0x4f34, 0x8518,
2385 	0x1e2f, 0x391a, 0x5cac, 0xf858,
2386 	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2387 	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2388 	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2389 	0x4397, 0xc27e, 0x17fc, 0x3ea8,
2390 	0x1617, 0x3d3e, 0x6464, 0xb8b8,
2391 	0x23ff, 0x12aa, 0xab6c, 0x56d8,
2392 	0x2dfb, 0x1ba6, 0x913c, 0x7328,
2393 	0x185d, 0x2ca6, 0x7914, 0x9e28,
2394 	0x171b, 0x3e36, 0x7d7c, 0xebe8,
2395 	0x4199, 0x82ee, 0x19f4, 0x2e58,
2396 	0x4807, 0xc40e, 0x130c, 0x3208,
2397 	0x1905, 0x2e0a, 0x5804, 0xac08,
2398 	0x213f, 0x132a, 0xadfc, 0x5ba8,
2399 	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2400 };
2401 
2402 static const u16 x8_vectors[] = {
2403 	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2404 	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2405 	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2406 	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2407 	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2408 	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2409 	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2410 	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2411 	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2412 	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2413 	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2414 	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2415 	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2416 	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2417 	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2418 	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2419 	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2420 	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2421 	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2422 };
2423 
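/*
 * Try to express @syndrome as an XOR combination of the eigenvectors within
 * one symbol group (@v_dim vectors per group). If the syndrome reduces to
 * zero within a group, that group's index is the symbol in error; otherwise
 * move on to the next group.
 */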
2424 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2425 			   unsigned v_dim)
2426 {
2427 	unsigned int i, err_sym;
2428 
2429 	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2430 		u16 s = syndrome;
2431 		unsigned v_idx =  err_sym * v_dim;
2432 		unsigned v_end = (err_sym + 1) * v_dim;
2433 
2434 		/* walk over all 16 bits of the syndrome */
2435 		for (i = 1; i < (1U << 16); i <<= 1) {
2436 
2437 			/* if bit is set in that eigenvector... */
2438 			if (v_idx < v_end && vectors[v_idx] & i) {
2439 				u16 ev_comp = vectors[v_idx++];
2440 
2441 				/* ... and bit set in the modified syndrome, */
2442 				if (s & i) {
2443 					/* remove it. */
2444 					s ^= ev_comp;
2445 
2446 					if (!s)
2447 						return err_sym;
2448 				}
2449 
2450 			} else if (s & i)
2451 				/* can't get to zero, move to next symbol */
2452 				break;
2453 		}
2454 	}
2455 
2456 	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2457 	return -1;
2458 }
2459 
2460 static int map_err_sym_to_channel(int err_sym, int sym_size)
2461 {
2462 	if (sym_size == 4)
2463 		switch (err_sym) {
2464 		case 0x20:
2465 		case 0x21:
2466 			return 0;
2467 		case 0x22:
2468 		case 0x23:
2469 			return 1;
2470 		default:
2471 			return err_sym >> 4;
2472 		}
2473 	/* x8 symbols */
2474 	else
2475 		switch (err_sym) {
2476 		/* imaginary bits not in a DIMM */
2477 		case 0x10:
2478 			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
2479 					  err_sym);
2480 			return -1;
2481 		case 0x11:
2482 			return 0;
2483 		case 0x12:
2484 			return 1;
2485 		default:
2486 			return err_sym >> 3;
2487 		}
2488 	return -1;
2489 }
2490 
2491 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2492 {
2493 	struct amd64_pvt *pvt = mci->pvt_info;
2494 	int err_sym = -1;
2495 
2496 	if (pvt->ecc_sym_sz == 8)
2497 		err_sym = decode_syndrome(syndrome, x8_vectors,
2498 					  ARRAY_SIZE(x8_vectors),
2499 					  pvt->ecc_sym_sz);
2500 	else if (pvt->ecc_sym_sz == 4)
2501 		err_sym = decode_syndrome(syndrome, x4_vectors,
2502 					  ARRAY_SIZE(x4_vectors),
2503 					  pvt->ecc_sym_sz);
2504 	else {
2505 		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2506 		return err_sym;
2507 	}
2508 
2509 	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2510 }
2511 
2512 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2513 			    u8 ecc_type)
2514 {
2515 	enum hw_event_mc_err_type err_type;
2516 	const char *string;
2517 
2518 	if (ecc_type == 2)
2519 		err_type = HW_EVENT_ERR_CORRECTED;
2520 	else if (ecc_type == 1)
2521 		err_type = HW_EVENT_ERR_UNCORRECTED;
2522 	else if (ecc_type == 3)
2523 		err_type = HW_EVENT_ERR_DEFERRED;
2524 	else {
2525 		WARN(1, "Something is rotten in the state of Denmark.\n");
2526 		return;
2527 	}
2528 
2529 	switch (err->err_code) {
2530 	case DECODE_OK:
2531 		string = "";
2532 		break;
2533 	case ERR_NODE:
2534 		string = "Failed to map error addr to a node";
2535 		break;
2536 	case ERR_CSROW:
2537 		string = "Failed to map error addr to a csrow";
2538 		break;
2539 	case ERR_CHANNEL:
2540 		string = "Unknown syndrome - possible error reporting race";
2541 		break;
2542 	case ERR_SYND:
2543 		string = "MCA_SYND not valid - unknown syndrome and csrow";
2544 		break;
2545 	case ERR_NORM_ADDR:
2546 		string = "Cannot decode normalized address";
2547 		break;
2548 	default:
2549 		string = "WTF error";
2550 		break;
2551 	}
2552 
2553 	edac_mc_handle_error(err_type, mci, 1,
2554 			     err->page, err->offset, err->syndrome,
2555 			     err->csrow, err->channel, -1,
2556 			     string, "");
2557 }
2558 
2559 static inline void decode_bus_error(int node_id, struct mce *m)
2560 {
2561 	struct mem_ctl_info *mci;
2562 	struct amd64_pvt *pvt;
2563 	u8 ecc_type = (m->status >> 45) & 0x3;
2564 	u8 xec = XEC(m->status, 0x1f);
2565 	u16 ec = EC(m->status);
2566 	u64 sys_addr;
2567 	struct err_info err;
2568 
2569 	mci = edac_mc_find(node_id);
2570 	if (!mci)
2571 		return;
2572 
2573 	pvt = mci->pvt_info;
2574 
2575 	/* Bail out early if this was an 'observed' error */
2576 	if (PP(ec) == NBSL_PP_OBS)
2577 		return;
2578 
2579 	/* Do only ECC errors */
2580 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2581 		return;
2582 
2583 	memset(&err, 0, sizeof(err));
2584 
2585 	sys_addr = get_error_address(pvt, m);
2586 
2587 	if (ecc_type == 2)
2588 		err.syndrome = extract_syndrome(m->status);
2589 
2590 	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2591 
2592 	__log_ecc_error(mci, &err, ecc_type);
2593 }
2594 
2595 /*
2596  * To find the UMC channel represented by this bank we need to match on its
2597  * instance_id. The instance_id of a bank is held in the lower 32 bits of its
2598  * IPID.
2599  *
2600  * Currently, we can derive the channel number by looking at the 6th nibble in
2601  * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
2602  * number.
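 *
 * For instance (illustrative value), an instance_id of 0x00150400 yields
 * (0x00150400 >> 20) == 0x1, i.e. UMC channel 1.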
2603  */
2604 static int find_umc_channel(struct mce *m)
2605 {
2606 	return (m->ipid & GENMASK(31, 0)) >> 20;
2607 }
2608 
2609 static void decode_umc_error(int node_id, struct mce *m)
2610 {
2611 	u8 ecc_type = (m->status >> 45) & 0x3;
2612 	struct mem_ctl_info *mci;
2613 	struct amd64_pvt *pvt;
2614 	struct err_info err;
2615 	u64 sys_addr;
2616 
2617 	mci = edac_mc_find(node_id);
2618 	if (!mci)
2619 		return;
2620 
2621 	pvt = mci->pvt_info;
2622 
2623 	memset(&err, 0, sizeof(err));
2624 
2625 	if (m->status & MCI_STATUS_DEFERRED)
2626 		ecc_type = 3;
2627 
2628 	err.channel = find_umc_channel(m);
2629 
2630 	if (!(m->status & MCI_STATUS_SYNDV)) {
2631 		err.err_code = ERR_SYND;
2632 		goto log_error;
2633 	}
2634 
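	/*
	 * Per the layout assumed by this code, MCA_SYND[23:18] holds the
	 * syndrome length in bits and MCA_SYND[63:32] the syndrome itself.
	 */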
2635 	if (ecc_type == 2) {
2636 		u8 length = (m->synd >> 18) & 0x3f;
2637 
2638 		if (length)
2639 			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
2640 		else
2641 			err.err_code = ERR_CHANNEL;
2642 	}
2643 
2644 	err.csrow = m->synd & 0x7;
2645 
2646 	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2647 		err.err_code = ERR_NORM_ADDR;
2648 		goto log_error;
2649 	}
2650 
2651 	error_address_to_page_and_offset(sys_addr, &err);
2652 
2653 log_error:
2654 	__log_ecc_error(mci, &err, ecc_type);
2655 }
2656 
2657 /*
2658  * Use pvt->F3 which contains the F3 CPU PCI device to get the related
2659  * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
2660  * Reserve F0 and F6 on systems with a UMC.
2661  */
2662 static int
2663 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2664 {
2665 	if (pvt->umc) {
2666 		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2667 		if (!pvt->F0) {
2668 			amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
2669 			return -ENODEV;
2670 		}
2671 
2672 		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2673 		if (!pvt->F6) {
2674 			pci_dev_put(pvt->F0);
2675 			pvt->F0 = NULL;
2676 
2677 			amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2678 			return -ENODEV;
2679 		}
2680 
2681 		if (!pci_ctl_dev)
2682 			pci_ctl_dev = &pvt->F0->dev;
2683 
2684 		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2685 		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2686 		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2687 
2688 		return 0;
2689 	}
2690 
2691 	/* Reserve the ADDRESS MAP Device */
2692 	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2693 	if (!pvt->F1) {
2694 		amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
2695 		return -ENODEV;
2696 	}
2697 
2698 	/* Reserve the DCT Device */
2699 	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2700 	if (!pvt->F2) {
2701 		pci_dev_put(pvt->F1);
2702 		pvt->F1 = NULL;
2703 
2704 		amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2705 		return -ENODEV;
2706 	}
2707 
2708 	if (!pci_ctl_dev)
2709 		pci_ctl_dev = &pvt->F2->dev;
2710 
2711 	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2712 	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2713 	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2714 
2715 	return 0;
2716 }
2717 
2718 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2719 {
2720 	if (pvt->umc) {
2721 		pci_dev_put(pvt->F0);
2722 		pci_dev_put(pvt->F6);
2723 	} else {
2724 		pci_dev_put(pvt->F1);
2725 		pci_dev_put(pvt->F2);
2726 	}
2727 }
2728 
2729 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2730 {
2731 	pvt->ecc_sym_sz = 4;
2732 
2733 	if (pvt->umc) {
2734 		u8 i;
2735 
2736 		for_each_umc(i) {
2737 			/* Check enabled channels only: */
2738 			if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
2739 				if (pvt->umc[i].ecc_ctrl & BIT(9)) {
2740 					pvt->ecc_sym_sz = 16;
2741 					return;
2742 				} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
2743 					pvt->ecc_sym_sz = 8;
2744 					return;
2745 				}
2746 			}
2747 		}
2748 	} else if (pvt->fam >= 0x10) {
2749 		u32 tmp;
2750 
2751 		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2752 		/* F16h has only DCT0, so no need to read dbam1. */
2753 		if (pvt->fam != 0x16)
2754 			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2755 
2756 		/* F10h, revD and later can do x8 ECC too. */
2757 		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2758 			pvt->ecc_sym_sz = 8;
2759 	}
2760 }
2761 
2762 /*
2763  * Retrieve the hardware registers of the memory controller.
2764  */
2765 static void __read_mc_regs_df(struct amd64_pvt *pvt)
2766 {
2767 	u8 nid = pvt->mc_node_id;
2768 	struct amd64_umc *umc;
2769 	u32 i, umc_base;
2770 
2771 	/* Read registers from each UMC */
2772 	for_each_umc(i) {
2773 
2774 		umc_base = get_umc_base(i);
2775 		umc = &pvt->umc[i];
2776 
2777 		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
2778 		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
2779 		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
2780 		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
2781 		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
2782 	}
2783 }
2784 
2785 /*
2786  * Retrieve the hardware registers of the memory controller (this includes the
2787  * 'Address Map' and 'Misc' device regs)
2788  */
2789 static void read_mc_regs(struct amd64_pvt *pvt)
2790 {
2791 	unsigned int range;
2792 	u64 msr_val;
2793 
2794 	/*
2795 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2796 	 * those are Read-As-Zero.
2797 	 */
2798 	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2799 	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);
2800 
2801 	/* Check first whether TOP_MEM2 is enabled: */
2802 	rdmsrl(MSR_K8_SYSCFG, msr_val);
2803 	if (msr_val & BIT(21)) {
2804 		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2805 		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2806 	} else {
2807 		edac_dbg(0, "  TOP_MEM2 disabled\n");
2808 	}
2809 
2810 	if (pvt->umc) {
2811 		__read_mc_regs_df(pvt);
2812 		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
2813 
2814 		goto skip;
2815 	}
2816 
2817 	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2818 
2819 	read_dram_ctl_register(pvt);
2820 
2821 	for (range = 0; range < DRAM_RANGES; range++) {
2822 		u8 rw;
2823 
2824 		/* read settings for this DRAM range */
2825 		read_dram_base_limit_regs(pvt, range);
2826 
2827 		rw = dram_rw(pvt, range);
2828 		if (!rw)
2829 			continue;
2830 
2831 		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2832 			 range,
2833 			 get_dram_base(pvt, range),
2834 			 get_dram_limit(pvt, range));
2835 
2836 		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2837 			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2838 			 (rw & 0x1) ? "R" : "-",
2839 			 (rw & 0x2) ? "W" : "-",
2840 			 dram_intlv_sel(pvt, range),
2841 			 dram_dst_node(pvt, range));
2842 	}
2843 
2844 	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2845 	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2846 
2847 	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2848 
2849 	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2850 	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2851 
2852 	if (!dct_ganging_enabled(pvt)) {
2853 		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2854 		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2855 	}
2856 
2857 skip:
2858 	read_dct_base_mask(pvt);
2859 
2860 	determine_memory_type(pvt);
2861 	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2862 
2863 	determine_ecc_sym_sz(pvt);
2864 }
2865 
2866 /*
2867  * NOTE: CPU Revision Dependent code
2868  *
2869  * Input:
2870  *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2871  *	k8 private pointer to -->
2872  *			DRAM Bank Address mapping register
2873  *			node_id
2874  *			DCL register where dual_channel_active is
2875  *
2876  * The DBAM register consists of four 4-bit fields, defined as follows:
2877  *
2878  * Bits:	CSROWs
2879  * 0-3		CSROWs 0 and 1
2880  * 4-7		CSROWs 2 and 3
2881  * 8-11		CSROWs 4 and 5
2882  * 12-15	CSROWs 6 and 7
2883  *
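 * For example (illustrative value), DBAM == 0x00000041 means CSROWs 0/1 use
 * encoding 0x1 and CSROWs 2/3 use encoding 0x4.
 *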
2884  * Each field's value ranges from 0 to 15.
2885  * The meaning of the values depends on CPU revision and dual-channel state;
2886  * see the relevant BKDG for more info.
2887  *
2888  * The memory controller provides for a total of only 8 CSROWs in its current
2889  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2890  * single channel or two (2) DIMMs in dual channel mode.
2891  *
2892  * The following code logic collapses the various tables for CSROW based on CPU
2893  * revision.
2894  *
2895  * Returns:
2896  *	The number of PAGE_SIZE pages the specified CSROW number
2897  *	encompasses.
2898  *
2899  */
2900 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2901 {
2902 	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2903 	int csrow_nr = csrow_nr_orig;
2904 	u32 cs_mode, nr_pages;
2905 
2906 	if (!pvt->umc) {
2907 		csrow_nr >>= 1;
2908 		cs_mode = DBAM_DIMM(csrow_nr, dbam);
2909 	} else {
2910 		cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
2911 	}
2912 
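	/*
	 * dbam_to_cs() returns the chip select size in MB; convert that to
	 * the number of PAGE_SIZE pages.
	 */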
2913 	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2914 	nr_pages <<= 20 - PAGE_SHIFT;
2915 
2916 	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2917 		    csrow_nr_orig, dct,  cs_mode);
2918 	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2919 
2920 	return nr_pages;
2921 }
2922 
2923 static int init_csrows_df(struct mem_ctl_info *mci)
2924 {
2925 	struct amd64_pvt *pvt = mci->pvt_info;
2926 	enum edac_type edac_mode = EDAC_NONE;
2927 	enum dev_type dev_type = DEV_UNKNOWN;
2928 	struct dimm_info *dimm;
2929 	int empty = 1;
2930 	u8 umc, cs;
2931 
2932 	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
2933 		edac_mode = EDAC_S16ECD16ED;
2934 		dev_type = DEV_X16;
2935 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
2936 		edac_mode = EDAC_S8ECD8ED;
2937 		dev_type = DEV_X8;
2938 	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
2939 		edac_mode = EDAC_S4ECD4ED;
2940 		dev_type = DEV_X4;
2941 	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
2942 		edac_mode = EDAC_SECDED;
2943 	}
2944 
2945 	for_each_umc(umc) {
2946 		for_each_chip_select(cs, umc, pvt) {
2947 			if (!csrow_enabled(cs, umc, pvt))
2948 				continue;
2949 
2950 			empty = 0;
2951 			dimm = mci->csrows[cs]->channels[umc]->dimm;
2952 
2953 			edac_dbg(1, "MC node: %d, csrow: %d\n",
2954 					pvt->mc_node_id, cs);
2955 
2956 			dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
2957 			dimm->mtype = pvt->dram_type;
2958 			dimm->edac_mode = edac_mode;
2959 			dimm->dtype = dev_type;
2960 			dimm->grain = 64;
2961 		}
2962 	}
2963 
2964 	return empty;
2965 }
2966 
2967 /*
2968  * Initialize the array of csrow attribute instances, based on the values
2969  * from pci config hardware registers.
2970  */
2971 static int init_csrows(struct mem_ctl_info *mci)
2972 {
2973 	struct amd64_pvt *pvt = mci->pvt_info;
2974 	enum edac_type edac_mode = EDAC_NONE;
2975 	struct csrow_info *csrow;
2976 	struct dimm_info *dimm;
2977 	int i, j, empty = 1;
2978 	int nr_pages = 0;
2979 	u32 val;
2980 
2981 	if (pvt->umc)
2982 		return init_csrows_df(mci);
2983 
2984 	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2985 
2986 	pvt->nbcfg = val;
2987 
2988 	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2989 		 pvt->mc_node_id, val,
2990 		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2991 
2992 	/*
2993 	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2994 	 */
2995 	for_each_chip_select(i, 0, pvt) {
2996 		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2997 		bool row_dct1 = false;
2998 
2999 		if (pvt->fam != 0xf)
3000 			row_dct1 = !!csrow_enabled(i, 1, pvt);
3001 
3002 		if (!row_dct0 && !row_dct1)
3003 			continue;
3004 
3005 		csrow = mci->csrows[i];
3006 		empty = 0;
3007 
3008 		edac_dbg(1, "MC node: %d, csrow: %d\n",
3009 			    pvt->mc_node_id, i);
3010 
3011 		if (row_dct0) {
3012 			nr_pages = get_csrow_nr_pages(pvt, 0, i);
3013 			csrow->channels[0]->dimm->nr_pages = nr_pages;
3014 		}
3015 
3016 		/* K8 has only one DCT */
3017 		if (pvt->fam != 0xf && row_dct1) {
3018 			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
3019 
3020 			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3021 			nr_pages += row_dct1_pages;
3022 		}
3023 
3024 		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3025 
3026 		/* Determine DIMM ECC mode: */
3027 		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3028 			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3029 					? EDAC_S4ECD4ED
3030 					: EDAC_SECDED;
3031 		}
3032 
3033 		for (j = 0; j < pvt->channel_count; j++) {
3034 			dimm = csrow->channels[j]->dimm;
3035 			dimm->mtype = pvt->dram_type;
3036 			dimm->edac_mode = edac_mode;
3037 			dimm->grain = 64;
3038 		}
3039 	}
3040 
3041 	return empty;
3042 }
3043 
3044 /* get all cores on this DCT */
3045 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3046 {
3047 	int cpu;
3048 
3049 	for_each_online_cpu(cpu)
3050 		if (topology_die_id(cpu) == nid)
3051 			cpumask_set_cpu(cpu, mask);
3052 }
3053 
3054 /* check MCG_CTL on all the cpus on this node */
3055 static bool nb_mce_bank_enabled_on_node(u16 nid)
3056 {
3057 	cpumask_var_t mask;
3058 	int cpu, nbe;
3059 	bool ret = false;
3060 
3061 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3062 		amd64_warn("%s: Error allocating mask\n", __func__);
3063 		return false;
3064 	}
3065 
3066 	get_cpus_on_this_dct_cpumask(mask, nid);
3067 
3068 	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3069 
3070 	for_each_cpu(cpu, mask) {
3071 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3072 		nbe = reg->l & MSR_MCGCTL_NBE;
3073 
3074 		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3075 			 cpu, reg->q,
3076 			 (nbe ? "enabled" : "disabled"));
3077 
3078 		if (!nbe)
3079 			goto out;
3080 	}
3081 	ret = true;
3082 
3083 out:
3084 	free_cpumask_var(mask);
3085 	return ret;
3086 }
3087 
3088 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3089 {
3090 	cpumask_var_t cmask;
3091 	int cpu;
3092 
3093 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3094 		amd64_warn("%s: error allocating mask\n", __func__);
3095 		return -ENOMEM;
3096 	}
3097 
3098 	get_cpus_on_this_dct_cpumask(cmask, nid);
3099 
3100 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3101 
3102 	for_each_cpu(cpu, cmask) {
3103 
3104 		struct msr *reg = per_cpu_ptr(msrs, cpu);
3105 
3106 		if (on) {
3107 			if (reg->l & MSR_MCGCTL_NBE)
3108 				s->flags.nb_mce_enable = 1;
3109 
3110 			reg->l |= MSR_MCGCTL_NBE;
3111 		} else {
3112 			/*
3113 			 * Turn off NB MCE reporting only when it was off before
3114 			 */
3115 			if (!s->flags.nb_mce_enable)
3116 				reg->l &= ~MSR_MCGCTL_NBE;
3117 		}
3118 	}
3119 	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3120 
3121 	free_cpumask_var(cmask);
3122 
3123 	return 0;
3124 }
3125 
3126 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3127 				       struct pci_dev *F3)
3128 {
3129 	bool ret = true;
3130 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3131 
3132 	if (toggle_ecc_err_reporting(s, nid, ON)) {
3133 		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3134 		return false;
3135 	}
3136 
3137 	amd64_read_pci_cfg(F3, NBCTL, &value);
3138 
3139 	s->old_nbctl   = value & mask;
3140 	s->nbctl_valid = true;
3141 
3142 	value |= mask;
3143 	amd64_write_pci_cfg(F3, NBCTL, value);
3144 
3145 	amd64_read_pci_cfg(F3, NBCFG, &value);
3146 
3147 	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3148 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3149 
3150 	if (!(value & NBCFG_ECC_ENABLE)) {
3151 		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3152 
3153 		s->flags.nb_ecc_prev = 0;
3154 
3155 		/* Attempt to turn on DRAM ECC Enable */
3156 		value |= NBCFG_ECC_ENABLE;
3157 		amd64_write_pci_cfg(F3, NBCFG, value);
3158 
3159 		amd64_read_pci_cfg(F3, NBCFG, &value);
3160 
3161 		if (!(value & NBCFG_ECC_ENABLE)) {
3162 			amd64_warn("Hardware rejected DRAM ECC enable, "
3163 				   "check memory DIMM configuration.\n");
3164 			ret = false;
3165 		} else {
3166 			amd64_info("Hardware accepted DRAM ECC Enable\n");
3167 		}
3168 	} else {
3169 		s->flags.nb_ecc_prev = 1;
3170 	}
3171 
3172 	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3173 		 nid, value, !!(value & NBCFG_ECC_ENABLE));
3174 
3175 	return ret;
3176 }
3177 
3178 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3179 					struct pci_dev *F3)
3180 {
3181 	u32 value, mask = 0x3;		/* UECC/CECC enable */
3182 
3183 	if (!s->nbctl_valid)
3184 		return;
3185 
3186 	amd64_read_pci_cfg(F3, NBCTL, &value);
3187 	value &= ~mask;
3188 	value |= s->old_nbctl;
3189 
3190 	amd64_write_pci_cfg(F3, NBCTL, value);
3191 
3192 	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3193 	if (!s->flags.nb_ecc_prev) {
3194 		amd64_read_pci_cfg(F3, NBCFG, &value);
3195 		value &= ~NBCFG_ECC_ENABLE;
3196 		amd64_write_pci_cfg(F3, NBCFG, value);
3197 	}
3198 
3199 	/* restore the NB Enable MCGCTL bit */
3200 	if (toggle_ecc_err_reporting(s, nid, OFF))
3201 		amd64_warn("Error restoring NB MCGCTL settings!\n");
3202 }
3203 
3204 static bool ecc_enabled(struct amd64_pvt *pvt)
3205 {
3206 	u16 nid = pvt->mc_node_id;
3207 	bool nb_mce_en = false;
3208 	u8 ecc_en = 0, i;
3209 	u32 value;
3210 
3211 	if (boot_cpu_data.x86 >= 0x17) {
3212 		u8 umc_en_mask = 0, ecc_en_mask = 0;
3213 		struct amd64_umc *umc;
3214 
3215 		for_each_umc(i) {
3216 			umc = &pvt->umc[i];
3217 
3218 			/* Only check enabled UMCs. */
3219 			if (!(umc->sdp_ctrl & UMC_SDP_INIT))
3220 				continue;
3221 
3222 			umc_en_mask |= BIT(i);
3223 
3224 			if (umc->umc_cap_hi & UMC_ECC_ENABLED)
3225 				ecc_en_mask |= BIT(i);
3226 		}
3227 
3228 		/* Check whether at least one UMC is enabled: */
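		/* ECC counts as enabled only if every enabled UMC reports it: */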
3229 		if (umc_en_mask)
3230 			ecc_en = umc_en_mask == ecc_en_mask;
3231 		else
3232 			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3233 
3234 		/* Assume UMC MCA banks are enabled. */
3235 		nb_mce_en = true;
3236 	} else {
3237 		amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3238 
3239 		ecc_en = !!(value & NBCFG_ECC_ENABLE);
3240 
3241 		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3242 		if (!nb_mce_en)
3243 			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3244 				     MSR_IA32_MCG_CTL, nid);
3245 	}
3246 
3247 	amd64_info("Node %d: DRAM ECC %s.\n",
3248 		   nid, (ecc_en ? "enabled" : "disabled"));
3249 
3250 	if (!ecc_en || !nb_mce_en)
3251 		return false;
3252 	else
3253 		return true;
3254 }
3255 
3256 static inline void
3257 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3258 {
3259 	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3260 
3261 	for_each_umc(i) {
3262 		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3263 			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3264 			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3265 
3266 			dev_x4  &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3267 			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3268 		}
3269 	}
3270 
3271 	/* Set chipkill only if ECC is enabled: */
3272 	if (ecc_en) {
3273 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3274 
3275 		if (!cpk_en)
3276 			return;
3277 
3278 		if (dev_x4)
3279 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3280 		else if (dev_x16)
3281 			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3282 		else
3283 			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3284 	}
3285 }
3286 
3287 static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
3288 {
3289 	struct amd64_pvt *pvt = mci->pvt_info;
3290 
3291 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3292 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
3293 
3294 	if (pvt->umc) {
3295 		f17h_determine_edac_ctl_cap(mci, pvt);
3296 	} else {
3297 		if (pvt->nbcap & NBCAP_SECDED)
3298 			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3299 
3300 		if (pvt->nbcap & NBCAP_CHIPKILL)
3301 			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3302 	}
3303 
3304 	mci->edac_cap		= determine_edac_cap(pvt);
3305 	mci->mod_name		= EDAC_MOD_STR;
3306 	mci->ctl_name		= fam_type->ctl_name;
3307 	mci->dev_name		= pci_name(pvt->F3);
3308 	mci->ctl_page_to_phys	= NULL;
3309 
3310 	/* memory scrubber interface */
3311 	mci->set_sdram_scrub_rate = set_scrub_rate;
3312 	mci->get_sdram_scrub_rate = get_scrub_rate;
3313 }
3314 
3315 /*
3316  * returns a pointer to the family descriptor on success, NULL otherwise.
3317  */
3318 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3319 {
3320 	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
3321 	pvt->stepping	= boot_cpu_data.x86_stepping;
3322 	pvt->model	= boot_cpu_data.x86_model;
3323 	pvt->fam	= boot_cpu_data.x86;
3324 
3325 	switch (pvt->fam) {
3326 	case 0xf:
3327 		fam_type	= &family_types[K8_CPUS];
3328 		pvt->ops	= &family_types[K8_CPUS].ops;
3329 		break;
3330 
3331 	case 0x10:
3332 		fam_type	= &family_types[F10_CPUS];
3333 		pvt->ops	= &family_types[F10_CPUS].ops;
3334 		break;
3335 
3336 	case 0x15:
3337 		if (pvt->model == 0x30) {
3338 			fam_type = &family_types[F15_M30H_CPUS];
3339 			pvt->ops = &family_types[F15_M30H_CPUS].ops;
3340 			break;
3341 		} else if (pvt->model == 0x60) {
3342 			fam_type = &family_types[F15_M60H_CPUS];
3343 			pvt->ops = &family_types[F15_M60H_CPUS].ops;
3344 			break;
3345 		}
3346 
3347 		fam_type	= &family_types[F15_CPUS];
3348 		pvt->ops	= &family_types[F15_CPUS].ops;
3349 		break;
3350 
3351 	case 0x16:
3352 		if (pvt->model == 0x30) {
3353 			fam_type = &family_types[F16_M30H_CPUS];
3354 			pvt->ops = &family_types[F16_M30H_CPUS].ops;
3355 			break;
3356 		}
3357 		fam_type	= &family_types[F16_CPUS];
3358 		pvt->ops	= &family_types[F16_CPUS].ops;
3359 		break;
3360 
3361 	case 0x17:
3362 		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3363 			fam_type = &family_types[F17_M10H_CPUS];
3364 			pvt->ops = &family_types[F17_M10H_CPUS].ops;
3365 			break;
3366 		} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3367 			fam_type = &family_types[F17_M30H_CPUS];
3368 			pvt->ops = &family_types[F17_M30H_CPUS].ops;
3369 			break;
3370 		} else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
3371 			fam_type = &family_types[F17_M60H_CPUS];
3372 			pvt->ops = &family_types[F17_M60H_CPUS].ops;
3373 			break;
3374 		} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
3375 			fam_type = &family_types[F17_M70H_CPUS];
3376 			pvt->ops = &family_types[F17_M70H_CPUS].ops;
3377 			break;
3378 		}
3379 		fallthrough;
3380 	case 0x18:
3381 		fam_type	= &family_types[F17_CPUS];
3382 		pvt->ops	= &family_types[F17_CPUS].ops;
3383 
3384 		if (pvt->fam == 0x18)
3385 			family_types[F17_CPUS].ctl_name = "F18h";
3386 		break;
3387 
3388 	case 0x19:
3389 		if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
3390 			fam_type = &family_types[F17_M70H_CPUS];
3391 			pvt->ops = &family_types[F17_M70H_CPUS].ops;
3392 			fam_type->ctl_name = "F19h_M20h";
3393 			break;
3394 		}
3395 		fam_type	= &family_types[F19_CPUS];
3396 		pvt->ops	= &family_types[F19_CPUS].ops;
3397 		family_types[F19_CPUS].ctl_name = "F19h";
3398 		break;
3399 
3400 	default:
3401 		amd64_err("Unsupported family!\n");
3402 		return NULL;
3403 	}
3404 
3405 	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
3406 		     (pvt->fam == 0xf ?
3407 				(pvt->ext_model >= K8_REV_F  ? "revF or later "
3408 							     : "revE or earlier ")
3409 				 : ""), pvt->mc_node_id);
3410 	return fam_type;
3411 }
3412 
3413 static const struct attribute_group *amd64_edac_attr_groups[] = {
3414 #ifdef CONFIG_EDAC_DEBUG
3415 	&amd64_edac_dbg_group,
3416 #endif
3417 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
3418 	&amd64_edac_inj_group,
3419 #endif
3420 	NULL
3421 };
3422 
3423 static int hw_info_get(struct amd64_pvt *pvt)
3424 {
3425 	u16 pci_id1, pci_id2;
3426 	int ret;
3427 
3428 	if (pvt->fam >= 0x17) {
3429 		pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3430 		if (!pvt->umc)
3431 			return -ENOMEM;
3432 
3433 		pci_id1 = fam_type->f0_id;
3434 		pci_id2 = fam_type->f6_id;
3435 	} else {
3436 		pci_id1 = fam_type->f1_id;
3437 		pci_id2 = fam_type->f2_id;
3438 	}
3439 
3440 	ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
3441 	if (ret)
3442 		return ret;
3443 
3444 	read_mc_regs(pvt);
3445 
3446 	return 0;
3447 }
3448 
3449 static void hw_info_put(struct amd64_pvt *pvt)
3450 {
3451 	if (pvt->F0 || pvt->F1)
3452 		free_mc_sibling_devs(pvt);
3453 
3454 	kfree(pvt->umc);
3455 }
3456 
3457 static int init_one_instance(struct amd64_pvt *pvt)
3458 {
3459 	struct mem_ctl_info *mci = NULL;
3460 	struct edac_mc_layer layers[2];
3461 	int ret = -EINVAL;
3462 
3463 	/*
3464 	 * We need to determine how many memory channels there are. Then use
3465 	 * that information for calculating the size of the dynamic instance
3466 	 * tables in the 'mci' structure.
3467 	 */
3468 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
3469 	if (pvt->channel_count < 0)
3470 		return ret;
3471 
3472 	ret = -ENOMEM;
3473 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3474 	layers[0].size = pvt->csels[0].b_cnt;
3475 	layers[0].is_virt_csrow = true;
3476 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
3477 
3478 	/*
3479 	 * Always allocate the maximum number of channels (max_mcs) since we can
3480 	 * have setups with DIMMs on only one channel. Also, this simplifies
3481 	 * handling later for the price of a couple of KBs tops.
3482 	 */
3483 	layers[1].size = fam_type->max_mcs;
3484 	layers[1].is_virt_csrow = false;
3485 
3486 	mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
3487 	if (!mci)
3488 		return ret;
3489 
3490 	mci->pvt_info = pvt;
3491 	mci->pdev = &pvt->F3->dev;
3492 
3493 	setup_mci_misc_attrs(mci);
3494 
3495 	if (init_csrows(mci))
3496 		mci->edac_cap = EDAC_FLAG_NONE;
3497 
3498 	ret = -ENODEV;
3499 	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3500 		edac_dbg(1, "failed edac_mc_add_mc()\n");
3501 		edac_mc_free(mci);
3502 		return ret;
3503 	}
3504 
3505 	return 0;
3506 }
3507 
3508 static bool instance_has_memory(struct amd64_pvt *pvt)
3509 {
3510 	bool cs_enabled = false;
3511 	int cs = 0, dct = 0;
3512 
3513 	for (dct = 0; dct < fam_type->max_mcs; dct++) {
3514 		for_each_chip_select(cs, dct, pvt)
3515 			cs_enabled |= csrow_enabled(cs, dct, pvt);
3516 	}
3517 
3518 	return cs_enabled;
3519 }
3520 
3521 static int probe_one_instance(unsigned int nid)
3522 {
3523 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3524 	struct amd64_pvt *pvt = NULL;
3525 	struct ecc_settings *s;
3526 	int ret;
3527 
3528 	ret = -ENOMEM;
3529 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3530 	if (!s)
3531 		goto err_out;
3532 
3533 	ecc_stngs[nid] = s;
3534 
3535 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3536 	if (!pvt)
3537 		goto err_settings;
3538 
3539 	pvt->mc_node_id	= nid;
3540 	pvt->F3 = F3;
3541 
3542 	fam_type = per_family_init(pvt);
3543 	if (!fam_type)
3544 		goto err_enable;
3545 
3546 	ret = hw_info_get(pvt);
3547 	if (ret < 0)
3548 		goto err_enable;
3549 
3550 	ret = 0;
3551 	if (!instance_has_memory(pvt)) {
3552 		amd64_info("Node %d: No DIMMs detected.\n", nid);
3553 		goto err_enable;
3554 	}
3555 
3556 	if (!ecc_enabled(pvt)) {
3557 		ret = -ENODEV;
3558 
3559 		if (!ecc_enable_override)
3560 			goto err_enable;
3561 
3562 		if (boot_cpu_data.x86 >= 0x17) {
3563 			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.\n");
3564 			goto err_enable;
3565 		} else
3566 			amd64_warn("Forcing ECC on!\n");
3567 
3568 		if (!enable_ecc_error_reporting(s, nid, F3))
3569 			goto err_enable;
3570 	}
3571 
3572 	ret = init_one_instance(pvt);
3573 	if (ret < 0) {
3574 		amd64_err("Error probing instance: %d\n", nid);
3575 
3576 		if (boot_cpu_data.x86 < 0x17)
3577 			restore_ecc_error_reporting(s, nid, F3);
3578 
3579 		goto err_enable;
3580 	}
3581 
3582 	dump_misc_regs(pvt);
3583 
3584 	return ret;
3585 
3586 err_enable:
3587 	hw_info_put(pvt);
3588 	kfree(pvt);
3589 
3590 err_settings:
3591 	kfree(s);
3592 	ecc_stngs[nid] = NULL;
3593 
3594 err_out:
3595 	return ret;
3596 }
3597 
3598 static void remove_one_instance(unsigned int nid)
3599 {
3600 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3601 	struct ecc_settings *s = ecc_stngs[nid];
3602 	struct mem_ctl_info *mci;
3603 	struct amd64_pvt *pvt;
3604 
3605 	/* Remove from EDAC CORE tracking list */
3606 	mci = edac_mc_del_mc(&F3->dev);
3607 	if (!mci)
3608 		return;
3609 
3610 	pvt = mci->pvt_info;
3611 
3612 	restore_ecc_error_reporting(s, nid, F3);
3613 
3614 	kfree(ecc_stngs[nid]);
3615 	ecc_stngs[nid] = NULL;
3616 
3617 	/* Free the EDAC CORE resources */
3618 	mci->pvt_info = NULL;
3619 
3620 	hw_info_put(pvt);
3621 	kfree(pvt);
3622 	edac_mc_free(mci);
3623 }
3624 
3625 static void setup_pci_device(void)
3626 {
3627 	if (pci_ctl)
3628 		return;
3629 
3630 	pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
3631 	if (!pci_ctl) {
3632 		pr_warn("%s(): Unable to create PCI control\n", __func__);
3633 		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
3634 	}
3635 }
3636 
3637 static const struct x86_cpu_id amd64_cpuids[] = {
3638 	X86_MATCH_VENDOR_FAM(AMD,	0x0F, NULL),
3639 	X86_MATCH_VENDOR_FAM(AMD,	0x10, NULL),
3640 	X86_MATCH_VENDOR_FAM(AMD,	0x15, NULL),
3641 	X86_MATCH_VENDOR_FAM(AMD,	0x16, NULL),
3642 	X86_MATCH_VENDOR_FAM(AMD,	0x17, NULL),
3643 	X86_MATCH_VENDOR_FAM(HYGON,	0x18, NULL),
3644 	X86_MATCH_VENDOR_FAM(AMD,	0x19, NULL),
3645 	{ }
3646 };
3647 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
3648 
3649 static int __init amd64_edac_init(void)
3650 {
3651 	const char *owner;
3652 	int err = -ENODEV;
3653 	int i;
3654 
3655 	owner = edac_get_owner();
3656 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3657 		return -EBUSY;
3658 
3659 	if (!x86_match_cpu(amd64_cpuids))
3660 		return -ENODEV;
3661 
3662 	if (amd_cache_northbridges() < 0)
3663 		return -ENODEV;
3664 
3665 	opstate_init();
3666 
3667 	err = -ENOMEM;
3668 	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
3669 	if (!ecc_stngs)
3670 		goto err_free;
3671 
3672 	msrs = msrs_alloc();
3673 	if (!msrs)
3674 		goto err_free;
3675 
3676 	for (i = 0; i < amd_nb_num(); i++) {
3677 		err = probe_one_instance(i);
3678 		if (err) {
3679 			/* unwind properly */
3680 			while (--i >= 0)
3681 				remove_one_instance(i);
3682 
3683 			goto err_pci;
3684 		}
3685 	}
3686 
3687 	if (!edac_has_mcs()) {
3688 		err = -ENODEV;
3689 		goto err_pci;
3690 	}
3691 
3692 	/* register stuff with EDAC MCE */
3693 	if (boot_cpu_data.x86 >= 0x17)
3694 		amd_register_ecc_decoder(decode_umc_error);
3695 	else
3696 		amd_register_ecc_decoder(decode_bus_error);
3697 
3698 	setup_pci_device();
3699 
3700 #ifdef CONFIG_X86_32
3701 	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3702 #endif
3703 
3704 	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
3705 
3706 	return 0;
3707 
3708 err_pci:
3709 	pci_ctl_dev = NULL;
3710 
3711 	msrs_free(msrs);
3712 	msrs = NULL;
3713 
3714 err_free:
3715 	kfree(ecc_stngs);
3716 	ecc_stngs = NULL;
3717 
3718 	return err;
3719 }
3720 
3721 static void __exit amd64_edac_exit(void)
3722 {
3723 	int i;
3724 
3725 	if (pci_ctl)
3726 		edac_pci_release_generic_ctl(pci_ctl);
3727 
3728 	/* unregister from EDAC MCE */
3729 	if (boot_cpu_data.x86 >= 0x17)
3730 		amd_unregister_ecc_decoder(decode_umc_error);
3731 	else
3732 		amd_unregister_ecc_decoder(decode_bus_error);
3733 
3734 	for (i = 0; i < amd_nb_num(); i++)
3735 		remove_one_instance(i);
3736 
3737 	kfree(ecc_stngs);
3738 	ecc_stngs = NULL;
3739 
3740 	pci_ctl_dev = NULL;
3741 
3742 	msrs_free(msrs);
3743 	msrs = NULL;
3744 }
3745 
3746 module_init(amd64_edac_init);
3747 module_exit(amd64_edac_exit);
3748 
3749 MODULE_LICENSE("GPL");
3750 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3751 		"Dave Peterson, Thayne Harbaugh");
3752 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3753 		EDAC_AMD64_VERSION);
3754 
3755 module_param(edac_op_state, int, 0444);
3756 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3757