/* xref: /linux/drivers/edac/amd64_edac.c (revision a0b54e256d513ed99e456bea6e4e188ff92e7c46) */
1 #include "amd64_edac.h"
2 #include <asm/k8.h>
3 
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
5 
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
8 
9 /*
10  * Set by command line parameter. If BIOS has enabled the ECC, this override is
11  * cleared to prevent re-enabling the hardware by this driver.
12  */
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
15 
16 /* Lookup table for all possible MC control instances */
17 struct amd64_pvt;
18 static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
19 static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];
20 
21 /*
22  * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
23  * for DDR2 DRAM mapping.
24  */
25 u32 revf_quad_ddr2_shift[] = {
26 	0,	/* 0000b NULL DIMM (128mb) */
27 	28,	/* 0001b 256mb */
28 	29,	/* 0010b 512mb */
29 	29,	/* 0011b 512mb */
30 	29,	/* 0100b 512mb */
31 	30,	/* 0101b 1gb */
32 	30,	/* 0110b 1gb */
33 	31,	/* 0111b 2gb */
34 	31,	/* 1000b 2gb */
35 	32,	/* 1001b 4gb */
36 	32,	/* 1010b 4gb */
37 	33,	/* 1011b 8gb */
38 	0,	/* 1100b future */
39 	0,	/* 1101b future */
40 	0,	/* 1110b future */
41 	0	/* 1111b future */
42 };
43 
44 /*
45  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
46  * bandwidth to a valid bit pattern. The 'set' operation finds the matching
47  * or next higher value.
48  *
49  * FIXME: Produce a better mapping/linearisation.
50  */
51 
52 struct scrubrate scrubrates[] = {
53 	{ 0x01, 1600000000UL},
54 	{ 0x02, 800000000UL},
55 	{ 0x03, 400000000UL},
56 	{ 0x04, 200000000UL},
57 	{ 0x05, 100000000UL},
58 	{ 0x06, 50000000UL},
59 	{ 0x07, 25000000UL},
60 	{ 0x08, 12284069UL},
61 	{ 0x09, 6274509UL},
62 	{ 0x0A, 3121951UL},
63 	{ 0x0B, 1560975UL},
64 	{ 0x0C, 781440UL},
65 	{ 0x0D, 390720UL},
66 	{ 0x0E, 195300UL},
67 	{ 0x0F, 97650UL},
68 	{ 0x10, 48854UL},
69 	{ 0x11, 24427UL},
70 	{ 0x12, 12213UL},
71 	{ 0x13, 6101UL},
72 	{ 0x14, 3051UL},
73 	{ 0x15, 1523UL},
74 	{ 0x16, 761UL},
75 	{ 0x00, 0UL},        /* scrubbing off */
76 };
77 
78 /*
79  * Memory scrubber control interface. For K8, memory scrubbing is handled by
80  * hardware and can involve L2 cache, dcache as well as the main memory. With
81  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
82  * functionality.
83  *
84  * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
85  * (dram) over to cache lines. This is nasty, so we will use bandwidth in
86  * bytes/sec for the setting.
87  *
88  * Currently, we only do dram scrubbing. If the scrubbing is done in software on
89  * other archs, we might not have access to the caches directly.
90  */
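/*
 * Editorial worked example (ours, not from the BKDG text above): scrubval
 * 0x01 corresponds to one 64-byte block scrubbed every 40 ns, i.e.
 * 64 / 40e-9 = 1.6e9 bytes/sec -- the 1600000000UL bandwidth listed first
 * in scrubrates[].
 */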
91 
92 /*
93  * Scan the scrub rate mapping table for a close or matching bandwidth value
94  * to issue. If the requested rate is too big, use the last (maximum) value found.
95  */
96 static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
97 				       u32 min_scrubrate)
98 {
99 	u32 scrubval;
100 	int i;
101 
102 	/*
103 	 * map the configured rate (new_bw) to a value specific to the AMD64
104 	 * memory controller and apply to register. Search for the first
105 	 * bandwidth entry that is greater or equal than the setting requested
106 	 * and program that. If at last entry, turn off DRAM scrubbing.
107 	 */
108 	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
109 		/*
110 		 * skip scrub rates which aren't recommended
111 		 * (see F10 BKDG, F3x58)
112 		 */
113 		if (scrubrates[i].scrubval < min_scrubrate)
114 			continue;
115 
116 		if (scrubrates[i].bandwidth <= new_bw)
117 			break;
118 
119 		/*
120 		 * if no suitable bandwidth found, turn off DRAM scrubbing
121 		 * entirely by falling back to the last element in the
122 		 * scrubrates array.
123 		 */
124 	}
125 
126 	scrubval = scrubrates[i].scrubval;
127 	if (scrubval)
128 		edac_printk(KERN_DEBUG, EDAC_MC,
129 			    "Setting scrub rate bandwidth: %u\n",
130 			    scrubrates[i].bandwidth);
131 	else
132 		edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
133 
134 	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
135 
136 	return 0;
137 }
138 
139 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
140 {
141 	struct amd64_pvt *pvt = mci->pvt_info;
142 	u32 min_scrubrate = 0x0;
143 
144 	switch (boot_cpu_data.x86) {
145 	case 0xf:
146 		min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
147 		break;
148 	case 0x10:
149 		min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
150 		break;
151 	case 0x11:
152 		min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
153 		break;
154 
155 	default:
156 		amd64_printk(KERN_ERR, "Unsupported family!\n");
157 		break;
158 	}
159 	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
160 			min_scrubrate);
161 }
162 
163 static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
164 {
165 	struct amd64_pvt *pvt = mci->pvt_info;
166 	u32 scrubval = 0;
167 	int status = -1, i, ret = 0;
168 
169 	ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
170 	if (ret)
171 		debugf0("Reading K8_SCRCTRL failed\n");
172 
173 	scrubval = scrubval & 0x001F;
174 
175 	edac_printk(KERN_DEBUG, EDAC_MC,
176 		    "pci-read, sdram scrub control value: %d \n", scrubval);
177 
178 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
179 		if (scrubrates[i].scrubval == scrubval) {
180 			*bw = scrubrates[i].bandwidth;
181 			status = 0;
182 			break;
183 		}
184 	}
185 
186 	return status;
187 }
188 
189 /* Map from a CSROW entry to the mask entry that operates on it */
190 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
191 {
192 	return csrow >> (pvt->num_dcsm >> 3);
193 }
194 
195 /* Return the 'base' address of the i'th CS entry of the 'dct' DRAM controller */
196 static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
197 {
198 	if (dct == 0)
199 		return pvt->dcsb0[csrow];
200 	else
201 		return pvt->dcsb1[csrow];
202 }
203 
204 /*
205  * Return the 'mask' address of the i'th CS entry. This function is needed
206  * because the number of DCSM registers differs between Rev E and prior and
207  * Rev F and later.
208  */
209 static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
210 {
211 	if (dct == 0)
212 		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
213 	else
214 		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
215 }
216 
217 
218 /*
219  * In *base and *limit, pass back the full 40-bit base and limit physical
220  * addresses for the node given by node_id.  This information is obtained from
221  * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
222  * base and limit addresses are of type SysAddr, as defined at the start of
223  * section 3.4.4 (p. 70).  They are the lowest and highest physical addresses
224  * in the address range they represent.
225  */
226 static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
227 			       u64 *base, u64 *limit)
228 {
229 	*base = pvt->dram_base[node_id];
230 	*limit = pvt->dram_limit[node_id];
231 }
232 
233 /*
234  * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
235  * with node_id
236  */
237 static int amd64_base_limit_match(struct amd64_pvt *pvt,
238 					u64 sys_addr, int node_id)
239 {
240 	u64 base, limit, addr;
241 
242 	amd64_get_base_and_limit(pvt, node_id, &base, &limit);
243 
244 	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
245 	 * all ones if the most significant implemented address bit is 1.
246 	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
247 	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
248 	 * Application Programming.
249 	 */
250 	addr = sys_addr & 0x000000ffffffffffull;
251 
252 	return (addr >= base) && (addr <= limit);
253 }
254 
255 /*
256  * Attempt to map a SysAddr to a node. On success, return a pointer to the
257  * mem_ctl_info structure for the node that the SysAddr maps to.
258  *
259  * On failure, return NULL.
260  */
261 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
262 						u64 sys_addr)
263 {
264 	struct amd64_pvt *pvt;
265 	int node_id;
266 	u32 intlv_en, bits;
267 
268 	/*
269 	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
270 	 * 3.4.4.2) registers to map the SysAddr to a node ID.
271 	 */
272 	pvt = mci->pvt_info;
273 
274 	/*
275 	 * The value of this field should be the same for all DRAM Base
276 	 * registers.  Therefore we arbitrarily choose to read it from the
277 	 * register for node 0.
278 	 */
279 	intlv_en = pvt->dram_IntlvEn[0];
280 
281 	if (intlv_en == 0) {
282 		for (node_id = 0; ; ) {
283 			if (amd64_base_limit_match(pvt, sys_addr, node_id))
284 				break;
285 
286 			if (++node_id >= DRAM_REG_COUNT)
287 				goto err_no_match;
288 		}
289 		goto found;
290 	}
291 
292 	if (unlikely((intlv_en != 0x01) &&
293 		     (intlv_en != 0x03) &&
294 		     (intlv_en != 0x07))) {
295 		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
296 			     "IntlvEn field of DRAM Base Register for node 0: "
297 			     "This probably indicates a BIOS bug.\n", intlv_en);
298 		return NULL;
299 	}
300 
301 	bits = (((u32) sys_addr) >> 12) & intlv_en;
302 
303 	for (node_id = 0; ; ) {
304 		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
305 			break;	/* intlv_sel field matches */
306 
307 		if (++node_id >= DRAM_REG_COUNT)
308 			goto err_no_match;
309 	}
310 
311 	/* sanity test for sys_addr */
312 	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
313 		amd64_printk(KERN_WARNING,
314 			  "%s(): sys_addr 0x%lx falls outside base/limit "
315 			  "address range for node %d with node interleaving "
316 			  "enabled.\n", __func__, (unsigned long)sys_addr,
317 			  node_id);
318 		return NULL;
319 	}
320 
321 found:
322 	return edac_mc_find(node_id);
323 
324 err_no_match:
325 	debugf2("sys_addr 0x%lx doesn't match any node\n",
326 		(unsigned long)sys_addr);
327 
328 	return NULL;
329 }
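/*
 * Editorial example (ours): with 4-node interleaving the BIOS programs
 * IntlvEn = 0x3, so SysAddr bits [13:12] select the node. For
 * sys_addr = 0x2000, bits = (0x2000 >> 12) & 0x3 = 0x2, and the loop above
 * settles on the node whose IntlvSel field is 0x2.
 */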
330 
331 /*
332  * Extract the DRAM CS base address from selected csrow register.
333  */
334 static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
335 {
336 	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
337 				pvt->dcs_shift;
338 }
339 
340 /*
341  * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
342  */
343 static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
344 {
345 	u64 dcsm_bits, other_bits;
346 	u64 mask;
347 
348 	/* Extract bits from DRAM CS Mask. */
349 	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
350 
351 	other_bits = pvt->dcsm_mask;
352 	other_bits = ~(other_bits << pvt->dcs_shift);
353 
354 	/*
355 	 * The extracted bits from DCSM belong in the spaces represented by
356 	 * the cleared bits in other_bits.
357 	 */
358 	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
359 
360 	return mask;
361 }
362 
363 /*
364  * @input_addr is an InputAddr associated with the node given by mci. Return the
365  * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
366  */
367 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
368 {
369 	struct amd64_pvt *pvt;
370 	int csrow;
371 	u64 base, mask;
372 
373 	pvt = mci->pvt_info;
374 
375 	/*
376 	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
377 	 * base/mask register pair, test the condition shown near the start of
378 	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
379 	 */
380 	for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
381 
382 		/* This DRAM chip select is disabled on this node */
383 		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
384 			continue;
385 
386 		base = base_from_dct_base(pvt, csrow);
387 		mask = ~mask_from_dct_mask(pvt, csrow);
388 
389 		if ((input_addr & mask) == (base & mask)) {
390 			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
391 				(unsigned long)input_addr, csrow,
392 				pvt->mc_node_id);
393 
394 			return csrow;
395 		}
396 	}
397 
398 	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
399 		(unsigned long)input_addr, pvt->mc_node_id);
400 
401 	return -1;
402 }
403 
404 /*
405  * Return the base value defined by the DRAM Base register for the node
406  * represented by mci.  This function returns the full 40-bit value despite the
407  * fact that the register only stores bits 39-24 of the value. See section
408  * 3.4.4.1 (BKDG #26094, K8, revA-E)
409  */
410 static inline u64 get_dram_base(struct mem_ctl_info *mci)
411 {
412 	struct amd64_pvt *pvt = mci->pvt_info;
413 
414 	return pvt->dram_base[pvt->mc_node_id];
415 }
416 
417 /*
418  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
419  * for the node represented by mci. Info is passed back in *hole_base,
420  * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
421  * info is invalid. Info may be invalid for either of the following reasons:
422  *
423  * - The revision of the node is not E or greater.  In this case, the DRAM Hole
424  *   Address Register does not exist.
425  *
426  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
427  *   indicating that its contents are not valid.
428  *
429  * The values passed back in *hole_base, *hole_offset, and *hole_size are
430  * complete 32-bit values despite the fact that the bitfields in the DHAR
431  * only represent bits 31-24 of the base and offset values.
432  */
433 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
434 			     u64 *hole_offset, u64 *hole_size)
435 {
436 	struct amd64_pvt *pvt = mci->pvt_info;
437 	u64 base;
438 
439 	/* only revE and later have the DRAM Hole Address Register */
440 	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
441 		debugf1("  revision %d for node %d does not support DHAR\n",
442 			pvt->ext_model, pvt->mc_node_id);
443 		return 1;
444 	}
445 
446 	/* only valid for Fam10h */
447 	if (boot_cpu_data.x86 == 0x10 &&
448 	    (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
449 		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
450 		return 1;
451 	}
452 
453 	if ((pvt->dhar & DHAR_VALID) == 0) {
454 		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
455 			pvt->mc_node_id);
456 		return 1;
457 	}
458 
459 	/* This node has Memory Hoisting */
460 
461 	/* +------------------+--------------------+--------------------+-----
462 	 * | memory           | DRAM hole          | relocated          |
463 	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
464 	 * |                  |                    | DRAM hole          |
465 	 * |                  |                    | [0x100000000,      |
466 	 * |                  |                    |  (0x100000000+     |
467 	 * |                  |                    |   (0xffffffff-x))] |
468 	 * +------------------+--------------------+--------------------+-----
469 	 *
470 	 * Above is a diagram of physical memory showing the DRAM hole and the
471 	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
472 	 * starts at address x (the base address) and extends through address
473 	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
474 	 * addresses in the hole so that they start at 0x100000000.
475 	 */
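	/*
	 * Editorial worked example: for a hole base x = 0xC0000000, the hole
	 * spans [0xC0000000, 0xFFFFFFFF], *hole_size below computes to
	 * 2^32 - 0xC0000000 = 0x40000000 (1GB), and the hoisted copy of that
	 * DRAM answers at [0x100000000, 0x13FFFFFFF].
	 */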
476 
477 	base = dhar_base(pvt->dhar);
478 
479 	*hole_base = base;
480 	*hole_size = (0x1ull << 32) - base;
481 
482 	if (boot_cpu_data.x86 > 0xf)
483 		*hole_offset = f10_dhar_offset(pvt->dhar);
484 	else
485 		*hole_offset = k8_dhar_offset(pvt->dhar);
486 
487 	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
488 		pvt->mc_node_id, (unsigned long)*hole_base,
489 		(unsigned long)*hole_offset, (unsigned long)*hole_size);
490 
491 	return 0;
492 }
493 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
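/*
 * Minimal usage sketch (editorial; example_hole_end() is ours and not part
 * of the driver): how a caller might consume the exported helper above. A
 * zero return from amd64_get_dram_hole_info() means all three output
 * values are valid.
 */
static inline u64 example_hole_end(struct mem_ctl_info *mci)
{
	u64 hole_base, hole_offset, hole_size;

	if (amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				     &hole_size))
		return 0;	/* no DRAM hole on this node */

	/* the hole always ends at the 4GB boundary */
	return hole_base + hole_size;
}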
494 
495 /*
496  * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
497  * assumed that sys_addr maps to the node given by mci.
498  *
499  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
500  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
501  * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
502  * then it is also involved in translating a SysAddr to a DramAddr. Sections
503  * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
504  * These parts of the documentation are unclear. I interpret them as follows:
505  *
506  * When node n receives a SysAddr, it processes the SysAddr as follows:
507  *
508  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
509  *    Limit registers for node n. If the SysAddr is not within the range
510  *    specified by the base and limit values, then node n ignores the Sysaddr
511  *    (since it does not map to node n). Otherwise continue to step 2 below.
512  *
513  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
514  *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
515  *    the range of relocated addresses (starting at 0x100000000) from the DRAM
516  *    hole. If not, skip to step 3 below. Else get the value of the
517  *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
518  *    offset defined by this value from the SysAddr.
519  *
520  * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
521  *    Base register for node n. To obtain the DramAddr, subtract the base
522  *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
523  */
524 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
525 {
526 	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
527 	int ret = 0;
528 
529 	dram_base = get_dram_base(mci);
530 
531 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
532 				      &hole_size);
533 	if (!ret) {
534 		if ((sys_addr >= (1ull << 32)) &&
535 		    (sys_addr < ((1ull << 32) + hole_size))) {
536 			/* use DHAR to translate SysAddr to DramAddr */
537 			dram_addr = sys_addr - hole_offset;
538 
539 			debugf2("using DHAR to translate SysAddr 0x%lx to "
540 				"DramAddr 0x%lx\n",
541 				(unsigned long)sys_addr,
542 				(unsigned long)dram_addr);
543 
544 			return dram_addr;
545 		}
546 	}
547 
548 	/*
549 	 * Translate the SysAddr to a DramAddr as shown near the start of
550 	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
551 	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
552 	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
553 	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
554 	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
555 	 * Programmer's Manual Volume 1 Application Programming.
556 	 */
557 	dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
558 
559 	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
560 		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
561 		(unsigned long)dram_addr);
562 	return dram_addr;
563 }
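/*
 * Editorial worked example covering both paths above: with hole_offset =
 * 0x40000000, SysAddr 0x100001000 lies in the relocated region and becomes
 * DramAddr 0x100001000 - 0x40000000 = 0xC0001000 (step 2). Without
 * hoisting, a node with dram_base = 0x100000000 maps SysAddr 0x123456000
 * to DramAddr 0x23456000 by plain base subtraction (step 3).
 */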
564 
565 /*
566  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
567  * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
568  * for node interleaving.
569  */
570 static int num_node_interleave_bits(unsigned intlv_en)
571 {
572 	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
573 	int n;
574 
575 	BUG_ON(intlv_en > 7);
576 	n = intlv_shift_table[intlv_en];
577 	return n;
578 }
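/*
 * E.g. intlv_en = 0x3 (SysAddr bits [13:12] interleaved across 4 nodes)
 * yields 2. Valid IntlvEn encodings are only 0, 1, 3 and 7, hence the
 * zeroes for the remaining table slots.
 */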
579 
580 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
581 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
582 {
583 	struct amd64_pvt *pvt;
584 	int intlv_shift;
585 	u64 input_addr;
586 
587 	pvt = mci->pvt_info;
588 
589 	/*
590 	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
591 	 * concerning translating a DramAddr to an InputAddr.
592 	 */
593 	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
594 	input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
595 	    (dram_addr & 0xfff);
596 
597 	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
598 		intlv_shift, (unsigned long)dram_addr,
599 		(unsigned long)input_addr);
600 
601 	return input_addr;
602 }
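/*
 * Editorial worked example: with one interleave bit (intlv_shift = 1),
 * DramAddr 0x12345678 gives ((0x12345678 >> 1) & 0xffffff000) + 0x678 =
 * 0x91a2000 + 0x678, i.e. InputAddr 0x91a2678.
 */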
603 
604 /*
605  * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
606  * assumed that @sys_addr maps to the node given by mci.
607  */
608 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
609 {
610 	u64 input_addr;
611 
612 	input_addr =
613 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
614 
615 	debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
616 		(unsigned long)sys_addr, (unsigned long)input_addr);
617 
618 	return input_addr;
619 }
620 
621 
622 /*
623  * @input_addr is an InputAddr associated with the node represented by mci.
624  * Translate @input_addr to a DramAddr and return the result.
625  */
626 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
627 {
628 	struct amd64_pvt *pvt;
629 	int node_id, intlv_shift;
630 	u64 bits, dram_addr;
631 	u32 intlv_sel;
632 
633 	/*
634 	 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
635 	 * shows how to translate a DramAddr to an InputAddr. Here we reverse
636 	 * this procedure. When translating from a DramAddr to an InputAddr, the
637 	 * bits used for node interleaving are discarded.  Here we recover these
638 	 * bits from the IntlvSel field of the DRAM Limit register (section
639 	 * 3.4.4.2) for the node that input_addr is associated with.
640 	 */
641 	pvt = mci->pvt_info;
642 	node_id = pvt->mc_node_id;
643 	BUG_ON((node_id < 0) || (node_id > 7));
644 
645 	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
646 
647 	if (intlv_shift == 0) {
648 		debugf1("    InputAddr 0x%lx translates to DramAddr of "
649 			"same value\n",	(unsigned long)input_addr);
650 
651 		return input_addr;
652 	}
653 
654 	bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
655 	    (input_addr & 0xfff);
656 
657 	intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
658 	dram_addr = bits + (intlv_sel << 12);
659 
660 	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
661 		"(%d node interleave bits)\n", (unsigned long)input_addr,
662 		(unsigned long)dram_addr, intlv_shift);
663 
664 	return dram_addr;
665 }
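/*
 * Editorial worked example, reversing the dram_addr_to_input_addr() one:
 * with intlv_shift = 1, InputAddr 0x91a2678 expands to bits =
 * (0x91a2000 << 1) + 0x678 = 0x12344678; an IntlvSel value of 1 then
 * restores the discarded bit 12: 0x12344678 + (1 << 12) = DramAddr
 * 0x12345678.
 */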
666 
667 /*
668  * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
669  * @dram_addr to a SysAddr.
670  */
671 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
672 {
673 	struct amd64_pvt *pvt = mci->pvt_info;
674 	u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
675 	int ret = 0;
676 
677 	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
678 				      &hole_size);
679 	if (!ret) {
680 		if ((dram_addr >= hole_base) &&
681 		    (dram_addr < (hole_base + hole_size))) {
682 			sys_addr = dram_addr + hole_offset;
683 
684 			debugf1("using DHAR to translate DramAddr 0x%lx to "
685 				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
686 				(unsigned long)sys_addr);
687 
688 			return sys_addr;
689 		}
690 	}
691 
692 	amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
693 	sys_addr = dram_addr + base;
694 
695 	/*
696 	 * The sys_addr we have computed up to this point is a 40-bit value
697 	 * because the k8 deals with 40-bit values.  However, the value we are
698 	 * supposed to return is a full 64-bit physical address.  The AMD
699 	 * x86-64 architecture specifies that the most significant implemented
700 	 * address bit through bit 63 of a physical address must be either all
701 	 * 0s or all 1s.  Therefore we sign-extend the 40-bit sys_addr to a
702 	 * 64-bit value below.  See section 3.4.2 of AMD publication 24592:
703 	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
704 	 * Programming.
705 	 */
706 	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
707 
708 	debugf1("    Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
709 		pvt->mc_node_id, (unsigned long)dram_addr,
710 		(unsigned long)sys_addr);
711 
712 	return sys_addr;
713 }
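/*
 * Editorial sketch of the sign-extension idiom used above; the helper is
 * ours and is not called by the driver. Bit 39 is propagated through bits
 * 63-40, e.g. 0x8000000000 becomes 0xffffff8000000000 while 0x0123456789
 * is returned unchanged.
 */
static inline u64 example_sign_extend_bit39(u64 addr)
{
	/*
	 * (addr & bit 39) - 1 is all-ones when bit 39 is clear, so the
	 * complement ORs in nothing; when bit 39 is set it is
	 * 0x0000007fffffffff and the complement ORs in bits 63-39.
	 */
	return addr | ~((addr & (1ull << 39)) - 1);
}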
714 
715 /*
716  * @input_addr is an InputAddr associated with the node given by mci. Translate
717  * @input_addr to a SysAddr.
718  */
719 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
720 					 u64 input_addr)
721 {
722 	return dram_addr_to_sys_addr(mci,
723 				     input_addr_to_dram_addr(mci, input_addr));
724 }
725 
726 /*
727  * Find the minimum and maximum InputAddr values that map to the given @csrow.
728  * Pass back these values in *input_addr_min and *input_addr_max.
729  */
730 static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
731 			      u64 *input_addr_min, u64 *input_addr_max)
732 {
733 	struct amd64_pvt *pvt;
734 	u64 base, mask;
735 
736 	pvt = mci->pvt_info;
737 	BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT));
738 
739 	base = base_from_dct_base(pvt, csrow);
740 	mask = mask_from_dct_mask(pvt, csrow);
741 
742 	*input_addr_min = base & ~mask;
743 	*input_addr_max = base | mask | pvt->dcs_mask_notused;
744 }
745 
746 /*
747  * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
748  * Address High (section 3.6.4.6) register values and return the result. The
749  * address is located in the info structure (nbeah and nbeal); the encoding
750  * is device specific.
751  */
752 static u64 extract_error_address(struct mem_ctl_info *mci,
753 				 struct err_regs *info)
754 {
755 	struct amd64_pvt *pvt = mci->pvt_info;
756 
757 	return pvt->ops->get_error_address(mci, info);
758 }
759 
760 
761 /* Map the Error address to a PAGE and PAGE OFFSET. */
762 static inline void error_address_to_page_and_offset(u64 error_address,
763 						    u32 *page, u32 *offset)
764 {
765 	*page = (u32) (error_address >> PAGE_SHIFT);
766 	*offset = ((u32) error_address) & ~PAGE_MASK;
767 }
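/*
 * E.g. with 4K pages (PAGE_SHIFT = 12), error_address 0x12345678 splits
 * into page 0x12345 and offset 0x678.
 */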
768 
769 /*
770  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
771  * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
772  * of a node that detected an ECC memory error.  mci represents the node that
773  * the error address maps to (possibly different from the node that detected
774  * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
775  * error.
776  */
777 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
778 {
779 	int csrow;
780 
781 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
782 
783 	if (csrow == -1)
784 		amd64_mc_printk(mci, KERN_ERR,
785 			     "Failed to translate InputAddr to csrow for "
786 			     "address 0x%lx\n", (unsigned long)sys_addr);
787 	return csrow;
788 }
789 
790 static int get_channel_from_ecc_syndrome(unsigned short syndrome);
791 
792 static void amd64_cpu_display_info(struct amd64_pvt *pvt)
793 {
794 	if (boot_cpu_data.x86 == 0x11)
795 		edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
796 	else if (boot_cpu_data.x86 == 0x10)
797 		edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
798 	else if (boot_cpu_data.x86 == 0xf)
799 		edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
800 			(pvt->ext_model >= OPTERON_CPU_REV_F) ?
801 			"Rev F or later" : "Rev E or earlier");
802 	else
803 		/* we'll hardly ever get here */
804 		edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
805 }
806 
807 /*
808  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
809  * are ECC capable.
810  */
811 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
812 {
813 	int bit;
814 	enum edac_type edac_cap = EDAC_FLAG_NONE;
815 
816 	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
817 		? 19
818 		: 17;
819 
820 	if (pvt->dclr0 & BIT(bit))
821 		edac_cap = EDAC_FLAG_SECDED;
822 
823 	return edac_cap;
824 }
825 
826 
827 static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
828 					 int ganged);
829 
830 /* Display and decode various NB registers for debug purposes. */
831 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
832 {
833 	int ganged;
834 
835 	debugf1("  nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
836 		pvt->nbcap,
837 		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "True" : "False",
838 		(pvt->nbcap & K8_NBCAP_DUAL_NODE) ? "True" : "False",
839 		(pvt->nbcap & K8_NBCAP_8_NODE) ? "True" : "False");
840 	debugf1("    ECC Capable=%s   ChipKill Capable=%s\n",
841 		(pvt->nbcap & K8_NBCAP_SECDED) ? "True" : "False",
842 		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "True" : "False");
843 	debugf1("  DramCfg0-low=0x%08x DIMM-ECC=%s Parity=%s Width=%s\n",
844 		pvt->dclr0,
845 		(pvt->dclr0 & BIT(19)) ?  "Enabled" : "Disabled",
846 		(pvt->dclr0 & BIT(8)) ?  "Enabled" : "Disabled",
847 		(pvt->dclr0 & BIT(11)) ?  "128b" : "64b");
848 	debugf1("    DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s  DIMM Type=%s\n",
849 		(pvt->dclr0 & BIT(12)) ?  "Y" : "N",
850 		(pvt->dclr0 & BIT(13)) ?  "Y" : "N",
851 		(pvt->dclr0 & BIT(14)) ?  "Y" : "N",
852 		(pvt->dclr0 & BIT(15)) ?  "Y" : "N",
853 		(pvt->dclr0 & BIT(16)) ?  "UN-Buffered" : "Buffered");
854 
855 
856 	debugf1("  online-spare: 0x%8.08x\n", pvt->online_spare);
857 
858 	if (boot_cpu_data.x86 == 0xf) {
859 		debugf1("  dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
860 			pvt->dhar, dhar_base(pvt->dhar),
861 			k8_dhar_offset(pvt->dhar));
862 		debugf1("      DramHoleValid=%s\n",
863 			(pvt->dhar & DHAR_VALID) ?  "True" : "False");
864 
865 		debugf1("  dbam-dkt: 0x%8.08x\n", pvt->dbam0);
866 
867 		/* everything below this point is Fam10h and above */
868 		return;
869 
870 	} else {
871 		debugf1("  dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
872 			pvt->dhar, dhar_base(pvt->dhar),
873 			f10_dhar_offset(pvt->dhar));
874 		debugf1("    DramMemHoistValid=%s DramHoleValid=%s\n",
875 			(pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
876 			"True" : "False",
877 			(pvt->dhar & DHAR_VALID) ?
878 			"True" : "False");
879 	}
880 
881 	/* Only if NOT ganged does dcl1 have valid info */
882 	if (!dct_ganging_enabled(pvt)) {
883 		debugf1("  DramCfg1-low=0x%08x DIMM-ECC=%s Parity=%s "
884 			"Width=%s\n", pvt->dclr1,
885 			(pvt->dclr1 & BIT(19)) ?  "Enabled" : "Disabled",
886 			(pvt->dclr1 & BIT(8)) ?  "Enabled" : "Disabled",
887 			(pvt->dclr1 & BIT(11)) ?  "128b" : "64b");
888 		debugf1("    DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s  "
889 			"DIMM Type=%s\n",
890 			(pvt->dclr1 & BIT(12)) ?  "Y" : "N",
891 			(pvt->dclr1 & BIT(13)) ?  "Y" : "N",
892 			(pvt->dclr1 & BIT(14)) ?  "Y" : "N",
893 			(pvt->dclr1 & BIT(15)) ?  "Y" : "N",
894 			(pvt->dclr1 & BIT(16)) ?  "UN-Buffered" : "Buffered");
895 	}
896 
897 	/*
898 	 * Determine if ganged and then dump memory sizes for first controller,
899 	 * and if NOT ganged dump info for 2nd controller.
900 	 */
901 	ganged = dct_ganging_enabled(pvt);
902 
903 	f10_debug_display_dimm_sizes(0, pvt, ganged);
904 
905 	if (!ganged)
906 		f10_debug_display_dimm_sizes(1, pvt, ganged);
907 }
908 
909 /* Read in both of DBAM registers */
910 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
911 {
912 	int err = 0;
913 	unsigned int reg;
914 
915 	reg = DBAM0;
916 	err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
917 	if (err)
918 		goto err_reg;
919 
920 	if (boot_cpu_data.x86 >= 0x10) {
921 		reg = DBAM1;
922 		err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);
923 
924 		if (err)
925 			goto err_reg;
926 	}
927 
928 	return;
929 
930 err_reg:
931 	debugf0("Error reading F2x%03x.\n", reg);
932 }
933 
934 /*
935  * NOTE: CPU Revision Dependent code: Rev E and Rev F
936  *
937  * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
938  * set the shift factor for the DCSB and DCSM values.
939  *
940  * ->dcs_mask_notused, RevE:
941  *
942  * To find the max InputAddr for the csrow, start with the base address and set
943  * all bits that are "don't care" bits in the test at the start of section
944  * 3.5.4 (p. 84).
945  *
946  * The "don't care" bits are all set bits in the mask and all bits in the gaps
947  * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
948  * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
949  * gaps.
950  *
951  * ->dcs_mask_notused, RevF and later:
952  *
953  * To find the max InputAddr for the csrow, start with the base address and set
954  * all bits that are "don't care" bits in the test at the start of NPT section
955  * 4.5.4 (p. 87).
956  *
957  * The "don't care" bits are all set bits in the mask and all bits in the gaps
958  * between bit ranges [36:27] and [21:13].
959  *
960  * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
961  * which are all bits in the above-mentioned gaps.
962  */
963 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
964 {
965 	if (pvt->ext_model >= OPTERON_CPU_REV_F) {
966 		pvt->dcsb_base		= REV_F_F1Xh_DCSB_BASE_BITS;
967 		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
968 		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
969 		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;
970 
971 		switch (boot_cpu_data.x86) {
972 		case 0xf:
973 			pvt->num_dcsm = REV_F_DCSM_COUNT;
974 			break;
975 
976 		case 0x10:
977 			pvt->num_dcsm = F10_DCSM_COUNT;
978 			break;
979 
980 		case 0x11:
981 			pvt->num_dcsm = F11_DCSM_COUNT;
982 			break;
983 
984 		default:
985 			amd64_printk(KERN_ERR, "Unsupported family!\n");
986 			break;
987 		}
988 	} else {
989 		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
990 		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
991 		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
992 		pvt->dcs_shift		= REV_E_DCS_SHIFT;
993 		pvt->num_dcsm		= REV_E_DCSM_COUNT;
994 	}
995 }
996 
997 /*
998  * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
999  */
1000 static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
1001 {
1002 	int cs, reg, err = 0;
1003 
1004 	amd64_set_dct_base_and_mask(pvt);
1005 
1006 	for (cs = 0; cs < CHIPSELECT_COUNT; cs++) {
1007 		reg = K8_DCSB0 + (cs * 4);
1008 		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
1009 						&pvt->dcsb0[cs]);
1010 		if (unlikely(err))
1011 			debugf0("Reading K8_DCSB0[%d] failed\n", cs);
1012 		else
1013 			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
1014 				cs, pvt->dcsb0[cs], reg);
1015 
1016 		/* If DCT are NOT ganged, then read in DCT1's base */
1017 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
1018 			reg = F10_DCSB1 + (cs * 4);
1019 			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
1020 							&pvt->dcsb1[cs]);
1021 			if (unlikely(err))
1022 				debugf0("Reading F10_DCSB1[%d] failed\n", cs);
1023 			else
1024 				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
1025 					cs, pvt->dcsb1[cs], reg);
1026 		} else {
1027 			pvt->dcsb1[cs] = 0;
1028 		}
1029 	}
1030 
1031 	for (cs = 0; cs < pvt->num_dcsm; cs++) {
1032 		reg = K8_DCSM0 + (cs * 4);
1033 		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
1034 					&pvt->dcsm0[cs]);
1035 		if (unlikely(err))
1036 			debugf0("Reading K8_DCSM0 failed\n");
1037 		else
1038 			debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
1039 				cs, pvt->dcsm0[cs], reg);
1040 
1041 		/* If DCT are NOT ganged, then read in DCT1's mask */
1042 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
1043 			reg = F10_DCSM1 + (cs * 4);
1044 			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
1045 					&pvt->dcsm1[cs]);
1046 			if (unlikely(err))
1047 				debugf0("Reading F10_DCSM1[%d] failed\n", cs);
1048 			else
1049 				debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
1050 					cs, pvt->dcsm1[cs], reg);
1051 		} else
1052 			pvt->dcsm1[cs] = 0;
1053 	}
1054 }
1055 
1056 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
1057 {
1058 	enum mem_type type;
1059 
1060 	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
1061 		/* Rev F and later */
1062 		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1063 	} else {
1064 		/* Rev E and earlier */
1065 		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1066 	}
1067 
1068 	debugf1("  Memory type is: %s\n",
1069 		(type == MEM_DDR2) ? "MEM_DDR2" :
1070 		(type == MEM_RDDR2) ? "MEM_RDDR2" :
1071 		(type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
1072 
1073 	return type;
1074 }
1075 
1076 /*
1077  * Read the DRAM Configuration Low register. It differs between CG, D & E revs
1078  * and the later RevF memory controllers (DDR vs DDR2)
1079  *
1080  * Return:
1081  *      number of memory channels in operation
1082  * Pass back:
1083  *      contents of the DCL0_LOW register
1084  */
1085 static int k8_early_channel_count(struct amd64_pvt *pvt)
1086 {
1087 	int flag, err = 0;
1088 
1089 	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1090 	if (err)
1091 		return err;
1092 
1093 	if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
1094 		/* RevF (NPT) and later */
1095 		flag = pvt->dclr0 & F10_WIDTH_128;
1096 	} else {
1097 		/* RevE and earlier */
1098 		flag = pvt->dclr0 & REVE_WIDTH_128;
1099 	}
1100 
1101 	/* not used */
1102 	pvt->dclr1 = 0;
1103 
1104 	return (flag) ? 2 : 1;
1105 }
1106 
1107 /* extract the ERROR ADDRESS for the K8 CPUs */
1108 static u64 k8_get_error_address(struct mem_ctl_info *mci,
1109 				struct err_regs *info)
1110 {
1111 	return (((u64) (info->nbeah & 0xff)) << 32) +
1112 			(info->nbeal & ~0x03);
1113 }
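/*
 * Editorial worked example: nbeah = 0x12, nbeal = 0x345678ab yields
 * (0x12ULL << 32) + (0x345678ab & ~0x03) = 0x12345678a8; the two low bits
 * carry no address information here and are masked off.
 */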
1114 
1115 /*
1116  * Read the Base and Limit registers for K8 based Memory controllers; extract
1117  * fields from the 'raw' reg into separate data fields
1118  *
1119  * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
1120  */
1121 static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1122 {
1123 	u32 low;
1124 	u32 off = dram << 3;	/* 8 bytes between DRAM entries */
1125 	int err;
1126 
1127 	err = pci_read_config_dword(pvt->addr_f1_ctl,
1128 				    K8_DRAM_BASE_LOW + off, &low);
1129 	if (err)
1130 		debugf0("Reading K8_DRAM_BASE_LOW failed\n");
1131 
1132 	/* Extract parts into separate data entries */
1133 	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
1134 	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1135 	pvt->dram_rw_en[dram] = (low & 0x3);
1136 
1137 	err = pci_read_config_dword(pvt->addr_f1_ctl,
1138 				    K8_DRAM_LIMIT_LOW + off, &low);
1139 	if (err)
1140 		debugf0("Reading K8_DRAM_LIMIT_LOW failed\n");
1141 
1142 	/*
1143 	 * Extract parts into separate data entries. Limit is the HIGHEST memory
1144 	 * location of the region, so lower 24 bits need to be all ones
1145 	 */
1146 	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
1147 	pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1148 	pvt->dram_DstNode[dram] = (low & 0x7);
1149 }
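/*
 * Editorial worked example: a raw base register value low = 0x00400003
 * decodes to dram_base = (0x00400000ULL << 8) = 0x40000000 (1GB), with
 * IntlvEn = 0 and both RW enable bits set (0x3).
 */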
1150 
1151 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1152 					struct err_regs *info,
1153 					u64 SystemAddress)
1154 {
1155 	struct mem_ctl_info *src_mci;
1156 	unsigned short syndrome;
1157 	int channel, csrow;
1158 	u32 page, offset;
1159 
1160 	/* Extract the syndrome parts and form a 16-bit syndrome */
1161 	syndrome  = HIGH_SYNDROME(info->nbsl) << 8;
1162 	syndrome |= LOW_SYNDROME(info->nbsh);
1163 
1164 	/* CHIPKILL enabled */
1165 	if (info->nbcfg & K8_NBCFG_CHIPKILL) {
1166 		channel = get_channel_from_ecc_syndrome(syndrome);
1167 		if (channel < 0) {
1168 			/*
1169 			 * Syndrome didn't map, so we don't know which of the
1170 			 * 2 DIMMs is in error and have to flag 'both' of them
1171 			 * as suspect.
1172 			 */
1173 			amd64_mc_printk(mci, KERN_WARNING,
1174 				       "unknown syndrome 0x%x - possible error "
1175 				       "reporting race\n", syndrome);
1176 			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1177 			return;
1178 		}
1179 	} else {
1180 		/*
1181 		 * non-chipkill ecc mode
1182 		 *
1183 		 * The k8 documentation is unclear about how to determine the
1184 		 * channel number when using non-chipkill memory.  This method
1185 		 * was obtained from email communication with someone at AMD.
1186 		 * (Wish the email was placed in this comment - norsk)
1187 		 */
1188 		channel = ((SystemAddress & BIT(3)) != 0);
1189 	}
1190 
1191 	/*
1192 	 * Find out which node the error address belongs to. This may be
1193 	 * different from the node that detected the error.
1194 	 */
1195 	src_mci = find_mc_by_sys_addr(mci, SystemAddress);
1196 	if (!src_mci) {
1197 		amd64_mc_printk(mci, KERN_ERR,
1198 			     "failed to map error address 0x%lx to a node\n",
1199 			     (unsigned long)SystemAddress);
1200 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1201 		return;
1202 	}
1203 
1204 	/* Now map the SystemAddress to a CSROW */
1205 	csrow = sys_addr_to_csrow(src_mci, SystemAddress);
1206 	if (csrow < 0) {
1207 		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1208 	} else {
1209 		error_address_to_page_and_offset(SystemAddress, &page, &offset);
1210 
1211 		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1212 				  channel, EDAC_MOD_STR);
1213 	}
1214 }
1215 
1216 /*
1217  * determine the number of PAGES for this DIMM's size based on its DRAM
1218  * Address Mapping.
1219  *
1220  * The first step is to calculate the number of bits to shift a value of 1
1221  * left to indicate how many pages. Start with the DBAM value as the starting
1222  * bits, then adjust those shift bits based on the CPU revision and the table.
1223  * See the BKDG section on the DBAM.
1224  */
1225 static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1226 {
1227 	int nr_pages;
1228 
1229 	if (pvt->ext_model >= OPTERON_CPU_REV_F) {
1230 		nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
1231 	} else {
1232 		/*
1233 		 * RevE and less section; this line is tricky. It collapses the
1234 		 * table used by RevD and later to one that matches revisions CG
1235 		 * and earlier.
1236 		 */
1237 		dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
1238 				(dram_map > 8 ? 4 : (dram_map > 5 ?
1239 				3 : (dram_map > 2 ? 1 : 0))) : 0;
1240 
1241 		/* 25 shift is 32MiB minimum DIMM size in RevE and prior */
1242 		nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
1243 	}
1244 
1245 	return nr_pages;
1246 }
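/*
 * Editorial worked example: on RevF, dram_map = 5 selects
 * revf_quad_ddr2_shift[5] = 30, so nr_pages = 1 << (30 - 12) = 262144
 * pages, i.e. 1GB with 4K pages -- matching the "0101b 1gb" table entry.
 */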
1247 
1248 /*
1249  * Get the number of DCT channels in use.
1250  *
1251  * Return:
1252  *	number of Memory Channels in operation
1253  * Pass back:
1254  *	contents of the DCL0_LOW register
1255  */
1256 static int f10_early_channel_count(struct amd64_pvt *pvt)
1257 {
1258 	int err = 0, channels = 0;
1259 	u32 dbam;
1260 
1261 	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1262 	if (err)
1263 		goto err_reg;
1264 
1265 	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
1266 	if (err)
1267 		goto err_reg;
1268 
1269 	/* If we are in 128 bit mode, then we are using 2 channels */
1270 	if (pvt->dclr0 & F10_WIDTH_128) {
1271 		debugf0("Data WIDTH is 128 bits - 2 channels\n");
1272 		channels = 2;
1273 		return channels;
1274 	}
1275 
1276 	/*
1277 	 * Need to check if in UN-ganged mode: in that case, there are 2 channels,
1278 	 * but they are NOT in 128 bit mode and thus the above 'dcl0' status bit
1279 	 * will be OFF.
1280 	 *
1281 	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1282 	 * their CSEnable bit on. If so, then SINGLE DIMM case.
1283 	 */
1284 	debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
1285 
1286 	/*
1287 	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1288 	 * is more than just one DIMM present in unganged mode. Need to check
1289 	 * both controllers since DIMMs can be placed in either one.
1290 	 */
1291 	channels = 0;
1292 	err = pci_read_config_dword(pvt->dram_f2_ctl, DBAM0, &dbam);
1293 	if (err)
1294 		goto err_reg;
1295 
1296 	if (DBAM_DIMM(0, dbam) > 0)
1297 		channels++;
1298 	if (DBAM_DIMM(1, dbam) > 0)
1299 		channels++;
1300 	if (DBAM_DIMM(2, dbam) > 0)
1301 		channels++;
1302 	if (DBAM_DIMM(3, dbam) > 0)
1303 		channels++;
1304 
1305 	/* If more than 2 DIMMs are present, then we have 2 channels */
1306 	if (channels > 2)
1307 		channels = 2;
1308 	else if (channels == 0) {
1309 		/* No DIMMs on DCT0, so look at DCT1 */
1310 		err = pci_read_config_dword(pvt->dram_f2_ctl, DBAM1, &dbam);
1311 		if (err)
1312 			goto err_reg;
1313 
1314 		if (DBAM_DIMM(0, dbam) > 0)
1315 			channels++;
1316 		if (DBAM_DIMM(1, dbam) > 0)
1317 			channels++;
1318 		if (DBAM_DIMM(2, dbam) > 0)
1319 			channels++;
1320 		if (DBAM_DIMM(3, dbam) > 0)
1321 			channels++;
1322 
1323 		if (channels > 2)
1324 			channels = 2;
1325 	}
1326 
1327 	/* If we found ALL 0 values, then assume just ONE DIMM-ONE Channel */
1328 	if (channels == 0)
1329 		channels = 1;
1330 
1331 	debugf0("MCT channel count: %d\n", channels);
1332 
1333 	return channels;
1334 
1335 err_reg:
1336 	return -1;
1337 
1338 }
1339 
1340 static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1341 {
1342 	return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
1343 }
1344 
1345 /* Enable extended configuration access via 0xCF8 feature */
1346 static void amd64_setup(struct amd64_pvt *pvt)
1347 {
1348 	u32 reg;
1349 
1350 	pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1351 
1352 	pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
1353 	reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1354 	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1355 }
1356 
1357 /* Restore the extended configuration access via 0xCF8 feature */
1358 static void amd64_teardown(struct amd64_pvt *pvt)
1359 {
1360 	u32 reg;
1361 
1362 	pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1363 
1364 	reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1365 	if (pvt->flags.cf8_extcfg)
1366 		reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1367 	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1368 }
1369 
1370 static u64 f10_get_error_address(struct mem_ctl_info *mci,
1371 			struct err_regs *info)
1372 {
1373 	return (((u64) (info->nbeah & 0xffff)) << 32) +
1374 			(info->nbeal & ~0x01);
1375 }
1376 
1377 /*
1378  * Read the Base and Limit registers for F10 based Memory controllers. Extract
1379  * fields from the 'raw' reg into separate data fields.
1380  *
1381  * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
1382  */
1383 static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1384 {
1385 	u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
1386 
1387 	low_offset = K8_DRAM_BASE_LOW + (dram << 3);
1388 	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
1389 
1390 	/* read the 'raw' DRAM BASE Address register */
1391 	pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base);
1392 
1393 	/* Read from the ECS data register */
1394 	pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base);
1395 
1396 	/* Extract parts into separate data entries */
1397 	pvt->dram_rw_en[dram] = (low_base & 0x3);
1398 
1399 	if (pvt->dram_rw_en[dram] == 0)
1400 		return;
1401 
1402 	pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1403 
1404 	pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) |
1405 				((u64) low_base & 0xFFFF0000))) << 8;
1406 
1407 	low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1408 	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
1409 
1410 	/* read the 'raw' LIMIT registers */
1411 	pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit);
1412 
1413 	/* Read from the ECS data register for the HIGH portion */
1414 	pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit);
1415 
1416 	debugf0("  HW Regs: BASE=0x%08x-%08x      LIMIT=  0x%08x-%08x\n",
1417 		high_base, low_base, high_limit, low_limit);
1418 
1419 	pvt->dram_DstNode[dram] = (low_limit & 0x7);
1420 	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
1421 
1422 	/*
1423 	 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1424 	 * memory location of the region, so low 24 bits need to be all ones.
1425 	 */
1426 	low_limit |= 0x0000FFFF;
1427 	pvt->dram_limit[dram] =
1428 		((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF);
1429 }
1430 
1431 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1432 {
1433 	int err = 0;
1434 
1435 	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
1436 				    &pvt->dram_ctl_select_low);
1437 	if (err) {
1438 		debugf0("Reading F10_DCTL_SEL_LOW failed\n");
1439 	} else {
1440 		debugf0("DRAM_DCTL_SEL_LOW=0x%x  DctSelBaseAddr=0x%x\n",
1441 			pvt->dram_ctl_select_low, dct_sel_baseaddr(pvt));
1442 
1443 		debugf0("  DRAM DCTs are=%s DRAM Is=%s DRAM-Ctl-"
1444 				"sel-hi-range=%s\n",
1445 			(dct_ganging_enabled(pvt) ? "GANGED" : "NOT GANGED"),
1446 			(dct_dram_enabled(pvt) ? "Enabled"   : "Disabled"),
1447 			(dct_high_range_enabled(pvt) ? "Enabled" : "Disabled"));
1448 
1449 		debugf0("  DctDatIntLv=%s MemCleared=%s DctSelIntLvAddr=0x%x\n",
1450 			(dct_data_intlv_enabled(pvt) ? "Enabled" : "Disabled"),
1451 			(dct_memory_cleared(pvt) ? "True " : "False "),
1452 			dct_sel_interleave_addr(pvt));
1453 	}
1454 
1455 	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
1456 				    &pvt->dram_ctl_select_high);
1457 	if (err)
1458 		debugf0("Reading F10_DCTL_SEL_HIGH failed\n");
1459 }
1460 
1461 /*
1462  * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1463  * Interleaving Modes.
1464  */
1465 static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1466 				int hi_range_sel, u32 intlv_en)
1467 {
1468 	u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
1469 
1470 	if (dct_ganging_enabled(pvt))
1471 		cs = 0;
1472 	else if (hi_range_sel)
1473 		cs = dct_sel_high;
1474 	else if (dct_interleave_enabled(pvt)) {
1475 		/*
1476 		 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1477 		 */
1478 		if (dct_sel_interleave_addr(pvt) == 0)
1479 			cs = sys_addr >> 6 & 1;
1480 		else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
1481 			temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1482 
1483 			if (dct_sel_interleave_addr(pvt) & 1)
1484 				cs = (sys_addr >> 9 & 1) ^ temp;
1485 			else
1486 				cs = (sys_addr >> 6 & 1) ^ temp;
1487 		} else if (intlv_en & 4)
1488 			cs = sys_addr >> 15 & 1;
1489 		else if (intlv_en & 2)
1490 			cs = sys_addr >> 14 & 1;
1491 		else if (intlv_en & 1)
1492 			cs = sys_addr >> 13 & 1;
1493 		else
1494 			cs = sys_addr >> 12 & 1;
1495 	} else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
1496 		cs = ~dct_sel_high & 1;
1497 	else
1498 		cs = 0;
1499 
1500 	return cs;
1501 }
1502 
1503 static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
1504 {
1505 	if (intlv_en == 1)
1506 		return 1;
1507 	else if (intlv_en == 3)
1508 		return 2;
1509 	else if (intlv_en == 7)
1510 		return 3;
1511 
1512 	return 0;
1513 }
1514 
1515 /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
1516 static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
1517 						 u32 dct_sel_base_addr,
1518 						 u64 dct_sel_base_off,
1519 						 u32 hole_valid, u32 hole_off,
1520 						 u64 dram_base)
1521 {
1522 	u64 chan_off;
1523 
1524 	if (hi_range_sel) {
1525 		if (!(dct_sel_base_addr & 0xFFFFF800) &&
1526 		   hole_valid && (sys_addr >= 0x100000000ULL))
1527 			chan_off = hole_off << 16;
1528 		else
1529 			chan_off = dct_sel_base_off;
1530 	} else {
1531 		if (hole_valid && (sys_addr >= 0x100000000ULL))
1532 			chan_off = hole_off << 16;
1533 		else
1534 			chan_off = dram_base & 0xFFFFF8000000ULL;
1535 	}
1536 
1537 	return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
1538 			(chan_off & 0x0000FFFFFF800000ULL);
1539 }
1540 
1541 /* Hack for the time being - Can we get this from BIOS?? */
1542 #define	CH0SPARE_RANK	0
1543 #define	CH1SPARE_RANK	1
1544 
1545 /*
1546  * Check if the csrow passed in is marked as SPARED; if so, return the new
1547  * spare row.
1548  */
1549 static inline int f10_process_possible_spare(int csrow,
1550 				u32 cs, struct amd64_pvt *pvt)
1551 {
1552 	u32 swap_done;
1553 	u32 bad_dram_cs;
1554 
1555 	/* Depending on channel, isolate respective SPARING info */
1556 	if (cs) {
1557 		swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
1558 		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
1559 		if (swap_done && (csrow == bad_dram_cs))
1560 			csrow = CH1SPARE_RANK;
1561 	} else {
1562 		swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1563 		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1564 		if (swap_done && (csrow == bad_dram_cs))
1565 			csrow = CH0SPARE_RANK;
1566 	}
1567 	return csrow;
1568 }
1569 
1570 /*
1571  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1572  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1573  *
1574  * Return:
1575  *	-EINVAL:  NOT FOUND
1576  *	0..csrow = Chip-Select Row
1577  */
1578 static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1579 {
1580 	struct mem_ctl_info *mci;
1581 	struct amd64_pvt *pvt;
1582 	u32 cs_base, cs_mask;
1583 	int cs_found = -EINVAL;
1584 	int csrow;
1585 
1586 	mci = mci_lookup[nid];
1587 	if (!mci)
1588 		return cs_found;
1589 
1590 	pvt = mci->pvt_info;
1591 
1592 	debugf1("InputAddr=0x%x  channelselect=%d\n", in_addr, cs);
1593 
1594 	for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
1595 
1596 		cs_base = amd64_get_dct_base(pvt, cs, csrow);
1597 		if (!(cs_base & K8_DCSB_CS_ENABLE))
1598 			continue;
1599 
1600 		/*
1601 		 * We have an ENABLED CSROW; isolate just the MASK bits of the
1602 		 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
1603 		 * of the actual address.
1604 		 */
1605 		cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
1606 
1607 		/*
1608 		 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
1609 		 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
1610 		 */
1611 		cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
1612 
1613 		debugf1("    CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
1614 				csrow, cs_base, cs_mask);
1615 
1616 		cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
1617 
1618 		debugf1("              Final CSMask=0x%x\n", cs_mask);
1619 		debugf1("    (InputAddr & ~CSMask)=0x%x "
1620 				"(CSBase & ~CSMask)=0x%x\n",
1621 				(in_addr & ~cs_mask), (cs_base & ~cs_mask));
1622 
1623 		if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
1624 			cs_found = f10_process_possible_spare(csrow, cs, pvt);
1625 
1626 			debugf1(" MATCH csrow=%d\n", cs_found);
1627 			break;
1628 		}
1629 	}
1630 	return cs_found;
1631 }
1632 
1633 /* For a given @dram_range, check if @sys_addr falls within it. */
1634 static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1635 				  u64 sys_addr, int *nid, int *chan_sel)
1636 {
1637 	int node_id, cs_found = -EINVAL, high_range = 0;
1638 	u32 intlv_en, intlv_sel, intlv_shift, hole_off;
1639 	u32 hole_valid, tmp, dct_sel_base, channel;
1640 	u64 dram_base, chan_addr, dct_sel_base_off;
1641 
1642 	dram_base = pvt->dram_base[dram_range];
1643 	intlv_en = pvt->dram_IntlvEn[dram_range];
1644 
1645 	node_id = pvt->dram_DstNode[dram_range];
1646 	intlv_sel = pvt->dram_IntlvSel[dram_range];
1647 
1648 	debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
1649 		dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
1650 
1651 	/*
1652 	 * This assumes that one node's DHAR is the same as all the other
1653 	 * nodes' DHAR.
1654 	 */
1655 	hole_off = (pvt->dhar & 0x0000FF80);
1656 	hole_valid = (pvt->dhar & 0x1);
1657 	dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
1658 
1659 	debugf1("   HoleOffset=0x%x  HoleValid=0x%x IntlvSel=0x%x\n",
1660 			hole_off, hole_valid, intlv_sel);
1661 
1662 	if (intlv_en &&
1663 	    (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1664 		return -EINVAL;
1665 
1666 	dct_sel_base = dct_sel_baseaddr(pvt);
1667 
1668 	/*
1669 	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1670 	 * select between DCT0 and DCT1.
1671 	 */
1672 	if (dct_high_range_enabled(pvt) &&
1673 	   !dct_ganging_enabled(pvt) &&
1674 	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1675 		high_range = 1;
1676 
1677 	channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1678 
1679 	chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
1680 					     dct_sel_base_off, hole_valid,
1681 					     hole_off, dram_base);
1682 
1683 	intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
1684 
1685 	/* remove Node ID (in case of memory interleaving) */
1686 	tmp = chan_addr & 0xFC0;
1687 
1688 	chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
1689 
1690 	/* remove channel interleave and hash */
1691 	if (dct_interleave_enabled(pvt) &&
1692 	   !dct_high_range_enabled(pvt) &&
1693 	   !dct_ganging_enabled(pvt)) {
1694 		if (dct_sel_interleave_addr(pvt) != 1)
1695 			chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
1696 		else {
1697 			tmp = chan_addr & 0xFC0;
1698 			chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
1699 					| tmp;
1700 		}
1701 	}
1702 
1703 	debugf1("   (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
1704 		chan_addr, (u32)(chan_addr >> 8));
1705 
1706 	cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
1707 
1708 	if (cs_found >= 0) {
1709 		*nid = node_id;
1710 		*chan_sel = channel;
1711 	}
1712 	return cs_found;
1713 }
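
/*
 * Worked example of the node-interleave removal above (hypothetical
 * values, assuming f10_map_intlv_en_to_shift() maps IntlvEn=7 to a
 * shift of 3 for 8-node interleaving): for chan_addr = 0x12345678,
 *
 *	tmp       = 0x12345678 & 0xFC0 = 0x640
 *	chan_addr = ((0x12345678 >> 3) & 0xFFFFFFFFF000ULL) | 0x640
 *	          = 0x02468000 | 0x640 = 0x02468640
 *
 * i.e. bits [11:6] are preserved while the upper part of the address
 * loses the three node-select bits.
 */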
1714 
1715 static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1716 				       int *node, int *chan_sel)
1717 {
1718 	int dram_range, cs_found = -EINVAL;
1719 	u64 dram_base, dram_limit;
1720 
1721 	for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
1722 
1723 		if (!pvt->dram_rw_en[dram_range])
1724 			continue;
1725 
1726 		dram_base = pvt->dram_base[dram_range];
1727 		dram_limit = pvt->dram_limit[dram_range];
1728 
1729 		if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
1730 
1731 			cs_found = f10_match_to_this_node(pvt, dram_range,
1732 							  sys_addr, node,
1733 							  chan_sel);
1734 			if (cs_found >= 0)
1735 				break;
1736 		}
1737 	}
1738 	return cs_found;
1739 }
1740 
1741 /*
1742  * This is the F10h reference code from AMD to map a @sys_addr to
1743  * NodeID, CSROW, and Channel.
1744  *
1745  * The @sys_addr is usually an error address received from the hardware.
1746  */
1747 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1748 				     struct err_regs *info,
1749 				     u64 sys_addr)
1750 {
1751 	struct amd64_pvt *pvt = mci->pvt_info;
1752 	u32 page, offset;
1753 	unsigned short syndrome;
1754 	int nid, csrow, chan = 0;
1755 
1756 	csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1757 
1758 	if (csrow >= 0) {
1759 		error_address_to_page_and_offset(sys_addr, &page, &offset);
1760 
1761 		syndrome  = HIGH_SYNDROME(info->nbsl) << 8;
1762 		syndrome |= LOW_SYNDROME(info->nbsh);
1763 
1764 		/*
1765 		 * Is CHIPKILL on? If so, then we can attempt to use the
1766 		 * syndrome to isolate which channel the error was on.
1767 		 */
1768 		if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
1769 			chan = get_channel_from_ecc_syndrome(syndrome);
1770 
1771 		if (chan >= 0) {
1772 			edac_mc_handle_ce(mci, page, offset, syndrome,
1773 					csrow, chan, EDAC_MOD_STR);
1774 		} else {
1775 			/*
1776 			 * Channel unknown, report all channels on this
1777 			 * CSROW as failed.
1778 			 */
1779 			for (chan = 0; chan < mci->csrows[csrow].nr_channels;
1780 			     chan++) {
1781 				edac_mc_handle_ce(mci, page, offset,
1782 						  syndrome,
1783 						  csrow, chan,
1784 						  EDAC_MOD_STR);
1785 			}
1786 		}
1787 
1788 	} else {
1789 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1790 	}
1791 }
1792 
1793 /*
1794  * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a
1795  * shift table (revf_quad_ddr2_shift) which starts at a 128MB DIMM size. An
1796  * index of 0 indicates an empty DIMM slot, as reported by the hardware.
1797  *
1798  * Normalize to 128MB by subtracting the 27-bit base shift.
1799  */
1800 static int map_dbam_to_csrow_size(int index)
1801 {
1802 	int mega_bytes = 0;
1803 
1804 	if (index > 0 && index <= DBAM_MAX_VALUE)
1805 		mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
1806 
1807 	return mega_bytes;
1808 }
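
/*
 * Example: a DBAM field value of 5 selects revf_quad_ddr2_shift[5] = 30,
 * giving 128 << (30 - 27) = 1024MB for that CSROW.
 */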
1809 
1810 /*
1811  * debug routine to display the memory sizes of a DIMM (ganged or not) and
1812  * its CSROWs as well
1813  */
1814 static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
1815 					 int ganged)
1816 {
1817 	int dimm, size0, size1;
1818 	u32 dbam;
1819 	u32 *dcsb;
1820 
1821 	debugf1("  dbam%d: 0x%8.08x  CSROW is %s\n", ctrl,
1822 			ctrl ? pvt->dbam1 : pvt->dbam0,
1823 			ganged ? "GANGED - dbam1 not used" : "NON-GANGED");
1824 
1825 	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1826 	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1827 
1828 	/* Dump memory sizes for DIMM and its CSROWs */
1829 	for (dimm = 0; dimm < 4; dimm++) {
1830 
1831 		size0 = 0;
1832 		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
1833 			size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
1834 
1835 		size1 = 0;
1836 		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
1837 			size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
1838 
1839 		debugf1("     CTRL-%d DIMM-%d=%5dMB   CSROW-%d=%5dMB "
1840 				"CSROW-%d=%5dMB\n",
1841 				ctrl,
1842 				dimm,
1843 				size0 + size1,
1844 				dimm * 2,
1845 				size0,
1846 				dimm * 2 + 1,
1847 				size1);
1848 	}
1849 }
1850 
1851 /*
1852  * Very early hardware probe on pci_probe thread to determine if this module
1853  * supports the hardware.
1854  *
1855  * Return:
1856  *      0 for OK
1857  *      1 for error
1858  */
1859 static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
1860 {
1861 	int ret = 0;
1862 
1863 	/*
1864 	 * If we are on a DDR3 machine, we don't support that properly
1865 	 * at this time.
1866 	 */
1867 	if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
1868 	    (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
1869 
1870 		amd64_printk(KERN_WARNING,
1871 			"%s() This machine is running with DDR3 memory. "
1872 			"This is not currently supported. "
1873 			"DCHR0=0x%x DCHR1=0x%x\n",
1874 			__func__, pvt->dchr0, pvt->dchr1);
1875 
1876 		amd64_printk(KERN_WARNING,
1877 			"   Contact '%s' module MAINTAINER to help add"
1878 			" support.\n",
1879 			EDAC_MOD_STR);
1880 
1881 		ret = 1;
1882 
1883 	}
1884 	return ret;
1885 }
1886 
1887 /*
1888  * There are currently 3 types of MC devices for AMD Athlon/Opterons
1889  * (as per PCI DEVICE_IDs):
1890  *
1891  * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same
1892  * PCI DEVICE ID, even though there are differences between the Revisions
1893  * (CG, D, E, F).
1894  *
1895  * Family F10h and F11h.
1896  *
1897  */
1898 static struct amd64_family_type amd64_family_types[] = {
1899 	[K8_CPUS] = {
1900 		.ctl_name = "RevF",
1901 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1902 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1903 		.ops = {
1904 			.early_channel_count = k8_early_channel_count,
1905 			.get_error_address = k8_get_error_address,
1906 			.read_dram_base_limit = k8_read_dram_base_limit,
1907 			.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1908 			.dbam_map_to_pages = k8_dbam_map_to_pages,
1909 		}
1910 	},
1911 	[F10_CPUS] = {
1912 		.ctl_name = "Family 10h",
1913 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1914 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1915 		.ops = {
1916 			.probe_valid_hardware = f10_probe_valid_hardware,
1917 			.early_channel_count = f10_early_channel_count,
1918 			.get_error_address = f10_get_error_address,
1919 			.read_dram_base_limit = f10_read_dram_base_limit,
1920 			.read_dram_ctl_register = f10_read_dram_ctl_register,
1921 			.map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1922 			.dbam_map_to_pages = f10_dbam_map_to_pages,
1923 		}
1924 	},
1925 	[F11_CPUS] = {
1926 		.ctl_name = "Family 11h",
1927 		.addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
1928 		.misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
1929 		.ops = {
1930 			.probe_valid_hardware = f10_probe_valid_hardware,
1931 			.early_channel_count = f10_early_channel_count,
1932 			.get_error_address = f10_get_error_address,
1933 			.read_dram_base_limit = f10_read_dram_base_limit,
1934 			.read_dram_ctl_register = f10_read_dram_ctl_register,
1935 			.map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1936 			.dbam_map_to_pages = f10_dbam_map_to_pages,
1937 		}
1938 	},
1939 };
1940 
1941 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1942 						unsigned int device,
1943 						struct pci_dev *related)
1944 {
1945 	struct pci_dev *dev = NULL;
1946 
1947 	dev = pci_get_device(vendor, device, dev);
1948 	while (dev) {
1949 		if ((dev->bus->number == related->bus->number) &&
1950 		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1951 			break;
1952 		dev = pci_get_device(vendor, device, dev);
1953 	}
1954 
1955 	return dev;
1956 }
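
/*
 * Usage sketch (hypothetical topology): if @related is the DRAM F2
 * device at 0000:00:18.2, then
 *
 *	pci_get_related_function(PCI_VENDOR_ID_AMD,
 *				 PCI_DEVICE_ID_AMD_10H_NB_MISC, related);
 *
 * returns the F3 device at 0000:00:18.3 - same bus and slot, different
 * function - with its reference count raised, so the caller must drop
 * it with pci_dev_put() when done.
 */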
1957 
1958 /*
1959  * syndrome mapping table for ECC ChipKill devices
1960  *
1961  * The comment in each row is the token (nibble) number that is in error.
1962  * The least significant nibble of the syndrome is the mask for the bits
1963  * that are in error (need to be toggled) for the particular nibble.
1964  *
1965  * Each row contains 16 entries.
1966  * The first entry (0th) is the channel number for that row of syndromes.
1967  * The remaining 15 entries are the syndromes for the respective Error
1968  * bit mask index.
1969  *
1970  * The 1st index entry is the 0001b mask, indicating that the rightmost bit
1971  * is the bit in error.
1972  * The 2nd index entry, 0010b, indicates that the second bit is damaged.
1973  * The 3rd index entry, 0011b, indicates that the rightmost 2 bits
1974  * are damaged.
1975  * And so on up to index 15, 1111b, whose entry has the syndrome
1976  * indicating that all 4 bits are damaged.
1977  *
1978  * A search is performed on this table looking for a given syndrome.
1979  *
1980  * See the AMD documentation for ECC syndromes. This ECC table is valid
1981  * across all the versions of the AMD64 processors.
1982  *
1983  * A fast lookup is to use the LAST four bits of the 16-bit syndrome as a
1984  * COLUMN index, then search all ROWS of that column, looking for a match
1985  * with the input syndrome. The ROW value will be the token number.
1986  *
1987  * The 0'th entry on that row can be returned as the CHANNEL (0 or 1) of this
1988  * error.
1989  */
1990 #define NUMBER_ECC_ROWS  36
1991 static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = {
1992 	/* Channel 0 syndromes */
1993 	{/*0*/  0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57,
1994 	   0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df },
1995 	{/*1*/  0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7,
1996 	   0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f },
1997 	{/*2*/  0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
1998 	   0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f },
1999 	{/*3*/  0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057,
2000 	   0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df },
2001 	{/*4*/  0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097,
2002 	   0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f },
2003 	{/*5*/  0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857,
2004 	   0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf },
2005 	{/*6*/  0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467,
2006 	   0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f },
2007 	{/*7*/  0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27,
2008 	   0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff },
2009 	{/*8*/  0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177,
2010 	   0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f },
2011 	{/*9*/  0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07,
2012 	   0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f },
2013 	{/*a*/  0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07,
2014 	   0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f },
2015 	{/*b*/  0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7,
2016 	   0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f },
2017 	{/*c*/  0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87,
2018 	   0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f },
2019 	{/*d*/  0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067,
2020 	   0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f },
2021 	{/*e*/  0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77,
2022 	   0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f },
2023 	{/*f*/  0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77,
2024 	   0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f },
2025 
2026 	/* Channel 1 syndromes */
2027 	{/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187,
2028 	   0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f },
2029 	{/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627,
2030 	   0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff },
2031 	{/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97,
2032 	   0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f },
2033 	{/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97,
2034 	   0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f },
2035 	{/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987,
2036 	   0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f },
2037 	{/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677,
2038 	   0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f },
2039 	{/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387,
2040 	   0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f },
2041 	{/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17,
2042 	   0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf },
2043 	{/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7,
2044 	   0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f },
2045 	{/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397,
2046 	   0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f },
2047 	{/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617,
2048 	   0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf },
2049 	{/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527,
2050 	   0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff },
2051 	{/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7,
2052 	   0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef },
2053 	{/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7,
2054 	   0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def },
2055 	{/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67,
2056 	   0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f },
2057 	{/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377,
2058 	   0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f },
2059 
2060 	/* ECC bits are also in the set of tokens and they too can go bad. The
2061 	 * first 2 rows cover channel 0, while the second 2 cover channel 1.
2062 	 */
2063 	{/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807,
2064 	   0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f },
2065 	{/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07,
2066 	   0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f },
2067 	{/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97,
2068 	   0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f },
2069 	{/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757,
2070 	   0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df }
2071 };
2072 
2073 /*
2074  * Given the syndrome argument, scan each of the channel tables for a syndrome
2075  * match. Depending on the table in which it is found, return the channel number.
2076  */
2077 static int get_channel_from_ecc_syndrome(unsigned short syndrome)
2078 {
2079 	int row;
2080 	int column;
2081 
2082 	/* Determine column to scan */
2083 	column = syndrome & 0xF;
2084 
2085 	/* Scan all rows, looking for syndrome, or end of table */
2086 	for (row = 0; row < NUMBER_ECC_ROWS; row++) {
2087 		if (ecc_chipkill_syndromes[row][column] == syndrome)
2088 			return ecc_chipkill_syndromes[row][0];
2089 	}
2090 
2091 	debugf0("syndrome(%x) not found\n", syndrome);
2092 	return -1;
2093 }
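
/*
 * Example: a syndrome of 0x8cc3 has low nibble 0x3, so column 3 is
 * scanned. It matches in row 0x22 (one of the channel-1 ECC-bit rows),
 * whose 0th entry is 1, so the error is attributed to channel 1.
 */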
2094 
2095 /*
2096  * Check for valid error in the NB Status High register. If so, proceed to read
2097  * NB Status Low, NB Address Low and NB Address High registers and store data
2098  * into error structure.
2099  * into the error structure.
2100  * Returns:
2101  *	- 1: if hardware regs contains valid error info
2102  *	- 0: if no valid error is indicated
2103  */
2104 static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
2105 				     struct err_regs *regs)
2106 {
2107 	struct amd64_pvt *pvt;
2108 	struct pci_dev *misc_f3_ctl;
2109 	int err = 0;
2110 
2111 	pvt = mci->pvt_info;
2112 	misc_f3_ctl = pvt->misc_f3_ctl;
2113 
2114 	err = pci_read_config_dword(misc_f3_ctl, K8_NBSH, &regs->nbsh);
2115 	if (err)
2116 		goto err_reg;
2117 
2118 	if (!(regs->nbsh & K8_NBSH_VALID_BIT))
2119 		return 0;
2120 
2121 	/* valid error, read remaining error information registers */
2122 	err = pci_read_config_dword(misc_f3_ctl, K8_NBSL, &regs->nbsl);
2123 	if (err)
2124 		goto err_reg;
2125 
2126 	err = pci_read_config_dword(misc_f3_ctl, K8_NBEAL, &regs->nbeal);
2127 	if (err)
2128 		goto err_reg;
2129 
2130 	err = pci_read_config_dword(misc_f3_ctl, K8_NBEAH, &regs->nbeah);
2131 	if (err)
2132 		goto err_reg;
2133 
2134 	err = pci_read_config_dword(misc_f3_ctl, K8_NBCFG, &regs->nbcfg);
2135 	if (err)
2136 		goto err_reg;
2137 
2138 	return 1;
2139 
2140 err_reg:
2141 	debugf0("Reading error info register failed\n");
2142 	return 0;
2143 }
2144 
2145 /*
2146  * This function is called to retrieve the error data from hardware and store it
2147  * in the info structure.
2148  *
2149  * Returns:
2150  *	- 1: if a valid error is found
2151  *	- 0: if no error is found
2152  */
2153 static int amd64_get_error_info(struct mem_ctl_info *mci,
2154 				struct err_regs *info)
2155 {
2156 	struct amd64_pvt *pvt;
2157 	struct err_regs regs;
2158 
2159 	pvt = mci->pvt_info;
2160 
2161 	if (!amd64_get_error_info_regs(mci, info))
2162 		return 0;
2163 
2164 	/*
2165 	 * Here's the problem with the K8's EDAC reporting: There are four
2166 	 * registers which report pieces of error information. They are shared
2167 	 * between CEs and UEs. Furthermore, contrary to what is stated in the
2168 	 * BKDG, the overflow bit is never used! Every error always updates the
2169 	 * reporting registers.
2170 	 *
2171 	 * Can you see the race condition? All four error reporting registers
2172 	 * must be read before a new error updates them! There is no way to read
2173 	 * all four registers atomically. The best that can be done is to detect
2174 	 * that a race has occurred and then report the error without any kind of
2175 	 * precision.
2176 	 *
2177 	 * What is still positive is that errors are still reported and thus
2178 	 * problems can still be detected - just not localized because the
2179 	 * syndrome and address are spread out across registers.
2180 	 *
2181 	 * Grrrrr!!!!!  Here's hoping that AMD fixes this in some future K8 rev.
2182 	 * UEs and CEs should have separate register sets with proper overflow
2183 	 * bits that are used! At the very least the problem could be fixed by
2184 	 * honoring the ErrValid bit in 'nbsh' and not updating registers - just
2185 	 * set the overflow bit - unless the current error is CE and the new
2186 	 * error is UE which would be the only situation for overwriting the
2187 	 * current values.
2188 	 */
2189 
2190 	regs = *info;
2191 
2192 	/* Use info from the second read - most current */
2193 	if (unlikely(!amd64_get_error_info_regs(mci, info)))
2194 		return 0;
2195 
2196 	/* clear the error bits in hardware */
2197 	pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT);
2198 
2199 	/* Check for the possible race condition */
2200 	if ((regs.nbsh != info->nbsh) ||
2201 	    (regs.nbsl != info->nbsl) ||
2202 	    (regs.nbeah != info->nbeah) ||
2203 	    (regs.nbeal != info->nbeal)) {
2204 		amd64_mc_printk(mci, KERN_WARNING,
2205 				"hardware STATUS read access race condition "
2206 				"detected!\n");
2207 		return 0;
2208 	}
2209 	return 1;
2210 }
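
/*
 * Illustrative timeline of the race described above (hypothetical
 * values):
 *
 *	1st read: NBSH/NBSL/NBEAH/NBEAL describe error A
 *	...a new error B overwrites all four registers...
 *	2nd read: the registers now describe error B
 *
 * The two snapshots differ, so the comparison above reports the race
 * and returns 0 instead of pairing error B's address with error A's
 * syndrome.
 */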
2211 
2212 /*
2213  * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
2214  * ADDRESS and process.
2215  */
2216 static void amd64_handle_ce(struct mem_ctl_info *mci,
2217 			    struct err_regs *info)
2218 {
2219 	struct amd64_pvt *pvt = mci->pvt_info;
2220 	u64 SystemAddress;
2221 
2222 	/* Ensure that the Error Address is VALID */
2223 	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
2224 		amd64_mc_printk(mci, KERN_ERR,
2225 			"HW has no ERROR_ADDRESS available\n");
2226 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
2227 		return;
2228 	}
2229 
2230 	SystemAddress = extract_error_address(mci, info);
2231 
2232 	amd64_mc_printk(mci, KERN_ERR,
2233 		"CE ERROR_ADDRESS= 0x%llx\n", SystemAddress);
2234 
2235 	pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress);
2236 }
2237 
2238 /* Handle any Un-correctable Errors (UEs) */
2239 static void amd64_handle_ue(struct mem_ctl_info *mci,
2240 			    struct err_regs *info)
2241 {
2242 	int csrow;
2243 	u64 SystemAddress;
2244 	u32 page, offset;
2245 	struct mem_ctl_info *log_mci, *src_mci = NULL;
2246 
2247 	log_mci = mci;
2248 
2249 	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
2250 		amd64_mc_printk(mci, KERN_CRIT,
2251 			"HW has no ERROR_ADDRESS available\n");
2252 		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2253 		return;
2254 	}
2255 
2256 	SystemAddress = extract_error_address(mci, info);
2257 
2258 	/*
2259 	 * Find out which node the error address belongs to. This may be
2260 	 * different from the node that detected the error.
2261 	 */
2262 	src_mci = find_mc_by_sys_addr(mci, SystemAddress);
2263 	if (!src_mci) {
2264 		amd64_mc_printk(mci, KERN_CRIT,
2265 			"ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
2266 			(unsigned long)SystemAddress);
2267 		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2268 		return;
2269 	}
2270 
2271 	log_mci = src_mci;
2272 
2273 	csrow = sys_addr_to_csrow(log_mci, SystemAddress);
2274 	if (csrow < 0) {
2275 		amd64_mc_printk(mci, KERN_CRIT,
2276 			"ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
2277 			(unsigned long)SystemAddress);
2278 		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2279 	} else {
2280 		error_address_to_page_and_offset(SystemAddress, &page, &offset);
2281 		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
2282 	}
2283 }
2284 
2285 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
2286 					    struct err_regs *info)
2287 {
2288 	u32 ec  = ERROR_CODE(info->nbsl);
2289 	u32 xec = EXT_ERROR_CODE(info->nbsl);
2290 	int ecc_type = (info->nbsh >> 13) & 0x3;
2291 
2292 	/* Bail out early if this was an 'observed' error */
2293 	if (PP(ec) == K8_NBSL_PP_OBS)
2294 		return;
2295 
2296 	/* Do only ECC errors */
2297 	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2298 		return;
2299 
2300 	if (ecc_type == 2)
2301 		amd64_handle_ce(mci, info);
2302 	else if (ecc_type == 1)
2303 		amd64_handle_ue(mci, info);
2304 
2305 	/*
2306 	 * If the main error is a CE then the overflow must be a CE. If the main
2307 	 * error is a UE then the overflow is unknown. We'll call the overflow a
2308 	 * CE - if panic_on_ue is set then we're already panicked and won't
2309 	 * arrive here. Otherwise, apparently someone doesn't consider UEs
2310 	 * catastrophic.
2311 	 */
2312 	if (info->nbsh & K8_NBSH_OVERFLOW)
2313 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR "Error Overflow");
2314 }
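
/*
 * Illustration of the ecc_type values used above: (nbsh >> 13) & 0x3
 * isolates the two ECC status bits, yielding 2 (NBSH bit 14 set) for a
 * correctable ECC error and 1 (NBSH bit 13 set) for an uncorrectable
 * one, matching the two handlers above; anything else is ignored here.
 */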
2315 
2316 void amd64_decode_bus_error(int node_id, struct err_regs *regs)
2317 {
2318 	struct mem_ctl_info *mci = mci_lookup[node_id];
2319 
2320 	__amd64_decode_bus_error(mci, regs);
2321 
2322 	/*
2323 	 * Check the UE bit of the NB status high register, if set generate some
2324 	 * logs. If NOT a GART error, then process the event as a NO-INFO event.
2325 	 * If it was a GART error, skip that process.
2326 	 *
2327 	 * FIXME: this should go somewhere else, if at all.
2328 	 */
2329 	if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
2330 		edac_mc_handle_ue_no_info(mci, "UE bit is set");
2331 
2332 }
2333 
2334 /*
2335  * The main polling 'check' function, called FROM the edac core to perform the
2336  * The main polling 'check' function, called FROM the edac core to perform
2337  * error checking and, if an error is encountered, error processing.
2338 static void amd64_check(struct mem_ctl_info *mci)
2339 {
2340 	struct err_regs regs;
2341 
2342 	if (amd64_get_error_info(mci, &regs)) {
2343 		struct amd64_pvt *pvt = mci->pvt_info;
2344 		amd_decode_nb_mce(pvt->mc_node_id, &regs, 1);
2345 	}
2346 }
2347 
2348 /*
2349  * Input:
2350  *	1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
2351  *	2) AMD Family index value
2352  *
2353  * Output:
2354  *	Upon return of 0, the following are filled in:
2355  *
2356  *		struct pvt->addr_f1_ctl
2357  *		struct pvt->misc_f3_ctl
2358  *
2359  *	These are filled in with the related device functions of
2360  *	'dram_f2_ctl' and are "reserved" via pci_get_device().
2361  *
2362  *	Upon return of 1 (error status):
2363  *
2364  *		Nothing reserved
2365  */
2366 static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
2367 {
2368 	const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];
2369 
2370 	/* Reserve the ADDRESS MAP Device */
2371 	pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
2372 						    amd64_dev->addr_f1_ctl,
2373 						    pvt->dram_f2_ctl);
2374 
2375 	if (!pvt->addr_f1_ctl) {
2376 		amd64_printk(KERN_ERR, "error address map device not found: "
2377 			     "vendor %x device 0x%x (broken BIOS?)\n",
2378 			     PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
2379 		return 1;
2380 	}
2381 
2382 	/* Reserve the MISC Device */
2383 	pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
2384 						    amd64_dev->misc_f3_ctl,
2385 						    pvt->dram_f2_ctl);
2386 
2387 	if (!pvt->misc_f3_ctl) {
2388 		pci_dev_put(pvt->addr_f1_ctl);
2389 		pvt->addr_f1_ctl = NULL;
2390 
2391 		amd64_printk(KERN_ERR, "error miscellaneous device not found: "
2392 			     "vendor %x device 0x%x (broken BIOS?)\n",
2393 			     PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
2394 		return 1;
2395 	}
2396 
2397 	debugf1("    Addr Map device PCI Bus ID:\t%s\n",
2398 		pci_name(pvt->addr_f1_ctl));
2399 	debugf1("    DRAM MEM-CTL PCI Bus ID:\t%s\n",
2400 		pci_name(pvt->dram_f2_ctl));
2401 	debugf1("    Misc device PCI Bus ID:\t%s\n",
2402 		pci_name(pvt->misc_f3_ctl));
2403 
2404 	return 0;
2405 }
2406 
2407 static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
2408 {
2409 	pci_dev_put(pvt->addr_f1_ctl);
2410 	pci_dev_put(pvt->misc_f3_ctl);
2411 }
2412 
2413 /*
2414  * Retrieve the hardware registers of the memory controller (this includes the
2415  * 'Address Map' and 'Misc' device regs)
2416  */
2417 static void amd64_read_mc_registers(struct amd64_pvt *pvt)
2418 {
2419 	u64 msr_val;
2420 	int dram, err = 0;
2421 
2422 	/*
2423 	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2424 	 * those are Read-As-Zero
2425 	 */
2426 	rdmsrl(MSR_K8_TOP_MEM1, msr_val);
2427 	pvt->top_mem = msr_val >> 23;
2428 	debugf0("  TOP_MEM=0x%08llx\n", pvt->top_mem);
2429 
2430 	/* check first whether TOP_MEM2 is enabled */
2431 	rdmsrl(MSR_K8_SYSCFG, msr_val);
2432 	if (msr_val & (1U << 21)) {
2433 		rdmsrl(MSR_K8_TOP_MEM2, msr_val);
2434 		pvt->top_mem2 = msr_val >> 23;
2435 		debugf0("  TOP_MEM2=0x%08llx\n", pvt->top_mem2);
2436 	} else
2437 		debugf0("  TOP_MEM2 disabled.\n");
2438 
2439 	amd64_cpu_display_info(pvt);
2440 
2441 	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
2442 	if (err)
2443 		goto err_reg;
2444 
2445 	if (pvt->ops->read_dram_ctl_register)
2446 		pvt->ops->read_dram_ctl_register(pvt);
2447 
2448 	for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
2449 		/*
2450 		 * Call CPU specific READ function to get the DRAM Base and
2451 		 * Limit values from the DCT.
2452 		 */
2453 		pvt->ops->read_dram_base_limit(pvt, dram);
2454 
2455 		/*
2456 		 * Only print out debug info on rows with both R and W Enabled.
2457 		 * During normal processing, the compiler should optimize this
2458 		 * whole 'if' debug output block away.
2459 		 */
2460 		if (pvt->dram_rw_en[dram] != 0) {
2461 			debugf1("  DRAM_BASE[%d]: 0x%8.08x-%8.08x "
2462 				"DRAM_LIMIT:  0x%8.08x-%8.08x\n",
2463 				dram,
2464 				(u32)(pvt->dram_base[dram] >> 32),
2465 				(u32)(pvt->dram_base[dram] & 0xFFFFFFFF),
2466 				(u32)(pvt->dram_limit[dram] >> 32),
2467 				(u32)(pvt->dram_limit[dram] & 0xFFFFFFFF));
2468 			debugf1("        IntlvEn=%s %s %s "
2469 				"IntlvSel=%d DstNode=%d\n",
2470 				pvt->dram_IntlvEn[dram] ?
2471 					"Enabled" : "Disabled",
2472 				(pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
2473 				(pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
2474 				pvt->dram_IntlvSel[dram],
2475 				pvt->dram_DstNode[dram]);
2476 		}
2477 	}
2478 
2479 	amd64_read_dct_base_mask(pvt);
2480 
2481 	err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
2482 	if (err)
2483 		goto err_reg;
2484 
2485 	amd64_read_dbam_reg(pvt);
2486 
2487 	err = pci_read_config_dword(pvt->misc_f3_ctl,
2488 				F10_ONLINE_SPARE, &pvt->online_spare);
2489 	if (err)
2490 		goto err_reg;
2491 
2492 	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
2493 	if (err)
2494 		goto err_reg;
2495 
2496 	err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
2497 	if (err)
2498 		goto err_reg;
2499 
2500 	if (!dct_ganging_enabled(pvt)) {
2501 		err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1,
2502 						&pvt->dclr1);
2503 		if (err)
2504 			goto err_reg;
2505 
2506 		err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_1,
2507 						&pvt->dchr1);
2508 		if (err)
2509 			goto err_reg;
2510 	}
2511 
2512 	amd64_dump_misc_regs(pvt);
2513 
2514 	return;
2515 
2516 err_reg:
2517 	debugf0("Reading an MC register failed\n");
2518 
2519 }
2520 
2521 /*
2522  * NOTE: CPU Revision Dependent code
2523  *
2524  * Input:
2525  *	@csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1)
2526  *	k8 private pointer to -->
2527  *			DRAM Bank Address mapping register
2528  *			node_id
2529  *			DCL register where dual_channel_active is
2530  *
2531  * The DBAM register consists of 4 sets of 4 bits each definitions:
2532  *
2533  * Bits:	CSROWs
2534  * 0-3		CSROWs 0 and 1
2535  * 4-7		CSROWs 2 and 3
2536  * 8-11		CSROWs 4 and 5
2537  * 12-15	CSROWs 6 and 7
2538  *
2539  * Values range from 0 to 15.
2540  * The meaning of the values depends on CPU revision and dual-channel state;
2541  * see the relevant BKDG for more info.
2542  *
2543  * The memory controller provides for a total of only 8 CSROWs in its current
2544  * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2545  * single channel or two (2) DIMMs in dual channel mode.
2546  *
2547  * The following code logic collapses the various tables for CSROW based on CPU
2548  * revision.
2549  *
2550  * Returns:
2551  *	The number of PAGE_SIZE pages that the specified CSROW number
2552  *	encompasses
2553  *
2554  */
2555 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2556 {
2557 	u32 dram_map, nr_pages;
2558 
2559 	/*
2560 	 * The math on this doesn't look right on the surface because x/2*4 can
2561 	 * be simplified to x*2, but this expression makes use of the fact that
2562 	 * it is integer math where 1/2=0. This intermediate value becomes the
2563 	 * number of bits to shift the DBAM register to extract the proper CSROW
2564 	 * field.
2565 	 */
2566 	dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
2567 
2568 	nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map);
2569 
2570 	/*
2571 	 * If dual channel then double the memory size of single channel.
2572 	 * Channel count is 1 or 2
2573 	 */
2574 	nr_pages <<= (pvt->channel_count - 1);
2575 
2576 	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map);
2577 	debugf0("    nr_pages= %u  channel-count = %d\n",
2578 		nr_pages, pvt->channel_count);
2579 
2580 	return nr_pages;
2581 }
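
/*
 * Worked example (hypothetical dbam0): for csrow_nr = 5 the shift is
 * (5 / 2) * 4 = 8, so with pvt->dbam0 = 0x00000500 the extracted
 * dram_map is (0x00000500 >> 8) & 0xF = 5. That index is handed to
 * ops->dbam_map_to_pages(), and the result is doubled when
 * channel_count is 2.
 */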
2582 
2583 /*
2584  * Initialize the array of csrow attribute instances, based on the values
2585  * from pci config hardware registers.
2586  */
2587 static int amd64_init_csrows(struct mem_ctl_info *mci)
2588 {
2589 	struct csrow_info *csrow;
2590 	struct amd64_pvt *pvt;
2591 	u64 input_addr_min, input_addr_max, sys_addr;
2592 	int i, err = 0, empty = 1;
2593 
2594 	pvt = mci->pvt_info;
2595 
2596 	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
2597 	if (err)
2598 		debugf0("Reading K8_NBCFG failed\n");
2599 
2600 	debugf0("NBCFG= 0x%x  CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
2601 		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2602 		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
2603 		);
2604 
2605 	for (i = 0; i < CHIPSELECT_COUNT; i++) {
2606 		csrow = &mci->csrows[i];
2607 
2608 		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
2609 			debugf1("----CSROW %d EMPTY for node %d\n", i,
2610 				pvt->mc_node_id);
2611 			continue;
2612 		}
2613 
2614 		debugf1("----CSROW %d VALID for MC node %d\n",
2615 			i, pvt->mc_node_id);
2616 
2617 		empty = 0;
2618 		csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
2619 		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2620 		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2621 		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2622 		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2623 		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2624 		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
2625 		/* 8 bytes of resolution */
2626 
2627 		csrow->mtype = amd64_determine_memory_type(pvt);
2628 
2629 		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2630 		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
2631 			(unsigned long)input_addr_min,
2632 			(unsigned long)input_addr_max);
2633 		debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
2634 			(unsigned long)sys_addr, csrow->page_mask);
2635 		debugf1("    nr_pages: %u  first_page: 0x%lx "
2636 			"last_page: 0x%lx\n",
2637 			(unsigned)csrow->nr_pages,
2638 			csrow->first_page, csrow->last_page);
2639 
2640 		/*
2641 		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2642 		 */
2643 		if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
2644 			csrow->edac_mode =
2645 			    (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
2646 			    EDAC_S4ECD4ED : EDAC_SECDED;
2647 		else
2648 			csrow->edac_mode = EDAC_NONE;
2649 	}
2650 
2651 	return empty;
2652 }
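
/*
 * Summary of the edac_mode selection above: with DRAM ECC enabled and
 * CHIPKILL set, each populated csrow reports EDAC_S4ECD4ED; with ECC
 * enabled but CHIPKILL clear it reports EDAC_SECDED; with ECC disabled
 * it reports EDAC_NONE.
 */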
2653 
2654 /*
2655  * Only if 'ecc_enable_override' is set AND the BIOS had ECC disabled do we
2656  * enable it here.
2657  */
2658 static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2659 {
2660 	struct amd64_pvt *pvt = mci->pvt_info;
2661 	const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
2662 	int cpu, idx = 0, err = 0;
2663 	struct msr msrs[cpumask_weight(cpumask)];
2664 	u32 value;
2665 	u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2666 
2667 	if (!ecc_enable_override)
2668 		return;
2669 
2670 	memset(msrs, 0, sizeof(msrs));
2671 
2672 	amd64_printk(KERN_WARNING,
2673 		"'ecc_enable_override' parameter is active, "
2674 		"Enabling AMD ECC hardware now: CAUTION\n");
2675 
2676 	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
2677 	if (err)
2678 		debugf0("Reading K8_NBCTL failed\n");
2679 
2680 	/* turn on UECCn and CECCEn bits */
2681 	pvt->old_nbctl = value & mask;
2682 	pvt->nbctl_mcgctl_saved = 1;
2683 
2684 	value |= mask;
2685 	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2686 
2687 	rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2688 
2689 	for_each_cpu(cpu, cpumask) {
2690 		if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
2691 			set_bit(idx, &pvt->old_mcgctl);
2692 
2693 		msrs[idx].l |= K8_MSR_MCGCTL_NBE;
2694 		idx++;
2695 	}
2696 	wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2697 
2698 	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
2699 	if (err)
2700 		debugf0("Reading K8_NBCFG failed\n");
2701 
2702 	debugf0("NBCFG(1)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
2703 		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2704 		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
2705 
2706 	if (!(value & K8_NBCFG_ECC_ENABLE)) {
2707 		amd64_printk(KERN_WARNING,
2708 			"This node reports that DRAM ECC is "
2709 			"currently Disabled; ENABLING now\n");
2710 
2711 		/* Attempt to turn on DRAM ECC Enable */
2712 		value |= K8_NBCFG_ECC_ENABLE;
2713 		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
2714 
2715 		err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
2716 		if (err)
2717 			debugf0("Reading K8_NBCFG failed\n");
2718 
2719 		if (!(value & K8_NBCFG_ECC_ENABLE)) {
2720 			amd64_printk(KERN_WARNING,
2721 				"Hardware rejects Enabling DRAM ECC checking\n"
2722 				"Check memory DIMM configuration\n");
2723 		} else {
2724 			amd64_printk(KERN_DEBUG,
2725 				"Hardware accepted DRAM ECC Enable\n");
2726 		}
2727 	}
2728 	debugf0("NBCFG(2)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
2729 		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2730 		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
2731 
2732 	pvt->ctl_error_info.nbcfg = value;
2733 }
2734 
2735 static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
2736 {
2737 	const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
2738 	int cpu, idx = 0, err = 0;
2739 	struct msr msrs[cpumask_weight(cpumask)];
2740 	u32 value;
2741 	u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2742 
2743 	if (!pvt->nbctl_mcgctl_saved)
2744 		return;
2745 
2746 	memset(msrs, 0, sizeof(msrs));
2747 
2748 	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
2749 	if (err)
2750 		debugf0("Reading K8_NBCTL failed\n");
2751 	value &= ~mask;
2752 	value |= pvt->old_nbctl;
2753 
2754 	/* restore the NB Enable MCGCTL bit */
2755 	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2756 
2757 	rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2758 
2759 	for_each_cpu(cpu, cpumask) {
2760 		msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
2761 		msrs[idx].l |=
2762 			test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
2763 		idx++;
2764 	}
2765 
2766 	wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2767 }
2768 
2769 static void check_mcg_ctl(void *ret)
2770 {
2771 	u64 msr_val = 0;
2772 	u8 nbe;
2773 
2774 	rdmsrl(MSR_IA32_MCG_CTL, msr_val);
2775 	nbe = msr_val & K8_MSR_MCGCTL_NBE;
2776 
2777 	debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2778 		raw_smp_processor_id(), msr_val,
2779 		(nbe ? "enabled" : "disabled"));
2780 
2781 	if (!nbe)
2782 		*(int *)ret = 0;
2783 }
2784 
2785 /* check MCG_CTL on all the cpus on this node */
2786 static int amd64_mcg_ctl_enabled_on_cpus(const cpumask_t *mask)
2787 {
2788 	int ret = 1;
2789 	preempt_disable();
2790 	smp_call_function_many(mask, check_mcg_ctl, &ret, 1);
2791 	preempt_enable();
2792 
2793 	return ret;
2794 }
2795 
2796 /*
2797  * EDAC requires that the BIOS have ECC enabled before taking over the
2798  * processing of ECC errors. This is because the BIOS can properly initialize
2799  * the memory system completely. A command line option allows force-enabling
2800  * hardware ECC later, in amd64_enable_ecc_error_reporting().
2801  */
2802 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2803 {
2804 	u32 value;
2805 	int err = 0, ret = 0;
2806 	u8 ecc_enabled = 0;
2807 
2808 	err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
2809 	if (err)
2810 		debugf0("Reading K8_NBCFG failed\n");
2811 
2812 	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
2813 
2814 	ret = amd64_mcg_ctl_enabled_on_cpus(cpumask_of_node(pvt->mc_node_id));
2815 
2816 	debugf0("K8_NBCFG=0x%x,  DRAM ECC is %s\n", value,
2817 			(value & K8_NBCFG_ECC_ENABLE ? "enabled" : "disabled"));
2818 
2819 	if (!ecc_enabled || !ret) {
2820 		if (!ecc_enabled) {
2821 			amd64_printk(KERN_WARNING, "This node reports that "
2822 						   "Memory ECC is currently "
2823 						   "disabled.\n");
2824 
2825 			amd64_printk(KERN_WARNING, "bit 0x%lx in register "
2826 				"F3x%x of the MISC_CONTROL device (%s) "
2827 				"should be enabled\n", K8_NBCFG_ECC_ENABLE,
2828 				K8_NBCFG, pci_name(pvt->misc_f3_ctl));
2829 		}
2830 		if (!ret) {
2831 			amd64_printk(KERN_WARNING, "bit 0x%016lx in MSR 0x%08x "
2832 					"of node %d should be enabled\n",
2833 					K8_MSR_MCGCTL_NBE, MSR_IA32_MCG_CTL,
2834 					pvt->mc_node_id);
2835 		}
2836 		if (!ecc_enable_override) {
2837 			amd64_printk(KERN_WARNING, "WARNING: ECC is NOT "
2838 				"currently enabled by the BIOS. Module "
2839 				"will NOT be loaded.\n"
2840 				"    Either Enable ECC in the BIOS, "
2841 				"or use the 'ecc_enable_override' "
2842 				"parameter.\n"
2843 				"    Might be a BIOS bug, if BIOS says "
2844 				"ECC is enabled\n"
2845 				"    Use of the override can cause "
2846 				"unknown side effects.\n");
2847 			ret = -ENODEV;
2848 		} else
2849 			/*
2850 			 * enable further driver loading if ECC enable is
2851 			 * overridden.
2852 			 */
2853 			ret = 0;
2854 	} else {
2855 		amd64_printk(KERN_INFO,
2856 			"ECC is enabled by BIOS, Proceeding "
2857 			"with EDAC module initialization\n");
2858 
2859 		/* Signal good ECC status */
2860 		ret = 0;
2861 
2862 		/* CLEAR the override, since BIOS controlled it */
2863 		ecc_enable_override = 0;
2864 	}
2865 
2866 	return ret;
2867 }
2868 
2869 struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
2870 					  ARRAY_SIZE(amd64_inj_attrs) +
2871 					  1];
2872 
2873 struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
2874 
2875 static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
2876 {
2877 	unsigned int i = 0, j = 0;
2878 
2879 	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
2880 		sysfs_attrs[i] = amd64_dbg_attrs[i];
2881 
2882 	for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
2883 		sysfs_attrs[i] = amd64_inj_attrs[j];
2884 
2885 	sysfs_attrs[i] = terminator;
2886 
2887 	mci->mc_driver_sysfs_attributes = sysfs_attrs;
2888 }
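
/*
 * The resulting attribute array is laid out as
 *
 *	[ amd64_dbg_attrs..., amd64_inj_attrs..., terminator ]
 *
 * where the terminator's NULL .name presumably marks the end of the
 * list for the EDAC sysfs code.
 */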
2889 
2890 static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
2891 {
2892 	struct amd64_pvt *pvt = mci->pvt_info;
2893 
2894 	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2895 	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
2896 
2897 	if (pvt->nbcap & K8_NBCAP_SECDED)
2898 		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2899 
2900 	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
2901 		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2902 
2903 	mci->edac_cap		= amd64_determine_edac_cap(pvt);
2904 	mci->mod_name		= EDAC_MOD_STR;
2905 	mci->mod_ver		= EDAC_AMD64_VERSION;
2906 	mci->ctl_name		= get_amd_family_name(pvt->mc_type_index);
2907 	mci->dev_name		= pci_name(pvt->dram_f2_ctl);
2908 	mci->ctl_page_to_phys	= NULL;
2909 
2910 	/* IMPORTANT: Set the polling 'check' function in this module */
2911 	mci->edac_check		= amd64_check;
2912 
2913 	/* memory scrubber interface */
2914 	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
2915 	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
2916 }
2917 
2918 /*
2919  * Init stuff for this DRAM Controller device.
2920  *
2921  * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
2922  * Space feature MUST be enabled on ALL Processors prior to actually reading
2923  * from the ECS registers, since the loading of the module can occur on any
2924  * 'core' and cores don't 'see' the other processors' ECS data when ECS is
2925  * NOT enabled on them. Our solution is to first enable ECS access in this
2926  * routine on all processors, gather some data in an amd64_pvt structure and
2927  * later come back in a finish-setup function to perform the final
2928  * initialization. See also amd64_init_2nd_stage() for that.
2929  */
2930 static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
2931 				    int mc_type_index)
2932 {
2933 	struct amd64_pvt *pvt = NULL;
2934 	int err = 0, ret;
2935 
2936 	ret = -ENOMEM;
2937 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2938 	if (!pvt)
2939 		goto err_exit;
2940 
2941 	pvt->mc_node_id = get_node_id(dram_f2_ctl);
2942 
2943 	pvt->dram_f2_ctl	= dram_f2_ctl;
2944 	pvt->ext_model		= boot_cpu_data.x86_model >> 4;
2945 	pvt->mc_type_index	= mc_type_index;
2946 	pvt->ops		= family_ops(mc_type_index);
2947 	pvt->old_mcgctl		= 0;
2948 
2949 	/*
2950 	 * We have the dram_f2_ctl device as an argument, now go reserve its
2951 	 * sibling devices from the PCI system.
2952 	 */
2953 	ret = -ENODEV;
2954 	err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
2955 	if (err)
2956 		goto err_free;
2957 
2958 	ret = -EINVAL;
2959 	err = amd64_check_ecc_enabled(pvt);
2960 	if (err)
2961 		goto err_put;
2962 
2963 	/*
2964 	 * Key operation here: setup of HW prior to performing ops on it. Some
2965 	 * setup is required to access ECS data. After this is performed, the
2966 	 * 'teardown' function must be called upon error and normal exit paths.
2967 	 */
2968 	if (boot_cpu_data.x86 >= 0x10)
2969 		amd64_setup(pvt);
2970 
2971 	/*
2972 	 * Save the pointer to the private data for use in 2nd initialization
2973 	 * stage
2974 	 */
2975 	pvt_lookup[pvt->mc_node_id] = pvt;
2976 
2977 	return 0;
2978 
2979 err_put:
2980 	amd64_free_mc_sibling_devices(pvt);
2981 
2982 err_free:
2983 	kfree(pvt);
2984 
2985 err_exit:
2986 	return ret;
2987 }
2988 
2989 /*
2990  * This is the finishing stage of the init code. It needs to be performed
2991  * after all MCs' hardware has been prepped for accessing extended config space.
2992  */
2993 static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
2994 {
2995 	int node_id = pvt->mc_node_id;
2996 	struct mem_ctl_info *mci;
2997 	int ret, err = 0;
2998 
2999 	amd64_read_mc_registers(pvt);
3000 
3001 	ret = -ENODEV;
3002 	if (pvt->ops->probe_valid_hardware) {
3003 		err = pvt->ops->probe_valid_hardware(pvt);
3004 		if (err)
3005 			goto err_exit;
3006 	}
3007 
3008 	/*
3009 	 * We need to determine how many memory channels there are. Then use
3010 	 * that information for calculating the size of the dynamic instance
3011 	 * tables in the 'mci' structure
3012 	 */
3013 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
3014 	if (pvt->channel_count < 0)
3015 		goto err_exit;
3016 
3017 	ret = -ENOMEM;
3018 	mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id);
3019 	if (!mci)
3020 		goto err_exit;
3021 
3022 	mci->pvt_info = pvt;
3023 
3024 	mci->dev = &pvt->dram_f2_ctl->dev;
3025 	amd64_setup_mci_misc_attributes(mci);
3026 
3027 	if (amd64_init_csrows(mci))
3028 		mci->edac_cap = EDAC_FLAG_NONE;
3029 
3030 	amd64_enable_ecc_error_reporting(mci);
3031 	amd64_set_mc_sysfs_attributes(mci);
3032 
3033 	ret = -ENODEV;
3034 	if (edac_mc_add_mc(mci)) {
3035 		debugf1("failed edac_mc_add_mc()\n");
3036 		goto err_add_mc;
3037 	}
3038 
3039 	mci_lookup[node_id] = mci;
3040 	pvt_lookup[node_id] = NULL;
3041 
3042 	/* register stuff with EDAC MCE */
3043 	if (report_gart_errors)
3044 		amd_report_gart_errors(true);
3045 
3046 	amd_register_ecc_decoder(amd64_decode_bus_error);
3047 
3048 	return 0;
3049 
3050 err_add_mc:
3051 	edac_mc_free(mci);
3052 
3053 err_exit:
3054 	debugf0("failure to init 2nd stage: ret=%d\n", ret);
3055 
3056 	amd64_restore_ecc_error_reporting(pvt);
3057 
3058 	if (boot_cpu_data.x86 > 0xf)
3059 		amd64_teardown(pvt);
3060 
3061 	amd64_free_mc_sibling_devices(pvt);
3062 
3063 	kfree(pvt_lookup[pvt->mc_node_id]);
3064 	pvt_lookup[node_id] = NULL;
3065 
3066 	return ret;
3067 }
3068 
3069 
3070 static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
3071 				 const struct pci_device_id *mc_type)
3072 {
3073 	int ret = 0;
3074 
3075 	debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
3076 		get_amd_family_name(mc_type->driver_data));
3077 
3078 	ret = pci_enable_device(pdev);
3079 	if (ret < 0)
3080 		ret = -EIO;
3081 	else
3082 		ret = amd64_probe_one_instance(pdev, mc_type->driver_data);
3083 
3084 	if (ret < 0)
3085 		debugf0("ret=%d\n", ret);
3086 
3087 	return ret;
3088 }
3089 
3090 static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
3091 {
3092 	struct mem_ctl_info *mci;
3093 	struct amd64_pvt *pvt;
3094 
3095 	/* Remove from EDAC CORE tracking list */
3096 	mci = edac_mc_del_mc(&pdev->dev);
3097 	if (!mci)
3098 		return;
3099 
3100 	pvt = mci->pvt_info;
3101 
3102 	amd64_restore_ecc_error_reporting(pvt);
3103 
3104 	if (boot_cpu_data.x86 > 0xf)
3105 		amd64_teardown(pvt);
3106 
3107 	amd64_free_mc_sibling_devices(pvt);
3108 
3109 	mci_lookup[pvt->mc_node_id] = NULL;
3110 
3111 	kfree(pvt);
3112 	mci->pvt_info = NULL;
3113 
3114 	/* unregister from EDAC MCE */
3115 	amd_report_gart_errors(false);
3116 	amd_unregister_ecc_decoder(amd64_decode_bus_error);
3117 
3118 	/* Free the EDAC CORE resources */
3119 	edac_mc_free(mci);
3120 }
3121 
3122 /*
3123  * This table is part of the interface for loading drivers for PCI devices. The
3124  * PCI core identifies what devices are on a system during boot, and then
3125  * queries this table to see if this driver handles a given device it found.
3126  */
3127 static const struct pci_device_id amd64_pci_table[] __devinitdata = {
3128 	{
3129 		.vendor		= PCI_VENDOR_ID_AMD,
3130 		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
3131 		.subvendor	= PCI_ANY_ID,
3132 		.subdevice	= PCI_ANY_ID,
3133 		.class		= 0,
3134 		.class_mask	= 0,
3135 		.driver_data	= K8_CPUS
3136 	},
3137 	{
3138 		.vendor		= PCI_VENDOR_ID_AMD,
3139 		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
3140 		.subvendor	= PCI_ANY_ID,
3141 		.subdevice	= PCI_ANY_ID,
3142 		.class		= 0,
3143 		.class_mask	= 0,
3144 		.driver_data	= F10_CPUS
3145 	},
3146 	{
3147 		.vendor		= PCI_VENDOR_ID_AMD,
3148 		.device		= PCI_DEVICE_ID_AMD_11H_NB_DRAM,
3149 		.subvendor	= PCI_ANY_ID,
3150 		.subdevice	= PCI_ANY_ID,
3151 		.class		= 0,
3152 		.class_mask	= 0,
3153 		.driver_data	= F11_CPUS
3154 	},
3155 	{0, }
3156 };
3157 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
3158 
3159 static struct pci_driver amd64_pci_driver = {
3160 	.name		= EDAC_MOD_STR,
3161 	.probe		= amd64_init_one_instance,
3162 	.remove		= __devexit_p(amd64_remove_one_instance),
3163 	.id_table	= amd64_pci_table,
3164 };
3165 
3166 static void amd64_setup_pci_device(void)
3167 {
3168 	struct mem_ctl_info *mci;
3169 	struct amd64_pvt *pvt;
3170 
3171 	if (amd64_ctl_pci)
3172 		return;
3173 
3174 	mci = mci_lookup[0];
3175 	if (mci) {
3176 
3177 		pvt = mci->pvt_info;
3178 		amd64_ctl_pci =
3179 			edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
3180 						    EDAC_MOD_STR);
3181 
3182 		if (!amd64_ctl_pci) {
3183 			pr_warning("%s(): Unable to create PCI control\n",
3184 				   __func__);
3185 
3186 			pr_warning("%s(): PCI error report via EDAC not set\n",
3187 				   __func__);
3188 		}
3189 	}
3190 }
3191 
3192 static int __init amd64_edac_init(void)
3193 {
3194 	int nb, err = -ENODEV;
3195 
3196 	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
3197 
3198 	opstate_init();
3199 
3200 	if (cache_k8_northbridges() < 0)
3201 		return err;
3202 
3203 	err = pci_register_driver(&amd64_pci_driver);
3204 	if (err)
3205 		return err;
3206 
3207 	/*
3208 	 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
3209 	 * amd64_pvt structs. These will be used in the 2nd stage init function
3210 	 * to finish initialization of the MC instances.
3211 	 */
3212 	for (nb = 0; nb < num_k8_northbridges; nb++) {
3213 		if (!pvt_lookup[nb])
3214 			continue;
3215 
3216 		err = amd64_init_2nd_stage(pvt_lookup[nb]);
3217 		if (err)
3218 			goto err_2nd_stage;
3219 	}
3220 
3221 	amd64_setup_pci_device();
3222 
3223 	return 0;
3224 
3225 err_2nd_stage:
3226 	debugf0("2nd stage failed\n");
3227 
3229 	pci_unregister_driver(&amd64_pci_driver);
3230 
3231 	return err;
3232 }
3233 
3234 static void __exit amd64_edac_exit(void)
3235 {
3236 	if (amd64_ctl_pci)
3237 		edac_pci_release_generic_ctl(amd64_ctl_pci);
3238 
3239 	pci_unregister_driver(&amd64_pci_driver);
3240 }
3241 
3242 module_init(amd64_edac_init);
3243 module_exit(amd64_edac_exit);
3244 
3245 MODULE_LICENSE("GPL");
3246 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3247 		"Dave Peterson, Thayne Harbaugh");
3248 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3249 		EDAC_AMD64_VERSION);
3250 
3251 module_param(edac_op_state, int, 0444);
3252 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3253