xref: /linux/drivers/edac/pnd2_edac.c (revision d195c39052d1da278a00a6744ce59c383b67b191)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 * in "sys2pmi()". This is (almost) completely common between platforms.
 *
 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */
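
/*
 * A sketch of the flow implemented below (based on this file, not on
 * separate documentation): a machine check address "addr" is decoded by
 *
 *   sys2pmi(addr, &pmiidx, &pmiaddr, msg);           // stage 1: slice/channel
 *   ops->pmi2mem(mci, pmiaddr, pmiidx, &daddr, msg); // stage 2: DIMM geometry
 *
 * as driven by get_memory_error_data().
 */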

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_mc.h"
#include "edac_module.h"
#include "pnd2_edac.h"

#define EDAC_MOD_STR		"pnd2_edac"

#define APL_NUM_CHANNELS	4
#define DNV_NUM_CHANNELS	2
#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */

enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct dram_addr {
	int chan;
	int dimm;
	int rank;
	int bank;
	int row;
	int col;
};

struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];
	u64 tolm, tohm;
};

/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
	u64	base;
	u64	limit;
	u8	enabled;
} mot, as0, as1, as2;
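
/*
 * Worked example (hypothetical register values): if mot covers
 * [0x40000000, 0x7fffffff] and as2 covers [0x00000000, 0x7fffffff],
 * an address such as 0x50000000 falls in both, but sys2pmi() below
 * picks the interleave bit from the mot rules rather than from the
 * channel hash.
 */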

static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
				   struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;

#define PND2_MSG_SIZE	256

/* Debug macros */
#define pnd2_printk(level, fmt, arg...)			\
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...)	\
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
#define _4GB (1ul << 32)

#define PMI_ADDRESS_WIDTH	31
#define PND_MAX_PHYS_BIT	39

#define APL_ASYMSHIFT		28
#define DNV_ASYMSHIFT		31
#define CH_HASH_MASK_LSB	6
#define SLICE_HASH_MASK_LSB	6
#define MOT_SLC_INTLV_BIT	12
#define LOG2_PMI_ADDR_GRANULARITY	5
#define MOT_SHIFT	24

#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s)	((u64)(val) << (s))
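
/*
 * Worked example (illustration only): GET_BITFIELD(0x1234, 4, 11)
 * masks bits 11..4 and shifts them down, giving 0x23, while
 * U64_LSHIFT(0x23, 4) widens to u64 before shifting, giving 0x230.
 */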

/*
 * On Apollo Lake we access memory controller registers via a
 * side-band mailbox style interface in a hidden PCI device
 * configuration space.
 */
static struct pci_bus	*p2sb_bus;
#define P2SB_DEVFN	PCI_DEVFN(0xd, 0)
#define P2SB_ADDR_OFF	0xd0
#define P2SB_DATA_OFF	0xd4
#define P2SB_STAT_OFF	0xd8
#define P2SB_ROUT_OFF	0xda
#define P2SB_EADD_OFF	0xdc
#define P2SB_HIDE_OFF	0xe1

#define P2SB_BUSY	1

#define P2SB_READ(size, off, ptr) \
	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
#define P2SB_WRITE(size, off, val) \
	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)

static bool p2sb_is_busy(u16 *status)
{
	P2SB_READ(word, P2SB_STAT_OFF, status);

	return !!(*status & P2SB_BUSY);
}

static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;
	u8 hidden;

	/* Unhide the P2SB device, if it's hidden */
	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;
out:
	/* Hide the P2SB device, if it was hidden before */
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

	return ret;
}

static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		/* fall through */
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
					sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}

static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}

static u64 get_sideband_reg_base_addr(void)
{
	struct pci_dev *pdev;
	u32 hi, lo;
	u8 hidden;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
	if (pdev) {
		/* Unhide the P2SB device, if it's hidden */
		pci_read_config_byte(pdev, 0xe1, &hidden);
		if (hidden)
			pci_write_config_byte(pdev, 0xe1, 0);

		pci_read_config_dword(pdev, 0x10, &lo);
		pci_read_config_dword(pdev, 0x14, &hi);
		lo &= 0xfffffff0;

		/* Hide the P2SB device, if it was hidden before */
		if (hidden)
			pci_write_config_byte(pdev, 0xe1, hidden);

		pci_dev_put(pdev);
		return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
	} else {
		return 0xfd000000;
	}
}

#define DNV_MCHBAR_SIZE  0x8000
#define DNV_SB_PORT_SIZE 0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;
	unsigned long size;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
			size = DNV_MCHBAR_SIZE;
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);
			size = DNV_SB_PORT_SIZE;
		}

		base = ioremap((resource_size_t)addr, size);
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
			(sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}

#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,					\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)

#define RD_REG(regp, regname)			\
	ops->rd_reg(regname ## _port,		\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)

static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;

static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->enabled = 1;
	rp->base = base;
	rp->limit = limit;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}

static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}

static bool in_region(struct region *rp, u64 addr)
{
	if (!rp->enabled)
		return false;

	return rp->base <= addr && addr <= rp->limit;
}

static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;
	if (p->slice_1_disabled)
		mask &= 0x3;
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake dunit */
/*
 * Validated on a board with just two DIMMs in the [0] and [2] positions
 * in this array. The other port numbers match the documentation, but
 * caution is advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];

static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
			  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
			  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}

static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
			  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
			  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
			  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}

static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	/*
	 * RD_REGP() will fail for unpopulated or non-existent
	 * DIMM slots. Return success if we find at least one DIMM.
	 */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}

static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
			RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
			RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
			RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
			RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
			RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
			RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
			RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}

/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
		RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
		RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
		RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
		RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
		RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
		RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
		RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
				  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
				  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
				  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
					   U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
					   U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
				 !chash.slice_0_mem_disabled &&
				 (chash.sym_slice0_channel_enabled != 0) &&
				 (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
				 !chash.enable_pmi_dual_data_mode &&
				 ((chash.sym_slice0_channel_enabled == 3) ||
				 (chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}

/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}
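
/*
 * Worked example (hypothetical top_lm of 2 GiB): the MMIO gap covers
 * [top_lm, 4 GiB), so a system address of 5 GiB maps to
 * 5 GiB - (4 GiB - 2 GiB) = 3 GiB in the contiguous DRAM space.
 */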

/* Squeeze out one address bit, shift upper part down to fill gap */
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64	mask;

	if (bitidx == -1)
		return;

	mask = (1ull << bitidx) - 1;
	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}
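
/*
 * Worked example (illustration only): with bitidx = 12 an address of
 * 0x3456 keeps its low twelve bits (0x456) while everything from
 * bit 13 upward shifts down one place (bit 12 itself is squeezed out),
 * yielding 0x1456.
 */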

/* XOR all the bits from addr specified in mask */
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 result = addr & mask;

	result = (result >> 32) ^ result;
	result = (result >> 16) ^ result;
	result = (result >> 8) ^ result;
	result = (result >> 4) ^ result;
	result = (result >> 2) ^ result;
	result = (result >> 1) ^ result;

	return (int)result & 1;
}
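
/*
 * Worked example (illustration only): hash_by_mask(0xb, 0xe) keeps
 * bits 3 and 1 of the address (0xa) and folds them together, returning
 * their XOR parity: two bits set, so the result is 0.
 */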

/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Count the enabled symmetric channels. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	   (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
							MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}

/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS   (0x80)			/* rank */
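
/*
 * Example of this encoding (illustration only): C(7) == 0x17 means
 * "this PMI address bit becomes column bit 7". The high nibble selects
 * the field type and, except for RS, the low nibble gives the bit
 * index within that field.
 */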

/* addrdec values */
#define AMAP_1KB	0
#define AMAP_2KB	1
#define AMAP_4KB	2
#define AMAP_RSVD	3

/* dden values */
#define DEN_4Gb		0
#define DEN_8Gb		2

/* dwid values */
#define X8		0
#define X16		1

static struct dimm_geometry {
	u8	addrdec;
	u8	dden;
	u8	dwid;
	u8	rowbits, colbits;
	u16	bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};
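
/*
 * Reading the table above (illustration only): in the first geometry,
 * PMI address bit 0 maps to column bit 2, PMI bit 5 to bank bit 0 and
 * PMI bit 23 to the rank select; trailing zero entries mark PMI bits
 * that geometry does not use.
 */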

static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
		break;
	case 2:
		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}

static int rank_hash(u64 pmiaddr)
{
	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}
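
/*
 * Worked example (illustration only): rank_hash(0x10400) XORs PMI
 * address bits 16 and 10; both are set here, so it returns 0 and the
 * rank select bit in apl_pmi2mem() below is left unchanged.
 */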

/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int	bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}

/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
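
/*
 * Worked example (illustration only): dnv_get_bit(0x40, 6, 3) plucks
 * bit 6 of the PMI address (set here) and returns it at bit position 3,
 * i.e. 0x8.
 */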

static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
					   struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}

static int check_channel(int ch)
{
	if (drp0[ch].dramtype != 0) {
		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
		return 1;
	} else if (drp0[ch].eccen == 0) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int apl_check_ecc_active(void)
{
	int	i, ret = 0;

	/* Check dramtype and ECC mode for each present DIMM */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (chan_mask & BIT(i))
			ret += check_channel(i);
	return ret ? -EINVAL : 0;
}

#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int dnv_check_ecc_active(void)
{
	int	i, ret = 0;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		ret += check_unit(i);
	return ret ? -EINVAL : 0;
}

static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
								 struct dram_addr *daddr, char *msg)
{
	u64	pmiaddr;
	u32	pmiidx;
	int	ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
			 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}

static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
						 HW_EVENT_ERR_CORRECTED;

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
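	/*
	 * Worked example (illustration only): errcode 0x0091 passes the
	 * mask test (0x0091 & 0xef80 == 0x0080) and decodes as optypenum 1,
	 * a "memory read error", on channel 1.
	 */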
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
						 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}

static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt	*pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64	capacity;
	int	i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = edac_get_dimm(mci, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
				   (1ul << dimms[g].colbits);
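		/*
		 * Worked example (illustration only): one enabled rank with
		 * rowbits = 15 and colbits = 10 gives 1 * 8 banks * 2^15 rows *
		 * 2^10 columns = 2^28 64-bit words, reported as 2048 MByte by
		 * the ">> (20 - 3)" conversion below.
		 */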
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}

static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int	i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64	capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = edac_get_dimm(mci, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}

static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct pnd2_pvt *pvt;
	int rc;

	rc = ops->check_ecc();
	if (rc < 0)
		return rc;

	/* Allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = ops->channels;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ops->dimms_per_channel;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	mci->mod_name = EDAC_MOD_STR;
	mci->dev_name = ops->name;
	mci->ctl_name = "Pondicherry2";

	/* Get dimm basic config and the memory layout */
	ops->get_dimm_config(mci);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -EINVAL;
	}

	*ppmci = mci;

	return 0;
}

static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}

/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = pnd2_mci;
	if (!mci)
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0.
	 * Bit 12 has a special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
				   mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
				   mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}

static struct notifier_block pnd2_mce_dec = {
	.notifier_call	= pnd2_mce_check_error,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
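/*
 * Example usage (a sketch; the paths assume debugfs is mounted at
 * /sys/kernel/debug and that edac_debugfs_create_dir() below places
 * "pnd2_test" under the EDAC debugfs directory):
 *
 *   # echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *   # cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
 *
 * The results blob then holds one line of the form
 * "SysAddr=... Channel=... DIMM=... Rank=... Bank=... Row=... Column=...".
 */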
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
			 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
			 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
							 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void)	{}
static void teardown_pnd2_debug(void)	{}
#endif /* CONFIG_EDAC_DEBUG */


static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}

static struct dunit_ops apl_ops = {
		.name			= "pnd2/apl",
		.type			= APL,
		.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
		.pmiidx_shift		= 0,
		.channels		= APL_NUM_CHANNELS,
		.dimms_per_channel	= 1,
		.rd_reg			= apl_rd_reg,
		.get_registers		= apl_get_registers,
		.check_ecc		= apl_check_ecc_active,
		.mk_region		= apl_mk_region,
		.get_dimm_config	= apl_get_dimm_config,
		.pmi2mem		= apl_pmi2mem,
};

static struct dunit_ops dnv_ops = {
		.name			= "pnd2/dnv",
		.type			= DNV,
		.pmiaddr_shift		= 0,
		.pmiidx_shift		= 1,
		.channels		= DNV_NUM_CHANNELS,
		.dimms_per_channel	= 2,
		.rd_reg			= dnv_rd_reg,
		.get_registers		= dnv_get_registers,
		.check_ecc		= dnv_check_ecc_active,
		.mk_region		= dnv_mk_region,
		.get_dimm_config	= dnv_get_dimm_config,
		.pmi2mem		= dnv_pmi2mem,
};

static const struct x86_cpu_id pnd2_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&apl_ops),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&dnv_ops),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);

static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	const char *owner;
	int rc;

	edac_dbg(2, "\n");

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	ops = (struct dunit_ops *)id->driver_data;

	if (ops->type == APL) {
		p2sb_bus = pci_find_bus(0, 0);
		if (!p2sb_bus)
			return -ENODEV;
	}

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}

static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}

module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
1605