xref: /linux/drivers/edac/i10nm_base.c (revision d7223aed30cd77be31dabd635e709828f3255366)
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Intel(R) 10nm server memory controller.
 * Copyright (c) 2019, Intel Corporation.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mce.h>
#include "edac_module.h"
#include "skx_common.h"

#define I10NM_REVISION	"v0.0.6"
#define EDAC_MOD_STR	"i10nm_edac"

/* Debug macros */
#define i10nm_printk(level, fmt, arg...)	\
	edac_printk(level, "i10nm", fmt, ##arg)

#define I10NM_GET_SCK_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd0, &(reg))
#define I10NM_GET_IMC_BAR(d, i, reg)		\
	pci_read_config_dword((d)->uracu,	\
	(res_cfg->type == GNR ? 0xd4 : 0xd8) + (i) * 4, &(reg))
#define I10NM_GET_SAD(d, offset, i, reg)\
	pci_read_config_dword((d)->sad_all, (offset) + (i) * \
	(res_cfg->type == GNR ? 12 : 8), &(reg))
#define I10NM_GET_HBM_IMC_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd4, &(reg))
#define I10NM_GET_CAPID3_CFG(d, reg)	\
	pci_read_config_dword((d)->pcu_cr3,	\
	res_cfg->type == GNR ? 0x290 : 0x90, &(reg))
#define I10NM_GET_CAPID5_CFG(d, reg)	\
	pci_read_config_dword((d)->pcu_cr3,	\
	res_cfg->type == GNR ? 0x298 : 0x98, &(reg))
#define I10NM_GET_DIMMMTR(m, i, j)	\
	readl((m)->mbase + ((m)->hbm_mc ? 0x80c :	\
	(res_cfg->type == GNR ? 0xc0c : 0x2080c)) +	\
	(i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i)	\
	readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_MCMTR(m, i)		\
	readl((m)->mbase + ((m)->hbm_mc ? 0xef8 :	\
	(res_cfg->type == GNR ? 0xaf8 : 0x20ef8)) +	\
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_REG32(m, i, offset)	\
	readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_GET_REG64(m, i, offset)	\
	readq((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_SET_REG32(m, i, offset, v)	\
	writel(v, (m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
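/*
 * Note on the channel register accessors above: each channel's registers
 * live in a per-channel MMIO window, so the effective address is
 * mbase + chan * chan_mmio_sz + register offset (HBM and GNR memory
 * controllers use different register offsets, as encoded above).
 */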

#define I10NM_GET_SCK_MMIO_BASE(reg)	(GET_BITFIELD(reg, 0, 28) << 23)
#define I10NM_GET_IMC_MMIO_OFFSET(reg)	(GET_BITFIELD(reg, 0, 10) << 12)
#define I10NM_GET_IMC_MMIO_SIZE(reg)	((GET_BITFIELD(reg, 13, 23) - \
					 GET_BITFIELD(reg, 0, 10) + 1) << 12)
#define I10NM_GET_HBM_IMC_MMIO_OFFSET(reg)	\
	((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000)
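/*
 * Worked example for the offset/size decoding above (register value
 * hypothetical): if bits 0-10 of the IMC BAR register hold 0x010 and
 * bits 13-23 hold 0x013, the IMC MMIO window starts at offset
 * 0x010 << 12 = 0x10000 and spans (0x013 - 0x010 + 1) << 12 = 0x4000
 * bytes.
 */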

#define I10NM_GNR_IMC_MMIO_OFFSET	0x24c000
#define I10NM_GNR_D_IMC_MMIO_OFFSET	0x206000
#define I10NM_GNR_IMC_MMIO_SIZE		0x4000
#define I10NM_HBM_IMC_MMIO_SIZE		0x9000
#define I10NM_DDR_IMC_CH_CNT(reg)	GET_BITFIELD(reg, 21, 24)
#define I10NM_IS_HBM_PRESENT(reg)	GET_BITFIELD(reg, 27, 30)
#define I10NM_IS_HBM_IMC(reg)		GET_BITFIELD(reg, 29, 29)

#define I10NM_MAX_SAD			16
#define I10NM_SAD_ENABLE(reg)		GET_BITFIELD(reg, 0, 0)
#define I10NM_SAD_NM_CACHEABLE(reg)	GET_BITFIELD(reg, 5, 5)

static struct list_head *i10nm_edac_list;

static struct res_config *res_cfg;
static int retry_rd_err_log;
static int decoding_via_mca;
static bool mem_cfg_2lm;

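/*
 * The reg_rrl tables below describe the per-CPU retry_rd_err_log (RRL)
 * register layout (struct reg_rrl is defined in skx_common.h). As the
 * driver consumes them: each of the .set_num register sets serves one
 * RRL mode from .modes[]; within a set, .offsets[set][0] is the
 * control/status register carrying the valid/UC/overflow status bits
 * and the enable bits, and the remaining registers hold the logged
 * error data dumped by show_retry_rd_err_log(). The .cecnt_* fields
 * locate the corrected-error counters.
 */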
static struct reg_rrl icx_reg_rrl_ddr = {
	.set_num = 2,
	.reg_num = 6,
	.modes = {LRE_SCRUB, LRE_DEMAND},
	.offsets = {
		{0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8},
		{0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0},
	},
	.widths		= {4, 4, 4, 4, 4, 8},
	.v_mask		= BIT(0),
	.uc_mask	= BIT(1),
	.over_mask	= BIT(2),
	.en_patspr_mask	= BIT(13),
	.noover_mask	= BIT(14),
	.en_mask	= BIT(15),

	.cecnt_num	= 4,
	.cecnt_offsets	= {0x22c18, 0x22c1c, 0x22c20, 0x22c24},
	.cecnt_widths	= {4, 4, 4, 4},
};

static struct reg_rrl spr_reg_rrl_ddr = {
	.set_num = 3,
	.reg_num = 6,
	.modes = {LRE_SCRUB, LRE_DEMAND, FRE_DEMAND},
	.offsets = {
		{0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8},
		{0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0},
		{0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10},
	},
	.widths		= {4, 4, 8, 4, 4, 8},
	.v_mask		= BIT(0),
	.uc_mask	= BIT(1),
	.over_mask	= BIT(2),
	.en_patspr_mask	= BIT(13),
	.noover_mask	= BIT(14),
	.en_mask	= BIT(15),

	.cecnt_num	= 4,
	.cecnt_offsets	= {0x22c18, 0x22c1c, 0x22c20, 0x22c24},
	.cecnt_widths	= {4, 4, 4, 4},
};

static struct reg_rrl spr_reg_rrl_hbm_pch0 = {
	.set_num = 2,
	.reg_num = 6,
	.modes = {LRE_SCRUB, LRE_DEMAND},
	.offsets = {
		{0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8},
		{0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0},
	},
	.widths		= {4, 4, 8, 4, 4, 8},
	.v_mask		= BIT(0),
	.uc_mask	= BIT(1),
	.over_mask	= BIT(2),
	.en_patspr_mask	= BIT(13),
	.noover_mask	= BIT(14),
	.en_mask	= BIT(15),

	.cecnt_num	= 4,
	.cecnt_offsets	= {0x2818, 0x281c, 0x2820, 0x2824},
	.cecnt_widths	= {4, 4, 4, 4},
};

static struct reg_rrl spr_reg_rrl_hbm_pch1 = {
	.set_num = 2,
	.reg_num = 6,
	.modes = {LRE_SCRUB, LRE_DEMAND},
	.offsets = {
		{0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8},
		{0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0},
	},
	.widths		= {4, 4, 8, 4, 4, 8},
	.v_mask		= BIT(0),
	.uc_mask	= BIT(1),
	.over_mask	= BIT(2),
	.en_patspr_mask	= BIT(13),
	.noover_mask	= BIT(14),
	.en_mask	= BIT(15),

	.cecnt_num	= 4,
	.cecnt_offsets	= {0x2c18, 0x2c1c, 0x2c20, 0x2c24},
	.cecnt_widths	= {4, 4, 4, 4},
};

static struct reg_rrl gnr_reg_rrl_ddr = {
	.set_num = 4,
	.reg_num = 6,
	.modes = {FRE_SCRUB, FRE_DEMAND, LRE_SCRUB, LRE_DEMAND},
	.offsets = {
		{0x2f10, 0x2f20, 0x2f30, 0x2f50, 0x2f60, 0xba0},
		{0x2f14, 0x2f24, 0x2f38, 0x2f54, 0x2f64, 0xba8},
		{0x2f18, 0x2f28, 0x2f40, 0x2f58, 0x2f68, 0xbb0},
		{0x2f1c, 0x2f2c, 0x2f48, 0x2f5c, 0x2f6c, 0xbb8},
	},
	.widths		= {4, 4, 8, 4, 4, 8},
	.v_mask		= BIT(0),
	.uc_mask	= BIT(1),
	.over_mask	= BIT(2),
	.en_patspr_mask	= BIT(14),
	.noover_mask	= BIT(15),
	.en_mask	= BIT(12),

	.cecnt_num	= 8,
	.cecnt_offsets	= {0x2c10, 0x2c14, 0x2c18, 0x2c1c, 0x2c20, 0x2c24, 0x2c28, 0x2c2c},
	.cecnt_widths	= {4, 4, 4, 4, 4, 4, 4, 4},
};

static u64 read_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width)
{
	switch (width) {
	case 4:
		return I10NM_GET_REG32(imc, chan, offset);
	case 8:
		return I10NM_GET_REG64(imc, chan, offset);
	default:
		i10nm_printk(KERN_ERR, "Invalid read RRL 0x%x width %d\n", offset, width);
		return 0;
	}
}

static void write_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width, u64 val)
{
	switch (width) {
	case 4:
		return I10NM_SET_REG32(imc, chan, offset, (u32)val);
	default:
		i10nm_printk(KERN_ERR, "Invalid write RRL 0x%x width %d\n", offset, width);
	}
}

static void enable_rrl(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
		       int rrl_set, bool enable, u32 *rrl_ctl)
{
	enum rrl_mode mode = rrl->modes[rrl_set];
	u32 offset = rrl->offsets[rrl_set][0], v;
	u8 width = rrl->widths[0];
	bool first, scrub;

	/* First or last read error. */
	first = (mode == FRE_SCRUB || mode == FRE_DEMAND);
	/* Patrol scrub or on-demand read error. */
	scrub = (mode == FRE_SCRUB || mode == LRE_SCRUB);

	v = read_imc_reg(imc, chan, offset, width);

	if (enable) {
		/* Save default configurations. */
		*rrl_ctl = v;
		v &= ~rrl->uc_mask;

		if (first)
			v |= rrl->noover_mask;
		else
			v &= ~rrl->noover_mask;

		if (scrub)
			v |= rrl->en_patspr_mask;
		else
			v &= ~rrl->en_patspr_mask;

		v |= rrl->en_mask;
	} else {
		/* Restore default configurations. */
		if (*rrl_ctl & rrl->uc_mask)
			v |= rrl->uc_mask;

		if (first) {
			if (!(*rrl_ctl & rrl->noover_mask))
				v &= ~rrl->noover_mask;
		} else {
			if (*rrl_ctl & rrl->noover_mask)
				v |= rrl->noover_mask;
		}

		if (scrub) {
			if (!(*rrl_ctl & rrl->en_patspr_mask))
				v &= ~rrl->en_patspr_mask;
		} else {
			if (*rrl_ctl & rrl->en_patspr_mask)
				v |= rrl->en_patspr_mask;
		}

		if (!(*rrl_ctl & rrl->en_mask))
			v &= ~rrl->en_mask;
	}

	write_imc_reg(imc, chan, offset, width, v);
}

static void enable_rrls(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
			bool enable, u32 *rrl_ctl)
{
	for (int i = 0; i < rrl->set_num; i++)
		enable_rrl(imc, chan, rrl, i, enable, rrl_ctl + i);
}

static void enable_rrls_ddr(struct skx_imc *imc, bool enable)
{
	struct reg_rrl *rrl_ddr = res_cfg->reg_rrl_ddr;
	int i, chan_num = res_cfg->ddr_chan_num;
	struct skx_channel *chan = imc->chan;

	if (!imc->mbase)
		return;

	for (i = 0; i < chan_num; i++)
		enable_rrls(imc, i, rrl_ddr, enable, chan[i].rrl_ctl[0]);
}

static void enable_rrls_hbm(struct skx_imc *imc, bool enable)
{
	struct reg_rrl **rrl_hbm = res_cfg->reg_rrl_hbm;
	int i, chan_num = res_cfg->hbm_chan_num;
	struct skx_channel *chan = imc->chan;

	if (!imc->mbase || !imc->hbm_mc || !rrl_hbm[0] || !rrl_hbm[1])
		return;

	for (i = 0; i < chan_num; i++) {
		enable_rrls(imc, i, rrl_hbm[0], enable, chan[i].rrl_ctl[0]);
		enable_rrls(imc, i, rrl_hbm[1], enable, chan[i].rrl_ctl[1]);
	}
}

static void enable_retry_rd_err_log(bool enable)
{
	struct skx_dev *d;
	int i, imc_num;

	edac_dbg(2, "\n");

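	/* d->imc[] holds the DDR memory controllers first, then the HBM ones. */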
	list_for_each_entry(d, i10nm_edac_list, list) {
		imc_num  = res_cfg->ddr_imc_num;
		for (i = 0; i < imc_num; i++)
			enable_rrls_ddr(&d->imc[i], enable);

		imc_num += res_cfg->hbm_imc_num;
		for (; i < imc_num; i++)
			enable_rrls_hbm(&d->imc[i], enable);
	}
}

static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
				  int len, bool scrub_err)
{
	int i, j, n, ch = res->channel, pch = res->cs & 1;
	struct skx_imc *imc = &res->dev->imc[res->imc];
	u64 log, corr, status_mask;
	struct reg_rrl *rrl;
	bool scrub;
	u32 offset;
	u8 width;

	if (!imc->mbase)
		return;

	rrl = imc->hbm_mc ? res_cfg->reg_rrl_hbm[pch] : res_cfg->reg_rrl_ddr;

	if (!rrl)
		return;

	status_mask = rrl->over_mask | rrl->uc_mask | rrl->v_mask;

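	/*
	 * Append the RRL dump to @msg. The result takes the form (register
	 * values hypothetical):
	 *   " retry_rd_err_log[00008405 000302a0 ...] correrrcnt[0001 0000 ...]"
	 */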
	n = scnprintf(msg, len, " retry_rd_err_log[");
	for (i = 0; i < rrl->set_num; i++) {
		scrub = (rrl->modes[i] == FRE_SCRUB || rrl->modes[i] == LRE_SCRUB);
		if (scrub_err != scrub)
			continue;

		for (j = 0; j < rrl->reg_num && len - n > 0; j++) {
			offset = rrl->offsets[i][j];
			width = rrl->widths[j];
			log = read_imc_reg(imc, ch, offset, width);

			if (width == 4)
				n += scnprintf(msg + n, len - n, "%.8llx ", log);
			else
				n += scnprintf(msg + n, len - n, "%.16llx ", log);

			/* Clear RRL status if RRL is in Linux control mode. */
			if (retry_rd_err_log == 2 && !j && (log & status_mask))
				write_imc_reg(imc, ch, offset, width, log & ~status_mask);
		}
	}

	/* Move back one space to drop the trailing blank. */
	n--;
	n += scnprintf(msg + n, len - n, "]");

	if (len - n > 0) {
		n += scnprintf(msg + n, len - n, " correrrcnt[");
		for (i = 0; i < rrl->cecnt_num && len - n > 0; i++) {
			offset = rrl->cecnt_offsets[i];
			width = rrl->cecnt_widths[i];
			corr = read_imc_reg(imc, ch, offset, width);

			/* CPUs {ICX,SPR} encode two counters per 4-byte CORRERRCNT register. */
			if (res_cfg->type <= SPR) {
				n += scnprintf(msg + n, len - n, "%.4llx %.4llx ",
					       corr & 0xffff, corr >> 16);
			} else {
				/* CPUs {GNR} encode one counter per CORRERRCNT register. */
				if (width == 4)
					n += scnprintf(msg + n, len - n, "%.8llx ", corr);
				else
					n += scnprintf(msg + n, len - n, "%.16llx ", corr);
			}
		}

		/* Move back one space to drop the trailing blank. */
		n--;
		n += scnprintf(msg + n, len - n, "]");
	}
}

static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
					   unsigned int dev, unsigned int fun)
{
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun));
	if (!pdev) {
		edac_dbg(2, "No device %02x:%02x.%x\n",
			 bus, dev, fun);
		return NULL;
	}

	if (unlikely(pci_enable_device(pdev) < 0)) {
		edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
			 bus, dev, fun);
		pci_dev_put(pdev);
		return NULL;
	}

	return pdev;
}

/**
 * i10nm_get_imc_num() - Get the number of present DDR memory controllers.
 *
 * @cfg : The pointer to the structure of EDAC resource configurations.
 *
 * For Granite Rapids CPUs, the number of present DDR memory controllers is
 * read at runtime and overwrites the value statically configured in
 * @cfg->ddr_imc_num. For other CPUs, the number of present DDR memory
 * controllers is statically configured in @cfg->ddr_imc_num.
 *
 * RETURNS : 0 on success, < 0 on failure.
 */
static int i10nm_get_imc_num(struct res_config *cfg)
{
	int n, imc_num, chan_num = 0;
	struct skx_dev *d;
	u32 reg;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->pcu_cr3_bdf.bus],
						 res_cfg->pcu_cr3_bdf.dev,
						 res_cfg->pcu_cr3_bdf.fun);
		if (!d->pcu_cr3)
			continue;

		if (I10NM_GET_CAPID5_CFG(d, reg))
			continue;

		n = I10NM_DDR_IMC_CH_CNT(reg);

		if (!chan_num) {
			chan_num = n;
			edac_dbg(2, "Get DDR CH number: %d\n", chan_num);
		} else if (chan_num != n) {
			i10nm_printk(KERN_NOTICE, "Get DDR CH numbers: %d, %d\n", chan_num, n);
		}
	}

	switch (cfg->type) {
	case GNR:
		/*
		 * One channel per DDR memory controller for Granite Rapids CPUs.
		 */
		imc_num = chan_num;

		if (!imc_num) {
			i10nm_printk(KERN_ERR, "Invalid DDR MC number\n");
			return -ENODEV;
		}

		if (imc_num > I10NM_NUM_DDR_IMC) {
			i10nm_printk(KERN_ERR, "Need to make I10NM_NUM_DDR_IMC >= %d\n", imc_num);
			return -EINVAL;
		}

		if (cfg->ddr_imc_num != imc_num) {
			/*
			 * Store the number of present DDR memory controllers.
			 */
			cfg->ddr_imc_num = imc_num;
			edac_dbg(2, "Set DDR MC number: %d\n", imc_num);
		}

		return 0;
	default:
		/*
		 * For other CPUs, the number of present DDR memory controllers
		 * is statically pre-configured in cfg->ddr_imc_num.
		 */
		return 0;
	}
}

static bool i10nm_check_2lm(struct res_config *cfg)
{
	struct skx_dev *d;
	u32 reg;
	int i;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->sad_all_bdf.bus],
						 res_cfg->sad_all_bdf.dev,
						 res_cfg->sad_all_bdf.fun);
		if (!d->sad_all)
			continue;

		for (i = 0; i < I10NM_MAX_SAD; i++) {
			I10NM_GET_SAD(d, cfg->sad_all_offset, i, reg);
			if (I10NM_SAD_ENABLE(reg) && I10NM_SAD_NM_CACHEABLE(reg)) {
				edac_dbg(2, "2-level memory configuration.\n");
				return true;
			}
		}
	}

	return false;
}

/*
 * Check whether the error comes from DDRT, using the ICX/Tremont/SPR
 * model-specific error codes. Refer to SDM vol3B 17.11.3/17.13.2, Intel
 * IMC MC error codes for IA32_MCi_STATUS.
 */
static bool i10nm_mscod_is_ddrt(u32 mscod)
{
	switch (res_cfg->type) {
	case I10NM:
		switch (mscod) {
		case 0x0106: case 0x0107:
		case 0x0800: case 0x0804:
		case 0x0806 ... 0x0808:
		case 0x080a ... 0x080e:
		case 0x0810: case 0x0811:
		case 0x0816: case 0x081e:
		case 0x081f:
			return true;
		}

		break;
	case SPR:
		switch (mscod) {
		case 0x0800: case 0x0804:
		case 0x0806 ... 0x0808:
		case 0x080a ... 0x080e:
		case 0x0810: case 0x0811:
		case 0x0816: case 0x081e:
		case 0x081f:
			return true;
		}

		break;
	default:
		return false;
	}

	return false;
}

static bool i10nm_mc_decode_available(struct mce *mce)
{
#define ICX_IMCx_CHy		0x06666000
	u8 bank;

	if (!decoding_via_mca || mem_cfg_2lm)
		return false;

	if ((mce->status & (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
			!= (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
		return false;

	bank = mce->bank;

	switch (res_cfg->type) {
	case I10NM:
		/* Check whether the bank is one of {13,14,17,18,21,22,25,26} */
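		/*
		 * ICX_IMCx_CHy == 0x06666000 has exactly bits
		 * {13,14,17,18,21,22,25,26} set, one per ICX IMC/channel
		 * MCA bank.
		 */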
		if (!(ICX_IMCx_CHy & (1 << bank)))
			return false;
		break;
	case SPR:
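		/* SPR IMC MCA banks are 13-20 (two channels per IMC). */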
		if (bank < 13 || bank > 20)
			return false;
		break;
	default:
		return false;
	}

	/* DDRT errors can't be decoded from MCA bank registers */
	if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)
		return false;

	if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))
		return false;

	return true;
}

static bool i10nm_mc_decode(struct decoded_addr *res)
{
	struct mce *m = res->mce;
	struct skx_dev *d;
	u8 bank;

	if (!i10nm_mc_decode_available(m))
		return false;

	list_for_each_entry(d, i10nm_edac_list, list) {
		if (d->imc[0].src_id == m->socketid) {
			res->socket = m->socketid;
			res->dev = d;
			break;
		}
	}

	switch (res_cfg->type) {
	case I10NM:
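		/*
		 * Map the ICX bank to (imc, channel): e.g. bank 17 gives
		 * bank - 13 = 4, so imc = 4 / 4 = 1 and channel = 4 % 2 = 0.
		 */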
		bank              = m->bank - 13;
		res->imc          = bank / 4;
		res->channel      = bank % 2;
		res->column       = GET_BITFIELD(m->misc, 9, 18) << 2;
		res->row          = GET_BITFIELD(m->misc, 19, 39);
		res->bank_group   = GET_BITFIELD(m->misc, 40, 41);
		res->bank_address = GET_BITFIELD(m->misc, 42, 43);
		res->bank_group  |= GET_BITFIELD(m->misc, 44, 44) << 2;
		res->rank         = GET_BITFIELD(m->misc, 56, 58);
		res->dimm         = res->rank >> 2;
		res->rank         = res->rank % 4;
		break;
	case SPR:
		bank              = m->bank - 13;
		res->imc          = bank / 2;
		res->channel      = bank % 2;
		res->column       = GET_BITFIELD(m->misc, 9, 18) << 2;
		res->row          = GET_BITFIELD(m->misc, 19, 36);
		res->bank_group   = GET_BITFIELD(m->misc, 37, 38);
		res->bank_address = GET_BITFIELD(m->misc, 39, 40);
		res->bank_group  |= GET_BITFIELD(m->misc, 41, 41) << 2;
		res->rank         = GET_BITFIELD(m->misc, 57, 57);
		res->dimm         = GET_BITFIELD(m->misc, 58, 58);
		break;
	default:
		return false;
	}

	if (!res->dev) {
		skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
			   m->socketid, res->imc);
		return false;
	}

	return true;
}

/**
 * get_gnr_mdev() - Get the PCI device of the @logical_idx-th DDR memory controller.
 *
 * @d            : The pointer to the structure of CPU socket EDAC device.
 * @logical_idx  : The logical index of the present memory controller (0 ~ max present MC# - 1).
 * @physical_idx : To store the corresponding physical index of @logical_idx.
 *
 * RETURNS       : The PCI device of the @logical_idx-th DDR memory controller, NULL on failure.
 */
static struct pci_dev *get_gnr_mdev(struct skx_dev *d, int logical_idx, int *physical_idx)
{
#define GNR_MAX_IMC_PCI_CNT	28

	struct pci_dev *mdev;
	int i, logical = 0;

	/*
	 * Detect present memory controllers from { PCI devices 5-8, functions 1-7 }.
	 */
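	/*
	 * E.g. with gnr_cfg's ddr_mdev_bdf = {0, 5, 1}, i = 9 probes PCI
	 * device 5 + 9 / 7 = 6, function 1 + 9 % 7 = 3.
	 */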
	for (i = 0; i < GNR_MAX_IMC_PCI_CNT; i++) {
		mdev = pci_get_dev_wrapper(d->seg,
					   d->bus[res_cfg->ddr_mdev_bdf.bus],
					   res_cfg->ddr_mdev_bdf.dev + i / 7,
					   res_cfg->ddr_mdev_bdf.fun + i % 7);

		if (mdev) {
			if (logical == logical_idx) {
				*physical_idx = i;
				return mdev;
			}

			pci_dev_put(mdev);
			logical++;
		}
	}

	return NULL;
}

static u32 get_gnr_imc_mmio_offset(void)
{
	if (boot_cpu_data.x86_vfm == INTEL_GRANITERAPIDS_D)
		return I10NM_GNR_D_IMC_MMIO_OFFSET;

	return I10NM_GNR_IMC_MMIO_OFFSET;
}

/**
 * get_ddr_munit() - Get the resource of the i-th DDR memory controller.
 *
 * @d      : The pointer to the structure of CPU socket EDAC device.
 * @i      : The index of the CPU socket relative DDR memory controller.
 * @offset : To store the MMIO offset of the i-th DDR memory controller.
 * @size   : To store the MMIO size of the i-th DDR memory controller.
 *
 * RETURNS : The PCI device of the i-th DDR memory controller, NULL on failure.
 */
static struct pci_dev *get_ddr_munit(struct skx_dev *d, int i, u32 *offset, unsigned long *size)
{
	struct pci_dev *mdev;
	int physical_idx;
	u32 reg;

	switch (res_cfg->type) {
	case GNR:
		if (I10NM_GET_IMC_BAR(d, 0, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get mc0 bar\n");
			return NULL;
		}

		mdev = get_gnr_mdev(d, i, &physical_idx);
		if (!mdev)
			return NULL;

		*offset = I10NM_GET_IMC_MMIO_OFFSET(reg) +
			  get_gnr_imc_mmio_offset() +
			  physical_idx * I10NM_GNR_IMC_MMIO_SIZE;
		*size   = I10NM_GNR_IMC_MMIO_SIZE;

		break;
	default:
		if (I10NM_GET_IMC_BAR(d, i, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get mc%d bar\n", i);
			return NULL;
		}

		mdev = pci_get_dev_wrapper(d->seg,
					   d->bus[res_cfg->ddr_mdev_bdf.bus],
					   res_cfg->ddr_mdev_bdf.dev + i,
					   res_cfg->ddr_mdev_bdf.fun);
		if (!mdev)
			return NULL;

		*offset  = I10NM_GET_IMC_MMIO_OFFSET(reg);
		*size    = I10NM_GET_IMC_MMIO_SIZE(reg);
	}

	return mdev;
}

/**
 * i10nm_imc_absent() - Check whether the memory controller @imc is absent.
 *
 * @imc    : The pointer to the structure of memory controller EDAC device.
 *
 * RETURNS : true if the memory controller EDAC device is absent, false otherwise.
 */
static bool i10nm_imc_absent(struct skx_imc *imc)
{
	u32 mcmtr;
	int i;

	switch (res_cfg->type) {
	case SPR:
		for (i = 0; i < res_cfg->ddr_chan_num; i++) {
			mcmtr = I10NM_GET_MCMTR(imc, i);
			edac_dbg(1, "ch%d mcmtr reg %x\n", i, mcmtr);
			if (mcmtr != ~0)
				return false;
		}

		/*
		 * On some workstations, absent memory controllers still
		 * appear as PCIe devices, misleading the EDAC driver. The
		 * MMIO registers of these absent memory controllers
		 * consistently read as ~0, so identify a memory controller
		 * as absent if its MMIO register "mcmtr" reads as ~0 in all
		 * of its channels.
		 */
		return true;
	default:
		return false;
	}
}

static int i10nm_get_ddr_munits(void)
{
	struct pci_dev *mdev;
	void __iomem *mbase;
	unsigned long size;
	struct skx_dev *d;
	int i, lmc, j = 0;
	u32 reg, off;
	u64 base;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->util_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->util_all_bdf.bus],
						  res_cfg->util_all_bdf.dev,
						  res_cfg->util_all_bdf.fun);
		if (!d->util_all)
			return -ENODEV;

		d->uracu = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->uracu_bdf.bus],
					       res_cfg->uracu_bdf.dev,
					       res_cfg->uracu_bdf.fun);
		if (!d->uracu)
			return -ENODEV;

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
			return -ENODEV;
		}

		base = I10NM_GET_SCK_MMIO_BASE(reg);
		edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
			 j++, base, reg);

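		/*
		 * lmc counts present (logical) memory controllers; i is the
		 * socket-relative (physical) index. Absent controllers are
		 * skipped so that d->imc[] stays densely packed.
		 */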
		for (lmc = 0, i = 0; i < res_cfg->ddr_imc_num; i++) {
			mdev = get_ddr_munit(d, i, &off, &size);

			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No IMC found\n");
				return -ENODEV;
			}
			if (!mdev)
				continue;

			edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
				 i, base + off, size, reg);

			mbase = ioremap(base + off, size);
			if (!mbase) {
				i10nm_printk(KERN_ERR, "Failed to ioremap 0x%llx\n",
					     base + off);
				return -ENODEV;
			}

			d->imc[lmc].mbase = mbase;
			if (i10nm_imc_absent(&d->imc[lmc])) {
				pci_dev_put(mdev);
				iounmap(mbase);
				d->imc[lmc].mbase = NULL;
				edac_dbg(2, "Skip absent mc%d\n", i);
				continue;
			} else {
				d->imc[lmc].mdev = mdev;
				if (res_cfg->type == SPR)
					skx_set_mc_mapping(d, i, lmc);
				lmc++;
			}
		}
	}

	return 0;
}

static bool i10nm_check_hbm_imc(struct skx_dev *d)
{
	u32 reg;

	if (I10NM_GET_CAPID3_CFG(d, reg)) {
		i10nm_printk(KERN_ERR, "Failed to get capid3_cfg\n");
		return false;
	}

	return I10NM_IS_HBM_PRESENT(reg) != 0;
}

static int i10nm_get_hbm_munits(void)
{
	struct pci_dev *mdev;
	void __iomem *mbase;
	u32 reg, off, mcmtr;
	struct skx_dev *d;
	int i, lmc;
	u64 base;

	list_for_each_entry(d, i10nm_edac_list, list) {
		if (!d->pcu_cr3)
			return -ENODEV;

		if (!i10nm_check_hbm_imc(d)) {
			i10nm_printk(KERN_DEBUG, "No hbm memory\n");
			return -ENODEV;
		}

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
			return -ENODEV;
		}
		base = I10NM_GET_SCK_MMIO_BASE(reg);

		if (I10NM_GET_HBM_IMC_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get hbm mc bar\n");
			return -ENODEV;
		}
		base += I10NM_GET_HBM_IMC_MMIO_OFFSET(reg);

		lmc = res_cfg->ddr_imc_num;

		for (i = 0; i < res_cfg->hbm_imc_num; i++) {
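			/*
			 * E.g. with spr_cfg's hbm_mdev_bdf = {0, 12, 1},
			 * i = 5 probes PCI device 12 + 5 / 4 = 13,
			 * function 1 + 5 % 4 = 2.
			 */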
			mdev = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->hbm_mdev_bdf.bus],
						   res_cfg->hbm_mdev_bdf.dev + i / 4,
						   res_cfg->hbm_mdev_bdf.fun + i % 4);

			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No hbm mc found\n");
				return -ENODEV;
			}
			if (!mdev)
				continue;

			d->imc[lmc].mdev = mdev;
			off = i * I10NM_HBM_IMC_MMIO_SIZE;

			edac_dbg(2, "hbm mc%d mmio base 0x%llx size 0x%x\n",
				 lmc, base + off, I10NM_HBM_IMC_MMIO_SIZE);

			mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE);
			if (!mbase) {
				pci_dev_put(d->imc[lmc].mdev);
				d->imc[lmc].mdev = NULL;

				i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n",
					     base + off);
				return -ENOMEM;
			}

			d->imc[lmc].mbase = mbase;
			d->imc[lmc].hbm_mc = true;

			mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0);
			if (!I10NM_IS_HBM_IMC(mcmtr)) {
				iounmap(d->imc[lmc].mbase);
				d->imc[lmc].mbase = NULL;
				d->imc[lmc].hbm_mc = false;
				pci_dev_put(d->imc[lmc].mdev);
				d->imc[lmc].mdev = NULL;

				i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n");
				return -ENODEV;
			}

			lmc++;
		}
	}

	return 0;
}

static struct res_config i10nm_cfg0 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xcc,
	.ddr_imc_num		= 4,
	.ddr_chan_num		= 2,
	.ddr_dimm_num		= 2,
	.ddr_chan_mmio_sz	= 0x4000,
	.sad_all_bdf		= {1, 29, 0},
	.pcu_cr3_bdf		= {1, 30, 3},
	.util_all_bdf		= {1, 29, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 12, 0},
	.hbm_mdev_bdf		= {0, 12, 1},
	.sad_all_offset		= 0x108,
	.reg_rrl_ddr		= &icx_reg_rrl_ddr,
};

static struct res_config i10nm_cfg1 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xd0,
	.ddr_imc_num		= 4,
	.ddr_chan_num		= 2,
	.ddr_dimm_num		= 2,
	.ddr_chan_mmio_sz	= 0x4000,
	.sad_all_bdf		= {1, 29, 0},
	.pcu_cr3_bdf		= {1, 30, 3},
	.util_all_bdf		= {1, 29, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 12, 0},
	.hbm_mdev_bdf		= {0, 12, 1},
	.sad_all_offset		= 0x108,
	.reg_rrl_ddr		= &icx_reg_rrl_ddr,
};

static struct res_config spr_cfg = {
	.type			= SPR,
	.decs_did		= 0x3252,
	.busno_cfg_offset	= 0xd0,
	.ddr_imc_num		= 4,
	.ddr_chan_num		= 2,
	.ddr_dimm_num		= 2,
	.hbm_imc_num		= 16,
	.hbm_chan_num		= 2,
	.hbm_dimm_num		= 1,
	.ddr_chan_mmio_sz	= 0x8000,
	.hbm_chan_mmio_sz	= 0x4000,
	.support_ddr5		= true,
	.sad_all_bdf		= {1, 10, 0},
	.pcu_cr3_bdf		= {1, 30, 3},
	.util_all_bdf		= {1, 29, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 12, 0},
	.hbm_mdev_bdf		= {0, 12, 1},
	.sad_all_offset		= 0x300,
	.reg_rrl_ddr		= &spr_reg_rrl_ddr,
	.reg_rrl_hbm[0]		= &spr_reg_rrl_hbm_pch0,
	.reg_rrl_hbm[1]		= &spr_reg_rrl_hbm_pch1,
};

static struct res_config gnr_cfg = {
	.type			= GNR,
	.decs_did		= 0x3252,
	.busno_cfg_offset	= 0xd0,
	.ddr_imc_num		= 12,
	.ddr_chan_num		= 1,
	.ddr_dimm_num		= 2,
	.ddr_chan_mmio_sz	= 0x4000,
	.support_ddr5		= true,
	.sad_all_bdf		= {0, 13, 0},
	.pcu_cr3_bdf		= {0, 5, 0},
	.util_all_bdf		= {0, 13, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 5, 1},
	.sad_all_offset		= 0x300,
	.reg_rrl_ddr		= &gnr_reg_rrl_ddr,
};

static const struct x86_cpu_id i10nm_cpuids[] = {
	X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MIN, 0x3,          &i10nm_cfg0),
	X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, 0x4,          X86_STEP_MAX, &i10nm_cfg1),
	X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X,      X86_STEP_MIN, 0x3,          &i10nm_cfg0),
	X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X,      0x4,          X86_STEP_MAX, &i10nm_cfg1),
	X86_MATCH_VFM(INTEL_ICELAKE_D,                                        &i10nm_cfg1),

	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_cfg),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,  &spr_cfg),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,  &gnr_cfg),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_D,  &gnr_cfg),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_cfg),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,   &gnr_cfg),
	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,  &gnr_cfg),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);

static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
{
	u32 mcmtr;

	mcmtr = I10NM_GET_MCMTR(imc, chan);
	edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr);

	return !!GET_BITFIELD(mcmtr, 2, 2);
}

static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
				 struct res_config *cfg)
{
	struct skx_pvt *pvt = mci->pvt_info;
	struct skx_imc *imc = pvt->imc;
	u32 mtr, mcddrtcfg = 0;
	struct dimm_info *dimm;
	int i, j, ndimms;

	for (i = 0; i < imc->num_channels; i++) {
		if (!imc->mbase)
			continue;

		ndimms = 0;

		if (res_cfg->type != GNR)
			mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);

		for (j = 0; j < imc->num_dimms; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);
			mtr = I10NM_GET_DIMMMTR(imc, i, j);
			edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
				 mtr, mcddrtcfg, imc->mc, i, j);

			if (IS_DIMM_PRESENT(mtr))
				ndimms += skx_get_dimm_info(mtr, 0, 0, dimm,
							    imc, i, j, cfg);
			else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
				ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
							      EDAC_MOD_STR);
		}
		if (ndimms && !i10nm_check_ecc(imc, i)) {
			i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n",
				     imc->mc, i);
			return -ENODEV;
		}
	}

	return 0;
}

static struct notifier_block i10nm_mce_dec = {
	.notifier_call	= skx_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};

static int __init i10nm_init(void)
{
	u8 mc = 0, src_id = 0;
	const struct x86_cpu_id *id;
	struct res_config *cfg;
	const char *owner;
	struct skx_dev *d;
	int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
	u64 tolm, tohm;
	int imc_num;

	edac_dbg(2, "\n");

	if (ghes_get_devices())
		return -EBUSY;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(i10nm_cpuids);
	if (!id)
		return -ENODEV;

	cfg = (struct res_config *)id->driver_data;
	skx_set_res_cfg(cfg);
	res_cfg = cfg;

	rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
	if (rc)
		return rc;

	rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list);
	if (rc < 0)
		goto fail;
	if (rc == 0) {
		i10nm_printk(KERN_ERR, "No memory controllers found\n");
		return -ENODEV;
	}

	rc = i10nm_get_imc_num(cfg);
	if (rc < 0)
		goto fail;

	mem_cfg_2lm = i10nm_check_2lm(cfg);
	skx_set_mem_cfg(mem_cfg_2lm);

	rc = i10nm_get_ddr_munits();

	if (i10nm_get_hbm_munits() && rc)
		goto fail;

	imc_num = res_cfg->ddr_imc_num + res_cfg->hbm_imc_num;

	list_for_each_entry(d, i10nm_edac_list, list) {
		rc = skx_get_src_id(d, 0xf8, &src_id);
		if (rc < 0)
			goto fail;

		edac_dbg(2, "src_id = %d\n", src_id);
		for (i = 0; i < imc_num; i++) {
			if (!d->imc[i].mdev)
				continue;

			d->imc[i].mc  = mc++;
			d->imc[i].lmc = i;
			d->imc[i].src_id = src_id;
			if (d->imc[i].hbm_mc) {
				d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
				d->imc[i].num_channels = cfg->hbm_chan_num;
				d->imc[i].num_dimms    = cfg->hbm_dimm_num;
			} else {
				d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz;
				d->imc[i].num_channels = cfg->ddr_chan_num;
				d->imc[i].num_dimms    = cfg->ddr_dimm_num;
			}

			rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
					      "Intel_10nm Socket", EDAC_MOD_STR,
					      i10nm_get_dimm_config, cfg);
			if (rc < 0)
				goto fail;
		}
	}

	rc = skx_adxl_get();
	if (rc)
		goto fail;

	opstate_init();
	mce_register_decode_chain(&i10nm_mce_dec);
	skx_setup_debug("i10nm_test");

	if (retry_rd_err_log && res_cfg->reg_rrl_ddr) {
		skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
		if (retry_rd_err_log == 2)
			enable_retry_rd_err_log(true);
	} else {
		skx_set_decode(i10nm_mc_decode, NULL);
	}

	i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);

	return 0;
fail:
	skx_remove();
	return rc;
}

static void __exit i10nm_exit(void)
{
	edac_dbg(2, "\n");

	if (retry_rd_err_log && res_cfg->reg_rrl_ddr) {
		skx_set_decode(NULL, NULL);
		if (retry_rd_err_log == 2)
			enable_retry_rd_err_log(false);
	}

	skx_teardown_debug();
	mce_unregister_decode_chain(&i10nm_mce_dec);
	skx_adxl_put();
	skx_remove();
}

module_init(i10nm_init);
module_exit(i10nm_exit);

static int set_decoding_via_mca(const char *buf, const struct kernel_param *kp)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);

	if (ret || val > 1)
		return -EINVAL;

	if (val && mem_cfg_2lm) {
		i10nm_printk(KERN_NOTICE, "Decoding errors via MCA banks for 2LM isn't supported yet\n");
		return -EIO;
	}

	ret = param_set_int(buf, kp);

	return ret;
}

static const struct kernel_param_ops decoding_via_mca_param_ops = {
	.set = set_decoding_via_mca,
	.get = param_get_int,
};

module_param_cb(decoding_via_mca, &decoding_via_mca_param_ops, &decoding_via_mca, 0644);
MODULE_PARM_DESC(decoding_via_mca, "decoding_via_mca: 0=off(default), 1=enable");

module_param(retry_rd_err_log, int, 0444);
MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values), 2=linux(Linux tries to take control, resets mode bits, and clears valid/UC bits after reading)");
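/*
 * Example (hypothetical command line): booting with
 * "i10nm_edac.retry_rd_err_log=2 i10nm_edac.decoding_via_mca=1" puts the
 * RRL registers under Linux control and decodes errors via MCA bank
 * registers where supported.
 */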

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");