xref: /linux/drivers/edac/versal_edac.c (revision 79997eda0d31bc68203c95ecb978773ee6ce7a1f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx Versal memory controller driver
4  * Copyright (C) 2023 Advanced Micro Devices, Inc.
5  */
6 #include <linux/bitfield.h>
7 #include <linux/edac.h>
8 #include <linux/interrupt.h>
9 #include <linux/module.h>
10 #include <linux/of.h>
11 #include <linux/of_address.h>
12 #include <linux/of_device.h>
13 #include <linux/platform_device.h>
14 #include <linux/sizes.h>
#include <linux/uaccess.h>
15 #include <linux/firmware/xlnx-zynqmp.h>
16 #include <linux/firmware/xlnx-event-manager.h>
17 
18 #include "edac_module.h"
19 
20 /* Granularity of reported error in bytes */
21 #define XDDR_EDAC_ERR_GRAIN			1
22 
23 #define XDDR_EDAC_MSG_SIZE			256
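/*
 * Index of the word in the firmware event notification payload that carries
 * the error-event mask checked in err_callback().
 */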
24 #define EVENT					2
25 
26 #define XDDR_PCSR_OFFSET			0xC
27 #define XDDR_ISR_OFFSET				0x14
28 #define XDDR_IRQ_EN_OFFSET			0x20
29 #define XDDR_IRQ1_EN_OFFSET			0x2C
30 #define XDDR_IRQ_DIS_OFFSET			0x24
31 #define XDDR_IRQ_CE_MASK			GENMASK(18, 15)
32 #define XDDR_IRQ_UE_MASK			GENMASK(14, 11)
33 
34 #define XDDR_REG_CONFIG0_OFFSET			0x258
35 #define XDDR_REG_CONFIG0_BUS_WIDTH_MASK		GENMASK(19, 18)
36 #define XDDR_REG_CONFIG0_NUM_CHANS_MASK		BIT(17)
37 #define XDDR_REG_CONFIG0_NUM_RANKS_MASK		GENMASK(15, 14)
38 #define XDDR_REG_CONFIG0_SIZE_MASK		GENMASK(10, 8)
39 
40 #define XDDR_REG_PINOUT_OFFSET			0x25C
41 #define XDDR_REG_PINOUT_ECC_EN_MASK		GENMASK(7, 5)
42 
43 #define ECCW0_FLIP_CTRL				0x109C
44 #define ECCW0_FLIP0_OFFSET			0x10A0
45 #define ECCW1_FLIP_CTRL				0x10AC
46 #define ECCW1_FLIP0_OFFSET			0x10B0
47 #define ECCR0_CERR_STAT_OFFSET			0x10BC
48 #define ECCR0_CE_ADDR_LO_OFFSET			0x10C0
49 #define ECCR0_CE_ADDR_HI_OFFSET			0x10C4
50 #define ECCR0_CE_DATA_LO_OFFSET			0x10C8
51 #define ECCR0_CE_DATA_HI_OFFSET			0x10CC
52 #define ECCR0_CE_DATA_PAR_OFFSET		0x10D0
53 
54 #define ECCR0_UERR_STAT_OFFSET			0x10D4
55 #define ECCR0_UE_ADDR_LO_OFFSET			0x10D8
56 #define ECCR0_UE_ADDR_HI_OFFSET			0x10DC
57 #define ECCR0_UE_DATA_LO_OFFSET			0x10E0
58 #define ECCR0_UE_DATA_HI_OFFSET			0x10E4
59 #define ECCR0_UE_DATA_PAR_OFFSET		0x10E8
60 
61 #define ECCR1_CERR_STAT_OFFSET			0x10F4
62 #define ECCR1_CE_ADDR_LO_OFFSET			0x10F8
63 #define ECCR1_CE_ADDR_HI_OFFSET			0x10FC
64 #define ECCR1_CE_DATA_LO_OFFSET			0x1100
65 #define ECCR1_CE_DATA_HI_OFFSET			0x110C
66 #define ECCR1_CE_DATA_PAR_OFFSET		0x1108
67 
68 #define ECCR1_UERR_STAT_OFFSET			0x110C
69 #define ECCR1_UE_ADDR_LO_OFFSET			0x1110
70 #define ECCR1_UE_ADDR_HI_OFFSET			0x1114
71 #define ECCR1_UE_DATA_LO_OFFSET			0x1118
72 #define ECCR1_UE_DATA_HI_OFFSET			0x111C
73 #define ECCR1_UE_DATA_PAR_OFFSET		0x1120
74 
75 #define XDDR_NOC_REG_ADEC4_OFFSET		0x44
76 #define RANK_1_MASK				GENMASK(11, 6)
77 #define LRANK_0_MASK				GENMASK(17, 12)
78 #define LRANK_1_MASK				GENMASK(23, 18)
79 #define MASK_24					GENMASK(29, 24)
80 
81 #define XDDR_NOC_REG_ADEC5_OFFSET		0x48
82 #define XDDR_NOC_REG_ADEC6_OFFSET		0x4C
83 #define XDDR_NOC_REG_ADEC7_OFFSET		0x50
84 #define XDDR_NOC_REG_ADEC8_OFFSET		0x54
85 #define XDDR_NOC_REG_ADEC9_OFFSET		0x58
86 #define XDDR_NOC_REG_ADEC10_OFFSET		0x5C
87 
88 #define XDDR_NOC_REG_ADEC11_OFFSET		0x60
89 #define MASK_0					GENMASK(5, 0)
90 #define GRP_0_MASK				GENMASK(11, 6)
91 #define GRP_1_MASK				GENMASK(17, 12)
92 #define CH_0_MASK				GENMASK(23, 18)
93 
94 #define XDDR_NOC_REG_ADEC12_OFFSET		0x71C
95 #define XDDR_NOC_REG_ADEC13_OFFSET		0x720
96 
97 #define XDDR_NOC_REG_ADEC14_OFFSET		0x724
98 #define XDDR_NOC_ROW_MATCH_MASK			GENMASK(17, 0)
99 #define XDDR_NOC_COL_MATCH_MASK			GENMASK(27, 18)
100 #define XDDR_NOC_BANK_MATCH_MASK		GENMASK(29, 28)
101 #define XDDR_NOC_GRP_MATCH_MASK			GENMASK(31, 30)
102 
103 #define XDDR_NOC_REG_ADEC15_OFFSET		0x728
104 #define XDDR_NOC_RANK_MATCH_MASK		GENMASK(1, 0)
105 #define XDDR_NOC_LRANK_MATCH_MASK		GENMASK(4, 2)
106 #define XDDR_NOC_CH_MATCH_MASK			BIT(5)
107 #define XDDR_NOC_MOD_SEL_MASK			BIT(6)
108 #define XDDR_NOC_MATCH_EN_MASK			BIT(8)
109 
110 #define ECCR_UE_CE_ADDR_HI_ROW_MASK		GENMASK(7, 0)
111 
112 #define XDDR_EDAC_NR_CSROWS			1
113 #define XDDR_EDAC_NR_CHANS			1
114 
115 #define XDDR_BUS_WIDTH_64			0
116 #define XDDR_BUS_WIDTH_32			1
117 #define XDDR_BUS_WIDTH_16			2
118 
119 #define ECC_CEPOISON_MASK			0x1
120 #define ECC_UEPOISON_MASK			0x3
121 
122 #define XDDR_MAX_ROW_CNT			18
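/*
 * Maximum number of address bits per DDR address component; these bound the
 * loops that compose and decompose system addresses in convert_to_physical()
 * and poison_setup().
 */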
123 #define XDDR_MAX_COL_CNT			10
124 #define XDDR_MAX_RANK_CNT			2
125 #define XDDR_MAX_LRANK_CNT			3
126 #define XDDR_MAX_BANK_CNT			2
127 #define XDDR_MAX_GRP_CNT			2
128 
129 /*
130  * Config and system registers are usually locked. This is the
131  * key code which unlocks them so that they accept writes. See
132  *
133  * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/PCSR_LOCK-XRAM_SLCR-Register
134  */
135 #define PCSR_UNLOCK_VAL				0xF9E8D7C6
136 #define XDDR_ERR_TYPE_CE			0
137 #define XDDR_ERR_TYPE_UE			1
138 
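/*
 * Encodings of the XDDR_REG_CONFIG0_SIZE_MASK field, translated into a size
 * in bytes by get_memsize().
 */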
139 #define XILINX_DRAM_SIZE_4G			0
140 #define XILINX_DRAM_SIZE_6G			1
141 #define XILINX_DRAM_SIZE_8G			2
142 #define XILINX_DRAM_SIZE_12G			3
143 #define XILINX_DRAM_SIZE_16G			4
144 #define XILINX_DRAM_SIZE_32G			5
145 
146 /**
147  * union ecc_error_info - ECC error log information.
148  * @burstpos:		Burst position.
149  * @lrank:		Logical Rank number.
150  * @rank:		Rank number.
151  * @group:		Group number.
152  * @bank:		Bank number.
153  * @col:		Column number.
154  * @row:		Row number.
155  * @rowhi:		Row number higher bits.
156  * @i:			ECC error info.
157  */
158 union ecc_error_info {
159 	struct {
160 		u32 burstpos:3;
161 		u32 lrank:3;
162 		u32 rank:2;
163 		u32 group:2;
164 		u32 bank:2;
165 		u32 col:10;
166 		u32 row:10;
167 		u32 rowhi;
168 	};
169 	u64 i;
170 } __packed;
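/*
 * The 64-bit value in @i is assembled by get_ce_error_info() and
 * get_ue_error_info() as ADDR_LO | (ADDR_HI << 32), so the bitfields above
 * describe the layout of the ECCR*_{CE,UE}_ADDR_LO registers, with @rowhi
 * holding the upper row bits supplied by the corresponding ADDR_HI register.
 */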
171 
172 union edac_info {
173 	struct {
174 		u32 row0:6;
175 		u32 row1:6;
176 		u32 row2:6;
177 		u32 row3:6;
178 		u32 row4:6;
179 		u32 reserved:2;
180 	};
181 	struct {
182 		u32 col1:6;
183 		u32 col2:6;
184 		u32 col3:6;
185 		u32 col4:6;
186 		u32 col5:6;
187 		u32 reservedcol:2;
188 	};
189 	u32 i;
190 } __packed;
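/*
 * Each NOC ADEC register packs five 6-bit fields, each holding the physical
 * address bit position of one row or column address bit. The two overlapping
 * layouts above are convenience views used by setup_row_address_map() and
 * setup_column_address_map().
 */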
191 
192 /**
193  * struct ecc_status - ECC status information to report.
194  * @ceinfo:	Correctable error log information.
195  * @ueinfo:	Uncorrectable error log information.
196  * @channel:	Channel number.
197  * @error_type:	Error type information.
198  */
199 struct ecc_status {
200 	union ecc_error_info ceinfo[2];
201 	union ecc_error_info ueinfo[2];
202 	u8 channel;
203 	u8 error_type;
204 };
205 
206 /**
207  * struct edac_priv - DDR memory controller private instance data.
208  * @ddrmc_baseaddr:	Base address of the DDR controller.
209  * @ddrmc_noc_baseaddr:	Base address of the DDRMC NOC.
210  * @message:		Buffer for framing the event specific info.
211  * @mc_id:		Memory controller ID.
212  * @ce_cnt:		Correctable error count.
213  * @ue_cnt:		Uncorrectable error count.
214  * @stat:		ECC status information.
215  * @lrank_bit:		Bit shifts for lrank bit.
216  * @rank_bit:		Bit shifts for rank bit.
217  * @row_bit:		Bit shifts for row bit.
218  * @col_bit:		Bit shifts for column bit.
219  * @bank_bit:		Bit shifts for bank bit.
220  * @grp_bit:		Bit shifts for group bit.
221  * @ch_bit:		Bit shifts for channel bit.
222  * @err_inject_addr:	Data poison address.
223  * @debugfs:		Debugfs handle.
224  */
225 struct edac_priv {
226 	void __iomem *ddrmc_baseaddr;
227 	void __iomem *ddrmc_noc_baseaddr;
228 	char message[XDDR_EDAC_MSG_SIZE];
229 	u32 mc_id;
230 	u32 ce_cnt;
231 	u32 ue_cnt;
232 	struct ecc_status stat;
233 	u32 lrank_bit[3];
234 	u32 rank_bit[2];
235 	u32 row_bit[18];
236 	u32 col_bit[10];
237 	u32 bank_bit[2];
238 	u32 grp_bit[2];
239 	u32 ch_bit;
240 #ifdef CONFIG_EDAC_DEBUG
241 	u64 err_inject_addr;
242 	struct dentry *debugfs;
243 #endif
244 };
245 
246 static void get_ce_error_info(struct edac_priv *priv)
247 {
248 	void __iomem *ddrmc_base;
249 	struct ecc_status *p;
250 	u32  regval;
251 	u64  reghi;
252 
253 	ddrmc_base = priv->ddrmc_baseaddr;
254 	p = &priv->stat;
255 
256 	p->error_type = XDDR_ERR_TYPE_CE;
257 	regval = readl(ddrmc_base + ECCR0_CE_ADDR_LO_OFFSET);
258 	reghi = readl(ddrmc_base + ECCR0_CE_ADDR_HI_OFFSET);
259 	reghi &= ECCR_UE_CE_ADDR_HI_ROW_MASK;
260 	p->ceinfo[0].i = regval | reghi << 32;
261 
262 	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
263 		 readl(ddrmc_base + ECCR0_CE_DATA_LO_OFFSET),
264 		 readl(ddrmc_base + ECCR0_CE_DATA_HI_OFFSET),
265 		 readl(ddrmc_base + ECCR0_CE_DATA_PAR_OFFSET));
266 
267 	regval = readl(ddrmc_base + ECCR1_CE_ADDR_LO_OFFSET);
268 	reghi = readl(ddrmc_base + ECCR1_CE_ADDR_HI_OFFSET);
269 	p->ceinfo[1].i = regval | reghi << 32;
271 
272 	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
273 		 readl(ddrmc_base + ECCR1_CE_DATA_LO_OFFSET),
274 		 readl(ddrmc_base + ECCR1_CE_DATA_HI_OFFSET),
275 		 readl(ddrmc_base + ECCR1_CE_DATA_PAR_OFFSET));
276 }
277 
278 static void get_ue_error_info(struct edac_priv *priv)
279 {
280 	void __iomem *ddrmc_base;
281 	struct ecc_status *p;
282 	u32  regval;
283 	u64 reghi;
284 
285 	ddrmc_base = priv->ddrmc_baseaddr;
286 	p = &priv->stat;
287 
288 	p->error_type = XDDR_ERR_TYPE_UE;
289 	regval = readl(ddrmc_base + ECCR0_UE_ADDR_LO_OFFSET);
290 	reghi = readl(ddrmc_base + ECCR0_UE_ADDR_HI_OFFSET);
291 
292 	p->ueinfo[0].i = regval | reghi << 32;
294 
295 	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
296 		 readl(ddrmc_base + ECCR0_UE_DATA_LO_OFFSET),
297 		 readl(ddrmc_base + ECCR0_UE_DATA_HI_OFFSET),
298 		 readl(ddrmc_base + ECCR0_UE_DATA_PAR_OFFSET));
299 
300 	regval = readl(ddrmc_base + ECCR1_UE_ADDR_LO_OFFSET);
301 	reghi = readl(ddrmc_base + ECCR1_UE_ADDR_HI_OFFSET);
302 	p->ueinfo[1].i = regval | reghi << 32;
303 
304 	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
305 		 readl(ddrmc_base + ECCR1_UE_DATA_LO_OFFSET),
306 		 readl(ddrmc_base + ECCR1_UE_DATA_HI_OFFSET),
307 		 readl(ddrmc_base + ECCR1_UE_DATA_PAR_OFFSET));
308 }
309 
310 static bool get_error_info(struct edac_priv *priv)
311 {
312 	u32 eccr0_ceval, eccr1_ceval, eccr0_ueval, eccr1_ueval;
313 	void __iomem *ddrmc_base;
314 	struct ecc_status *p;
315 
316 	ddrmc_base = priv->ddrmc_baseaddr;
317 	p = &priv->stat;
318 
319 	eccr0_ceval = readl(ddrmc_base + ECCR0_CERR_STAT_OFFSET);
320 	eccr1_ceval = readl(ddrmc_base + ECCR1_CERR_STAT_OFFSET);
321 	eccr0_ueval = readl(ddrmc_base + ECCR0_UERR_STAT_OFFSET);
322 	eccr1_ueval = readl(ddrmc_base + ECCR1_UERR_STAT_OFFSET);
323 
324 	if (!eccr0_ceval && !eccr1_ceval && !eccr0_ueval && !eccr1_ueval)
325 		return true;
326 	if (!eccr0_ceval)
327 		p->channel = 1;
328 	else
329 		p->channel = 0;
330 
331 	if (eccr0_ceval || eccr1_ceval)
332 		get_ce_error_info(priv);
333 
334 	if (eccr0_ueval || eccr1_ueval) {
335 		if (!eccr0_ueval)
336 			p->channel = 1;
337 		else
338 			p->channel = 0;
339 		get_ue_error_info(priv);
340 	}
341 
342 	/* Unlock the PCSR registers */
343 	writel(PCSR_UNLOCK_VAL, ddrmc_base + XDDR_PCSR_OFFSET);
344 
345 	writel(0, ddrmc_base + ECCR0_CERR_STAT_OFFSET);
346 	writel(0, ddrmc_base + ECCR1_CERR_STAT_OFFSET);
347 	writel(0, ddrmc_base + ECCR0_UERR_STAT_OFFSET);
348 	writel(0, ddrmc_base + ECCR1_UERR_STAT_OFFSET);
349 
350 	/* Lock the PCSR registers */
351 	writel(1, ddrmc_base + XDDR_PCSR_OFFSET);
352 
353 	return false;
354 }
355 
356 /**
357  * convert_to_physical - Convert to physical address.
358  * @priv:	DDR memory controller private instance data.
359  * @pinf:	ECC error info structure.
360  *
361  * Return: Physical address of the DDR memory.
362  */
363 static unsigned long convert_to_physical(struct edac_priv *priv, union ecc_error_info pinf)
364 {
365 	unsigned long err_addr = 0;
366 	u32 index;
367 	u32 row;
368 
369 	row = pinf.rowhi << 10 | pinf.row;
370 	for (index = 0; index < XDDR_MAX_ROW_CNT; index++) {
371 		err_addr |= (row & BIT(0)) << priv->row_bit[index];
372 		row >>= 1;
373 	}
374 
375 	for (index = 0; index < XDDR_MAX_COL_CNT; index++) {
376 		err_addr |= (pinf.col & BIT(0)) << priv->col_bit[index];
377 		pinf.col >>= 1;
378 	}
379 
380 	for (index = 0; index < XDDR_MAX_BANK_CNT; index++) {
381 		err_addr |= (pinf.bank & BIT(0)) << priv->bank_bit[index];
382 		pinf.bank >>= 1;
383 	}
384 
385 	for (index = 0; index < XDDR_MAX_GRP_CNT; index++) {
386 		err_addr |= (pinf.group & BIT(0)) << priv->grp_bit[index];
387 		pinf.group >>= 1;
388 	}
389 
390 	for (index = 0; index < XDDR_MAX_RANK_CNT; index++) {
391 		err_addr |= (pinf.rank & BIT(0)) << priv->rank_bit[index];
392 		pinf.rank >>= 1;
393 	}
394 
395 	for (index = 0; index < XDDR_MAX_LRANK_CNT; index++) {
396 		err_addr |= (pinf.lrank & BIT(0)) << priv->lrank_bit[index];
397 		pinf.lrank >>= 1;
398 	}
399 
400 	err_addr |= (priv->stat.channel & BIT(0)) << priv->ch_bit;
401 
402 	return err_addr;
403 }
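/*
 * Worked example with illustrative values: if priv->row_bit[0..2] were
 * {5, 6, 7}, a decoded row value of 0b101 would set bits 5 and 7 of the
 * reported address, i.e. each DDR address component is scattered back to the
 * bit positions recorded from the NOC ADEC registers. poison_setup() performs
 * the inverse decomposition when an injection address is written via debugfs.
 */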
404 
405 /**
406  * handle_error - Handle Correctable and Uncorrectable errors.
407  * @mci:	EDAC memory controller instance.
408  * @stat:	ECC status structure.
409  *
410  * Handles ECC correctable and uncorrectable errors.
411  */
412 static void handle_error(struct mem_ctl_info *mci, struct ecc_status *stat)
413 {
414 	struct edac_priv *priv = mci->pvt_info;
415 	union ecc_error_info pinf;
416 
417 	if (stat->error_type == XDDR_ERR_TYPE_CE) {
418 		priv->ce_cnt++;
419 		pinf = stat->ceinfo[stat->channel];
420 		snprintf(priv->message, XDDR_EDAC_MSG_SIZE,
421 			 "Error type:%s MC ID: %d Addr at %lx Burst Pos: %d\n",
422 			 "CE", priv->mc_id,
423 			 convert_to_physical(priv, pinf), pinf.burstpos);
424 
425 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
426 				     1, 0, 0, 0, 0, 0, -1,
427 				     priv->message, "");
428 	}
429 
430 	if (stat->error_type == XDDR_ERR_TYPE_UE) {
431 		priv->ue_cnt++;
432 		pinf = stat->ueinfo[stat->channel];
433 		snprintf(priv->message, XDDR_EDAC_MSG_SIZE,
434 			 "Error type:%s MC ID: %d Addr at %lx Burst Pos: %d\n",
435 			 "UE", priv->mc_id,
436 			 convert_to_physical(priv, pinf), pinf.burstpos);
437 
438 		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
439 				     1, 0, 0, 0, 0, 0, -1,
440 				     priv->message, "");
441 	}
442 
443 	memset(stat, 0, sizeof(*stat));
444 }
445 
446 /**
447  * err_callback - Handle correctable and uncorrectable error events.
448  * @payload:	Payload data from the firmware event notification.
449  * @data:	Pointer to the EDAC memory controller instance.
450  *
451  * Handles ECC correctable and uncorrectable errors.
452  */
453 static void err_callback(const u32 *payload, void *data)
454 {
455 	struct mem_ctl_info *mci = (struct mem_ctl_info *)data;
456 	struct edac_priv *priv;
457 	struct ecc_status *p;
458 	int regval;
459 
460 	priv = mci->pvt_info;
461 	p = &priv->stat;
462 
463 	regval = readl(priv->ddrmc_baseaddr + XDDR_ISR_OFFSET);
464 
465 	if (payload[EVENT] == XPM_EVENT_ERROR_MASK_DDRMC_CR)
466 		p->error_type = XDDR_ERR_TYPE_CE;
467 	if (payload[EVENT] == XPM_EVENT_ERROR_MASK_DDRMC_NCR)
468 		p->error_type = XDDR_ERR_TYPE_UE;
469 
470 	if (get_error_info(priv))
471 		return;
472 
473 	handle_error(mci, &priv->stat);
474 
475 	/* Unlock the PCSR registers */
476 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
477 
478 	/* Clear the ISR */
479 	writel(regval, priv->ddrmc_baseaddr + XDDR_ISR_OFFSET);
480 
481 	/* Lock the PCSR registers */
482 	writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
483 	edac_dbg(3, "Total error count CE %d UE %d\n",
484 		 priv->ce_cnt, priv->ue_cnt);
485 }
486 
487 /**
488  * get_dwidth - Return the controller memory width.
489  * @base:	DDR memory controller base address.
490  *
491  * Get the EDAC device type width appropriate for the controller
492  * configuration.
493  *
494  * Return: a device type width enumeration.
495  */
496 static enum dev_type get_dwidth(const void __iomem *base)
497 {
498 	enum dev_type dt;
499 	u32 regval;
500 	u32 width;
501 
502 	regval = readl(base + XDDR_REG_CONFIG0_OFFSET);
503 	width  = FIELD_GET(XDDR_REG_CONFIG0_BUS_WIDTH_MASK, regval);
504 
505 	switch (width) {
506 	case XDDR_BUS_WIDTH_16:
507 		dt = DEV_X2;
508 		break;
509 	case XDDR_BUS_WIDTH_32:
510 		dt = DEV_X4;
511 		break;
512 	case XDDR_BUS_WIDTH_64:
513 		dt = DEV_X8;
514 		break;
515 	default:
516 		dt = DEV_UNKNOWN;
517 	}
518 
519 	return dt;
520 }
521 
522 /**
523  * get_ecc_state - Return the controller ECC enable/disable status.
524  * @base:	DDR memory controller base address.
525  *
526  * Get the ECC enable/disable status for the controller.
527  *
528  * Return: true if ECC is enabled, false otherwise.
529  */
530 static bool get_ecc_state(void __iomem *base)
531 {
532 	enum dev_type dt;
533 	u32 ecctype;
534 
535 	dt = get_dwidth(base);
536 	if (dt == DEV_UNKNOWN)
537 		return false;
538 
539 	ecctype = readl(base + XDDR_REG_PINOUT_OFFSET);
540 	ecctype &= XDDR_REG_PINOUT_ECC_EN_MASK;
541 
542 	return !!ecctype;
543 }
544 
545 /**
546  * get_memsize - Get the size of the attached memory device.
547  * @priv:	DDR memory controller private instance data.
548  *
549  * Return: the memory size in bytes.
550  */
551 static u64 get_memsize(struct edac_priv *priv)
552 {
553 	u32 regval;
554 	u64 size;
555 
556 	regval = readl(priv->ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
557 	regval  = FIELD_GET(XDDR_REG_CONFIG0_SIZE_MASK, regval);
558 
559 	switch (regval) {
560 	case XILINX_DRAM_SIZE_4G:
561 		size = 4U;      break;
562 	case XILINX_DRAM_SIZE_6G:
563 		size = 6U;      break;
564 	case XILINX_DRAM_SIZE_8G:
565 		size = 8U;      break;
566 	case XILINX_DRAM_SIZE_12G:
567 		size = 12U;     break;
568 	case XILINX_DRAM_SIZE_16G:
569 		size = 16U;     break;
570 	case XILINX_DRAM_SIZE_32G:
571 		size = 32U;     break;
572 	/* Invalid configuration */
573 	default:
574 		size = 0;	break;
575 	}
576 
577 	size *= SZ_1G;
578 	return size;
579 }
580 
581 /**
582  * init_csrows - Initialize the csrow data.
583  * @mci:	EDAC memory controller instance.
584  *
585  * Initialize the chip select rows associated with the EDAC memory
586  * controller instance.
587  */
588 static void init_csrows(struct mem_ctl_info *mci)
589 {
590 	struct edac_priv *priv = mci->pvt_info;
591 	struct csrow_info *csi;
592 	struct dimm_info *dimm;
593 	unsigned long size;
594 	u32 row;
595 	int ch;
596 
597 	size = get_memsize(priv);
598 	for (row = 0; row < mci->nr_csrows; row++) {
599 		csi = mci->csrows[row];
600 		for (ch = 0; ch < csi->nr_channels; ch++) {
601 			dimm = csi->channels[ch]->dimm;
602 			dimm->edac_mode	= EDAC_SECDED;
603 			dimm->mtype = MEM_DDR4;
604 			dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
605 			dimm->grain = XDDR_EDAC_ERR_GRAIN;
606 			dimm->dtype = get_dwidth(priv->ddrmc_baseaddr);
607 		}
608 	}
609 }
610 
611 /**
612  * mc_init - Initialize one driver instance.
613  * @mci:	EDAC memory controller instance.
614  * @pdev:	platform device.
615  *
616  * Perform initialization of the EDAC memory controller instance and
617  * related driver-private data associated with the memory controller the
618  * instance is bound to.
619  */
620 static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
621 {
622 	mci->pdev = &pdev->dev;
623 	platform_set_drvdata(pdev, mci);
624 
625 	/* Initialize controller capabilities and configuration */
626 	mci->mtype_cap = MEM_FLAG_DDR4;
627 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
628 	mci->scrub_cap = SCRUB_HW_SRC;
629 	mci->scrub_mode = SCRUB_NONE;
630 
631 	mci->edac_cap = EDAC_FLAG_SECDED;
632 	mci->ctl_name = "xlnx_ddr_controller";
633 	mci->dev_name = dev_name(&pdev->dev);
634 	mci->mod_name = "xlnx_edac";
635 
636 	edac_op_state = EDAC_OPSTATE_INT;
637 
638 	init_csrows(mci);
639 }
640 
641 static void enable_intr(struct edac_priv *priv)
642 {
643 	/* Unlock the PCSR registers */
644 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
645 
646 	/* Enable UE and CE Interrupts to support the interrupt case */
647 	writel(XDDR_IRQ_CE_MASK | XDDR_IRQ_UE_MASK,
648 	       priv->ddrmc_baseaddr + XDDR_IRQ_EN_OFFSET);
649 
650 	writel(XDDR_IRQ_UE_MASK,
651 	       priv->ddrmc_baseaddr + XDDR_IRQ1_EN_OFFSET);
652 	/* Lock the PCSR registers */
653 	writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
654 }
655 
656 static void disable_intr(struct edac_priv *priv)
657 {
658 	/* Unlock the PCSR registers */
659 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
660 
661 	/* Disable UE/CE Interrupts */
662 	writel(XDDR_IRQ_CE_MASK | XDDR_IRQ_UE_MASK,
663 	       priv->ddrmc_baseaddr + XDDR_IRQ_DIS_OFFSET);
664 
665 	/* Lock the PCSR registers */
666 	writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
667 }
668 
669 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
670 
671 #ifdef CONFIG_EDAC_DEBUG
672 /**
673  * poison_setup - Update poison registers.
674  * @priv:	DDR memory controller private instance data.
675  *
676  * Program the NOC ADEC match registers with the decoded row, column, bank,
677  * group, rank and channel of the address at which the fault is to be injected.
678  * Return: none.
679  */
680 static void poison_setup(struct edac_priv *priv)
681 {
682 	u32 col = 0, row = 0, bank = 0, grp = 0, rank = 0, lrank = 0, ch = 0;
683 	u32 index, regval;
684 
685 	for (index = 0; index < XDDR_MAX_ROW_CNT; index++) {
686 		row |= (((priv->err_inject_addr >> priv->row_bit[index]) &
687 						BIT(0)) << index);
688 	}
689 
690 	for (index = 0; index < XDDR_MAX_COL_CNT; index++) {
691 		col |= (((priv->err_inject_addr >> priv->col_bit[index]) &
692 						BIT(0)) << index);
693 	}
694 
695 	for (index = 0; index < XDDR_MAX_BANK_CNT; index++) {
696 		bank |= (((priv->err_inject_addr >> priv->bank_bit[index]) &
697 						BIT(0)) << index);
698 	}
699 
700 	for (index = 0; index < XDDR_MAX_GRP_CNT; index++) {
701 		grp |= (((priv->err_inject_addr >> priv->grp_bit[index]) &
702 						BIT(0)) << index);
703 	}
704 
705 	for (index = 0; index < XDDR_MAX_RANK_CNT; index++) {
706 		rank |= (((priv->err_inject_addr >> priv->rank_bit[index]) &
707 						BIT(0)) << index);
708 	}
709 
710 	for (index = 0; index < XDDR_MAX_LRANK_CNT; index++) {
711 		lrank |= (((priv->err_inject_addr >> priv->lrank_bit[index]) &
712 						BIT(0)) << index);
713 	}
714 
715 	ch = (priv->err_inject_addr >> priv->ch_bit) & BIT(0);
716 	if (ch)
717 		writel(0xFF, priv->ddrmc_baseaddr + ECCW1_FLIP_CTRL);
718 	else
719 		writel(0xFF, priv->ddrmc_baseaddr + ECCW0_FLIP_CTRL);
720 
721 	writel(0, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC12_OFFSET);
722 	writel(0, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC13_OFFSET);
723 
724 	regval = row & XDDR_NOC_ROW_MATCH_MASK;
725 	regval |= FIELD_PREP(XDDR_NOC_COL_MATCH_MASK, col);
726 	regval |= FIELD_PREP(XDDR_NOC_BANK_MATCH_MASK, bank);
727 	regval |= FIELD_PREP(XDDR_NOC_GRP_MATCH_MASK, grp);
728 	writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC14_OFFSET);
729 
730 	regval = rank & XDDR_NOC_RANK_MATCH_MASK;
731 	regval |= FIELD_PREP(XDDR_NOC_LRANK_MATCH_MASK, lrank);
732 	regval |= FIELD_PREP(XDDR_NOC_CH_MATCH_MASK, ch);
733 	regval |= (XDDR_NOC_MOD_SEL_MASK | XDDR_NOC_MATCH_EN_MASK);
734 	writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC15_OFFSET);
735 }
736 
737 static ssize_t xddr_inject_data_poison_store(struct mem_ctl_info *mci,
738 					     const char *data)
739 {
740 	struct edac_priv *priv = mci->pvt_info;
741 
742 	writel(0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
743 	writel(0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
744 
745 	if (strncmp(data, "CE", 2) == 0) {
746 		writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr +
747 		       ECCW0_FLIP0_OFFSET);
748 		writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr +
749 		       ECCW1_FLIP0_OFFSET);
750 	} else {
751 		writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr +
752 		       ECCW0_FLIP0_OFFSET);
753 		writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr +
754 		       ECCW1_FLIP0_OFFSET);
755 	}
756 
757 	/* Lock the PCSR registers */
758 	writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
759 
760 	return 0;
761 }
762 
763 static ssize_t inject_data_poison_store(struct file *file, const char __user *data,
764 					size_t count, loff_t *ppos)
765 {
766 	struct device *dev = file->private_data;
767 	struct mem_ctl_info *mci = to_mci(dev);
768 	struct edac_priv *priv = mci->pvt_info;
	char buf[3];
	size_t len;

	/* Copy the poison type ("CE"/"UE") from user space before parsing it */
	len = min_t(size_t, count, sizeof(buf) - 1);
	if (copy_from_user(buf, data, len))
		return -EFAULT;
	buf[len] = '\0';

770 	/* Unlock the PCSR registers */
771 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
772 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
773 
774 	poison_setup(priv);
775 
776 	/* Lock the PCSR registers */
777 	writel(1, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
778 
779 	xddr_inject_data_poison_store(mci, buf);
780 
781 	return count;
782 }
783 
784 static const struct file_operations xddr_inject_enable_fops = {
785 	.open = simple_open,
786 	.write = inject_data_poison_store,
787 	.llseek = generic_file_llseek,
788 };
789 
790 static void create_debugfs_attributes(struct mem_ctl_info *mci)
791 {
792 	struct edac_priv *priv = mci->pvt_info;
793 
794 	priv->debugfs = edac_debugfs_create_dir(mci->dev_name);
795 	if (!priv->debugfs)
796 		return;
797 
798 	edac_debugfs_create_file("inject_error", 0200, priv->debugfs,
799 				 &mci->dev, &xddr_inject_enable_fops);
800 	debugfs_create_x64("address", 0600, priv->debugfs,
801 			   &priv->err_inject_addr);
802 	mci->debugfs = priv->debugfs;
803 }
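/*
 * Example error injection sequence from user space (illustrative only; the
 * debugfs path depends on the running system and on mci->dev_name):
 *
 *	echo 0x1000 > /sys/kernel/debug/edac/<dev_name>/address
 *	echo CE > /sys/kernel/debug/edac/<dev_name>/inject_error
 *
 * Writing "CE" arms a single-bit (correctable) poison, any other string arms
 * a double-bit (uncorrectable) poison; a subsequent access to the poisoned
 * address is reported through the normal EDAC handlers.
 */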
804 
805 static inline void process_bit(struct edac_priv *priv, unsigned int start, u32 regval)
806 {
807 	union edac_info rows;
808 
809 	rows.i  = regval;
810 	priv->row_bit[start]	 = rows.row0;
811 	priv->row_bit[start + 1] = rows.row1;
812 	priv->row_bit[start + 2] = rows.row2;
813 	priv->row_bit[start + 3] = rows.row3;
814 	priv->row_bit[start + 4] = rows.row4;
815 }
816 
817 static void setup_row_address_map(struct edac_priv *priv)
818 {
819 	u32 regval;
820 	union edac_info rows;
821 
822 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC5_OFFSET);
823 	process_bit(priv, 0, regval);
824 
825 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC6_OFFSET);
826 	process_bit(priv, 5, regval);
827 
828 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC7_OFFSET);
829 	process_bit(priv, 10, regval);
830 
831 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC8_OFFSET);
832 	rows.i  = regval;
833 
834 	priv->row_bit[15] = rows.row0;
835 	priv->row_bit[16] = rows.row1;
836 	priv->row_bit[17] = rows.row2;
837 }
838 
839 static void setup_column_address_map(struct edac_priv *priv)
840 {
841 	u32 regval;
842 	union edac_info cols;
843 
844 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC8_OFFSET);
845 	priv->col_bit[0] = FIELD_GET(MASK_24, regval);
846 
847 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC9_OFFSET);
848 	cols.i  = regval;
849 	priv->col_bit[1] = cols.col1;
850 	priv->col_bit[2] = cols.col2;
851 	priv->col_bit[3] = cols.col3;
852 	priv->col_bit[4] = cols.col4;
853 	priv->col_bit[5] = cols.col5;
854 
855 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC10_OFFSET);
856 	cols.i  = regval;
857 	priv->col_bit[6] = cols.col1;
858 	priv->col_bit[7] = cols.col2;
859 	priv->col_bit[8] = cols.col3;
860 	priv->col_bit[9] = cols.col4;
861 }
862 
863 static void setup_bank_grp_ch_address_map(struct edac_priv *priv)
864 {
865 	u32 regval;
866 
867 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC10_OFFSET);
868 	priv->bank_bit[0] = FIELD_GET(MASK_24, regval);
869 
870 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC11_OFFSET);
871 	priv->bank_bit[1] = (regval & MASK_0);
872 	priv->grp_bit[0] = FIELD_GET(GRP_0_MASK, regval);
873 	priv->grp_bit[1] = FIELD_GET(GRP_1_MASK, regval);
874 	priv->ch_bit = FIELD_GET(CH_0_MASK, regval);
875 }
876 
877 static void setup_rank_lrank_address_map(struct edac_priv *priv)
878 {
879 	u32 regval;
880 
881 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC4_OFFSET);
882 	priv->rank_bit[0] = (regval & MASK_0);
883 	priv->rank_bit[1] = FIELD_GET(RANK_1_MASK, regval);
884 	priv->lrank_bit[0] = FIELD_GET(LRANK_0_MASK, regval);
885 	priv->lrank_bit[1] = FIELD_GET(LRANK_1_MASK, regval);
886 	priv->lrank_bit[2] = FIELD_GET(MASK_24, regval);
887 }
888 
889 /**
890  * setup_address_map - Set up the DDR address map.
891  * @priv:	DDR memory controller private instance data.
892  *
893  * Set up the address map by querying the NOC ADEC registers.
894  *
895  * Return: none.
896  */
897 static void setup_address_map(struct edac_priv *priv)
898 {
899 	setup_row_address_map(priv);
900 
901 	setup_column_address_map(priv);
902 
903 	setup_bank_grp_ch_address_map(priv);
904 
905 	setup_rank_lrank_address_map(priv);
906 }
907 #endif /* CONFIG_EDAC_DEBUG */
908 
909 static const struct of_device_id xlnx_edac_match[] = {
910 	{ .compatible = "xlnx,versal-ddrmc", },
911 	{
912 		/* end of table */
913 	}
914 };
915 
916 MODULE_DEVICE_TABLE(of, xlnx_edac_match);
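/*
 * Illustrative devicetree node for this driver (sketch only; the dt-bindings
 * for "xlnx,versal-ddrmc" are authoritative). mc_probe() maps the two
 * register regions by the names "base" and "noc":
 *
 *	memory-controller@f6150000 {
 *		compatible = "xlnx,versal-ddrmc";
 *		reg = <0x0 0xf6150000 0x0 0x2000>, <0x0 0xf6070000 0x0 0x20000>;
 *		reg-names = "base", "noc";
 *	};
 */

/*
 * emif_get_id() derives a unique controller ID by ordering the matching
 * devicetree nodes by base address: the controller with the lowest base
 * address gets ID 0, the next lowest gets ID 1, and so on.
 */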
917 static u32 emif_get_id(struct device_node *node)
918 {
919 	u32 addr, my_addr, my_id = 0;
920 	struct device_node *np;
921 	const __be32 *addrp;
922 
923 	addrp = of_get_address(node, 0, NULL, NULL);
924 	my_addr = (u32)of_translate_address(node, addrp);
925 
926 	for_each_matching_node(np, xlnx_edac_match) {
927 		if (np == node)
928 			continue;
929 
930 		addrp = of_get_address(np, 0, NULL, NULL);
931 		addr = (u32)of_translate_address(np, addrp);
932 
933 		edac_printk(KERN_INFO, EDAC_MC,
934 			    "addr=%x, my_addr=%x\n",
935 			    addr, my_addr);
936 
937 		if (addr < my_addr)
938 			my_id++;
939 	}
940 
941 	return my_id;
942 }
943 
944 static int mc_probe(struct platform_device *pdev)
945 {
946 	void __iomem *ddrmc_baseaddr, *ddrmc_noc_baseaddr;
947 	struct edac_mc_layer layers[2];
948 	struct mem_ctl_info *mci;
949 	u8 num_chans, num_csrows;
950 	struct edac_priv *priv;
951 	u32 edac_mc_id, regval;
952 	int rc;
953 
954 	ddrmc_baseaddr = devm_platform_ioremap_resource_byname(pdev, "base");
955 	if (IS_ERR(ddrmc_baseaddr))
956 		return PTR_ERR(ddrmc_baseaddr);
957 
958 	ddrmc_noc_baseaddr = devm_platform_ioremap_resource_byname(pdev, "noc");
959 	if (IS_ERR(ddrmc_noc_baseaddr))
960 		return PTR_ERR(ddrmc_noc_baseaddr);
961 
962 	if (!get_ecc_state(ddrmc_baseaddr))
963 		return -ENXIO;
964 
965 	/* Allocate ID number for the EMIF controller */
966 	edac_mc_id = emif_get_id(pdev->dev.of_node);
967 
968 	regval = readl(ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
969 	num_chans = FIELD_GET(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
970 	num_chans++;
971 
972 	num_csrows = FIELD_GET(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
973 	num_csrows *= 2;
974 	if (!num_csrows)
975 		num_csrows = 1;
976 
977 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
978 	layers[0].size = num_csrows;
979 	layers[0].is_virt_csrow = true;
980 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
981 	layers[1].size = num_chans;
982 	layers[1].is_virt_csrow = false;
983 
984 	mci = edac_mc_alloc(edac_mc_id, ARRAY_SIZE(layers), layers,
985 			    sizeof(struct edac_priv));
986 	if (!mci) {
987 		edac_printk(KERN_ERR, EDAC_MC,
988 			    "Failed memory allocation for mc instance\n");
989 		return -ENOMEM;
990 	}
991 
992 	priv = mci->pvt_info;
993 	priv->ddrmc_baseaddr = ddrmc_baseaddr;
994 	priv->ddrmc_noc_baseaddr = ddrmc_noc_baseaddr;
995 	priv->ce_cnt = 0;
996 	priv->ue_cnt = 0;
997 	priv->mc_id = edac_mc_id;
998 
999 	mc_init(mci, pdev);
1000 
1001 	rc = edac_mc_add_mc(mci);
1002 	if (rc) {
1003 		edac_printk(KERN_ERR, EDAC_MC,
1004 			    "Failed to register with EDAC core\n");
1005 		goto free_edac_mc;
1006 	}
1007 
1008 	rc = xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
1009 				 XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR |
1010 				 XPM_EVENT_ERROR_MASK_NOC_CR | XPM_EVENT_ERROR_MASK_NOC_NCR,
1011 				 false, err_callback, mci);
1012 	if (rc) {
1013 		if (rc == -EACCES)
1014 			rc = -EPROBE_DEFER;
1015 
1016 		goto del_mc;
1017 	}
1018 
1019 #ifdef CONFIG_EDAC_DEBUG
1020 	create_debugfs_attributes(mci);
1021 	setup_address_map(priv);
1022 #endif
1023 	enable_intr(priv);
1024 	return rc;
1025 
1026 del_mc:
1027 	edac_mc_del_mc(&pdev->dev);
1028 free_edac_mc:
1029 	edac_mc_free(mci);
1030 
1031 	return rc;
1032 }
1033 
1034 static int mc_remove(struct platform_device *pdev)
1035 {
1036 	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
1037 	struct edac_priv *priv = mci->pvt_info;
1038 
1039 	disable_intr(priv);
1040 
1041 #ifdef CONFIG_EDAC_DEBUG
1042 	debugfs_remove_recursive(priv->debugfs);
1043 #endif
1044 
1045 	xlnx_unregister_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
1046 			      XPM_EVENT_ERROR_MASK_DDRMC_CR |
1047 			      XPM_EVENT_ERROR_MASK_NOC_CR |
1048 			      XPM_EVENT_ERROR_MASK_NOC_NCR |
1049 			      XPM_EVENT_ERROR_MASK_DDRMC_NCR, err_callback, mci);
1050 	edac_mc_del_mc(&pdev->dev);
1051 	edac_mc_free(mci);
1052 
1053 	return 0;
1054 }
1055 
1056 static struct platform_driver xilinx_ddr_edac_mc_driver = {
1057 	.driver = {
1058 		.name = "xilinx-ddrmc-edac",
1059 		.of_match_table = xlnx_edac_match,
1060 	},
1061 	.probe = mc_probe,
1062 	.remove = mc_remove,
1063 };
1064 
1065 module_platform_driver(xilinx_ddr_edac_mc_driver);
1066 
1067 MODULE_AUTHOR("AMD Inc");
1068 MODULE_DESCRIPTION("Xilinx DDRMC ECC driver");
1069 MODULE_LICENSE("GPL");
1070