xref: /linux/drivers/edac/versal_edac.c (revision 4232da23d75d173195c6766729e51947b64f83cd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx Versal memory controller driver
4  * Copyright (C) 2023 Advanced Micro Devices, Inc.
5  */
6 #include <linux/bitfield.h>
7 #include <linux/edac.h>
8 #include <linux/interrupt.h>
9 #include <linux/module.h>
10 #include <linux/of.h>
11 #include <linux/of_address.h>
12 #include <linux/of_device.h>
13 #include <linux/platform_device.h>
14 #include <linux/sizes.h>
15 #include <linux/firmware/xlnx-zynqmp.h>
16 #include <linux/firmware/xlnx-event-manager.h>
17 
18 #include "edac_module.h"
19 
20 /* Granularity of reported error in bytes */
21 #define XDDR_EDAC_ERR_GRAIN			1
22 
23 #define XDDR_EDAC_MSG_SIZE			256
24 #define EVENT					2
25 
26 #define XDDR_PCSR_OFFSET			0xC
27 #define XDDR_ISR_OFFSET				0x14
28 #define XDDR_IRQ_EN_OFFSET			0x20
29 #define XDDR_IRQ1_EN_OFFSET			0x2C
30 #define XDDR_IRQ_DIS_OFFSET			0x24
31 #define XDDR_IRQ_CE_MASK			GENMASK(18, 15)
32 #define XDDR_IRQ_UE_MASK			GENMASK(14, 11)
33 
34 #define XDDR_REG_CONFIG0_OFFSET			0x258
35 #define XDDR_REG_CONFIG0_BUS_WIDTH_MASK		GENMASK(19, 18)
36 #define XDDR_REG_CONFIG0_NUM_CHANS_MASK		BIT(17)
37 #define XDDR_REG_CONFIG0_NUM_RANKS_MASK		GENMASK(15, 14)
38 #define XDDR_REG_CONFIG0_SIZE_MASK		GENMASK(10, 8)
39 
40 #define XDDR_REG_PINOUT_OFFSET			0x25C
41 #define XDDR_REG_PINOUT_ECC_EN_MASK		GENMASK(7, 5)
42 
43 #define ECCW0_FLIP_CTRL				0x109C
44 #define ECCW0_FLIP0_OFFSET			0x10A0
45 #define ECCW0_FLIP0_BITS			31
46 #define ECCW0_FLIP1_OFFSET			0x10A4
47 #define ECCW1_FLIP_CTRL				0x10AC
48 #define ECCW1_FLIP0_OFFSET			0x10B0
49 #define ECCW1_FLIP1_OFFSET			0x10B4
50 #define ECCR0_CERR_STAT_OFFSET			0x10BC
51 #define ECCR0_CE_ADDR_LO_OFFSET			0x10C0
52 #define ECCR0_CE_ADDR_HI_OFFSET			0x10C4
53 #define ECCR0_CE_DATA_LO_OFFSET			0x10C8
54 #define ECCR0_CE_DATA_HI_OFFSET			0x10CC
55 #define ECCR0_CE_DATA_PAR_OFFSET		0x10D0
56 
57 #define ECCR0_UERR_STAT_OFFSET			0x10D4
58 #define ECCR0_UE_ADDR_LO_OFFSET			0x10D8
59 #define ECCR0_UE_ADDR_HI_OFFSET			0x10DC
60 #define ECCR0_UE_DATA_LO_OFFSET			0x10E0
61 #define ECCR0_UE_DATA_HI_OFFSET			0x10E4
62 #define ECCR0_UE_DATA_PAR_OFFSET		0x10E8
63 
#define ECCR1_CERR_STAT_OFFSET			0x10F4
#define ECCR1_CE_ADDR_LO_OFFSET			0x10F8
#define ECCR1_CE_ADDR_HI_OFFSET			0x10FC
#define ECCR1_CE_DATA_LO_OFFSET			0x1100
/*
 * The CE data registers follow the same consecutive LO/HI/PAR layout as
 * every other ECCR group. 0x110C belongs to ECCR1_UERR_STAT below; the
 * previous DATA_HI value (0x110C) collided with it and skipped 0x1104.
 */
#define ECCR1_CE_DATA_HI_OFFSET			0x1104
#define ECCR1_CE_DATA_PAR_OFFSET		0x1108

#define ECCR1_UERR_STAT_OFFSET			0x110C
#define ECCR1_UE_ADDR_LO_OFFSET			0x1110
#define ECCR1_UE_ADDR_HI_OFFSET			0x1114
#define ECCR1_UE_DATA_LO_OFFSET			0x1118
#define ECCR1_UE_DATA_HI_OFFSET			0x111C
#define ECCR1_UE_DATA_PAR_OFFSET		0x1120
77 
78 #define XDDR_NOC_REG_ADEC4_OFFSET		0x44
79 #define RANK_1_MASK				GENMASK(11, 6)
80 #define LRANK_0_MASK				GENMASK(17, 12)
81 #define LRANK_1_MASK				GENMASK(23, 18)
82 #define MASK_24					GENMASK(29, 24)
83 
84 #define XDDR_NOC_REG_ADEC5_OFFSET		0x48
85 #define XDDR_NOC_REG_ADEC6_OFFSET		0x4C
86 #define XDDR_NOC_REG_ADEC7_OFFSET		0x50
87 #define XDDR_NOC_REG_ADEC8_OFFSET		0x54
88 #define XDDR_NOC_REG_ADEC9_OFFSET		0x58
89 #define XDDR_NOC_REG_ADEC10_OFFSET		0x5C
90 
91 #define XDDR_NOC_REG_ADEC11_OFFSET		0x60
92 #define MASK_0					GENMASK(5, 0)
93 #define GRP_0_MASK				GENMASK(11, 6)
94 #define GRP_1_MASK				GENMASK(17, 12)
95 #define CH_0_MASK				GENMASK(23, 18)
96 
97 #define XDDR_NOC_REG_ADEC12_OFFSET		0x71C
98 #define XDDR_NOC_REG_ADEC13_OFFSET		0x720
99 
100 #define XDDR_NOC_REG_ADEC14_OFFSET		0x724
101 #define XDDR_NOC_ROW_MATCH_MASK			GENMASK(17, 0)
102 #define XDDR_NOC_COL_MATCH_MASK			GENMASK(27, 18)
103 #define XDDR_NOC_BANK_MATCH_MASK		GENMASK(29, 28)
104 #define XDDR_NOC_GRP_MATCH_MASK			GENMASK(31, 30)
105 
106 #define XDDR_NOC_REG_ADEC15_OFFSET		0x728
107 #define XDDR_NOC_RANK_MATCH_MASK		GENMASK(1, 0)
108 #define XDDR_NOC_LRANK_MATCH_MASK		GENMASK(4, 2)
109 #define XDDR_NOC_CH_MATCH_MASK			BIT(5)
110 #define XDDR_NOC_MOD_SEL_MASK			BIT(6)
111 #define XDDR_NOC_MATCH_EN_MASK			BIT(8)
112 
113 #define ECCR_UE_CE_ADDR_HI_ROW_MASK		GENMASK(7, 0)
114 
115 #define XDDR_EDAC_NR_CSROWS			1
116 #define XDDR_EDAC_NR_CHANS			1
117 
118 #define XDDR_BUS_WIDTH_64			0
119 #define XDDR_BUS_WIDTH_32			1
120 #define XDDR_BUS_WIDTH_16			2
121 
122 #define XDDR_MAX_ROW_CNT			18
123 #define XDDR_MAX_COL_CNT			10
124 #define XDDR_MAX_RANK_CNT			2
125 #define XDDR_MAX_LRANK_CNT			3
126 #define XDDR_MAX_BANK_CNT			2
127 #define XDDR_MAX_GRP_CNT			2
128 
129 /*
130  * Config and system registers are usually locked. This is the
131  * code which unlocks them in order to accept writes. See
132  *
133  * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/PCSR_LOCK-XRAM_SLCR-Register
134  */
135 #define PCSR_UNLOCK_VAL				0xF9E8D7C6
136 #define PCSR_LOCK_VAL				1
137 #define XDDR_ERR_TYPE_CE			0
138 #define XDDR_ERR_TYPE_UE			1
139 
140 #define XILINX_DRAM_SIZE_4G			0
141 #define XILINX_DRAM_SIZE_6G			1
142 #define XILINX_DRAM_SIZE_8G			2
143 #define XILINX_DRAM_SIZE_12G			3
144 #define XILINX_DRAM_SIZE_16G			4
145 #define XILINX_DRAM_SIZE_32G			5
146 #define NUM_UE_BITPOS				2
147 
148 /**
149  * struct ecc_error_info - ECC error log information.
150  * @burstpos:		Burst position.
151  * @lrank:		Logical Rank number.
152  * @rank:		Rank number.
153  * @group:		Group number.
154  * @bank:		Bank number.
155  * @col:		Column number.
156  * @row:		Row number.
157  * @rowhi:		Row number higher bits.
158  * @i:			ECC error info.
159  */
160 union ecc_error_info {
161 	struct {
162 		u32 burstpos:3;
163 		u32 lrank:3;
164 		u32 rank:2;
165 		u32 group:2;
166 		u32 bank:2;
167 		u32 col:10;
168 		u32 row:10;
169 		u32 rowhi;
170 	};
171 	u64 i;
172 } __packed;
173 
/*
 * union edac_info - View of a 32-bit NOC ADEC address-decode register.
 *
 * Each ADEC register packs five 6-bit DDR address-bit positions plus two
 * reserved bits. The same storage is read either through the row fields
 * or the column fields, depending on which ADEC register was fetched.
 * @i holds the raw register value.
 */
union edac_info {
	struct {
		u32 row0:6;
		u32 row1:6;
		u32 row2:6;
		u32 row3:6;
		u32 row4:6;
		u32 reserved:2;
	};
	struct {
		u32 col1:6;
		u32 col2:6;
		u32 col3:6;
		u32 col4:6;
		u32 col5:6;
		u32 reservedcol:2;
	};
	u32 i;
} __packed;
193 
194 /**
195  * struct ecc_status - ECC status information to report.
196  * @ceinfo:	Correctable error log information.
197  * @ueinfo:	Uncorrectable error log information.
198  * @channel:	Channel number.
199  * @error_type:	Error type information.
200  */
201 struct ecc_status {
202 	union ecc_error_info ceinfo[2];
203 	union ecc_error_info ueinfo[2];
204 	u8 channel;
205 	u8 error_type;
206 };
207 
208 /**
209  * struct edac_priv - DDR memory controller private instance data.
210  * @ddrmc_baseaddr:	Base address of the DDR controller.
211  * @ddrmc_noc_baseaddr:	Base address of the DDRMC NOC.
212  * @message:		Buffer for framing the event specific info.
213  * @mc_id:		Memory controller ID.
214  * @ce_cnt:		Correctable error count.
215  * @ue_cnt:		UnCorrectable error count.
216  * @stat:		ECC status information.
217  * @lrank_bit:		Bit shifts for lrank bit.
218  * @rank_bit:		Bit shifts for rank bit.
219  * @row_bit:		Bit shifts for row bit.
220  * @col_bit:		Bit shifts for column bit.
221  * @bank_bit:		Bit shifts for bank bit.
222  * @grp_bit:		Bit shifts for group bit.
223  * @ch_bit:		Bit shifts for channel bit.
224  * @err_inject_addr:	Data poison address.
225  * @debugfs:		Debugfs handle.
226  */
227 struct edac_priv {
228 	void __iomem *ddrmc_baseaddr;
229 	void __iomem *ddrmc_noc_baseaddr;
230 	char message[XDDR_EDAC_MSG_SIZE];
231 	u32 mc_id;
232 	u32 ce_cnt;
233 	u32 ue_cnt;
234 	struct ecc_status stat;
235 	u32 lrank_bit[3];
236 	u32 rank_bit[2];
237 	u32 row_bit[18];
238 	u32 col_bit[10];
239 	u32 bank_bit[2];
240 	u32 grp_bit[2];
241 	u32 ch_bit;
242 #ifdef CONFIG_EDAC_DEBUG
243 	u64 err_inject_addr;
244 	struct dentry *debugfs;
245 #endif
246 };
247 
get_ce_error_info(struct edac_priv * priv)248 static void get_ce_error_info(struct edac_priv *priv)
249 {
250 	void __iomem *ddrmc_base;
251 	struct ecc_status *p;
252 	u32  regval;
253 	u64  reghi;
254 
255 	ddrmc_base = priv->ddrmc_baseaddr;
256 	p = &priv->stat;
257 
258 	p->error_type = XDDR_ERR_TYPE_CE;
259 	regval = readl(ddrmc_base + ECCR0_CE_ADDR_LO_OFFSET);
260 	reghi = regval & ECCR_UE_CE_ADDR_HI_ROW_MASK;
261 	p->ceinfo[0].i = regval | reghi << 32;
262 	regval = readl(ddrmc_base + ECCR0_CE_ADDR_HI_OFFSET);
263 
264 	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
265 		 readl(ddrmc_base + ECCR0_CE_DATA_LO_OFFSET),
266 		 readl(ddrmc_base + ECCR0_CE_DATA_HI_OFFSET),
267 		 readl(ddrmc_base + ECCR0_CE_DATA_PAR_OFFSET));
268 
269 	regval = readl(ddrmc_base + ECCR1_CE_ADDR_LO_OFFSET);
270 	reghi = readl(ddrmc_base + ECCR1_CE_ADDR_HI_OFFSET);
271 	p->ceinfo[1].i = regval | reghi << 32;
272 	regval = readl(ddrmc_base + ECCR1_CE_ADDR_HI_OFFSET);
273 
274 	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
275 		 readl(ddrmc_base + ECCR1_CE_DATA_LO_OFFSET),
276 		 readl(ddrmc_base + ECCR1_CE_DATA_HI_OFFSET),
277 		 readl(ddrmc_base + ECCR1_CE_DATA_PAR_OFFSET));
278 }
279 
get_ue_error_info(struct edac_priv * priv)280 static void get_ue_error_info(struct edac_priv *priv)
281 {
282 	void __iomem *ddrmc_base;
283 	struct ecc_status *p;
284 	u32  regval;
285 	u64 reghi;
286 
287 	ddrmc_base = priv->ddrmc_baseaddr;
288 	p = &priv->stat;
289 
290 	p->error_type = XDDR_ERR_TYPE_UE;
291 	regval = readl(ddrmc_base + ECCR0_UE_ADDR_LO_OFFSET);
292 	reghi = readl(ddrmc_base + ECCR0_UE_ADDR_HI_OFFSET);
293 
294 	p->ueinfo[0].i = regval | reghi << 32;
295 	regval = readl(ddrmc_base + ECCR0_UE_ADDR_HI_OFFSET);
296 
297 	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
298 		 readl(ddrmc_base + ECCR0_UE_DATA_LO_OFFSET),
299 		 readl(ddrmc_base + ECCR0_UE_DATA_HI_OFFSET),
300 		 readl(ddrmc_base + ECCR0_UE_DATA_PAR_OFFSET));
301 
302 	regval = readl(ddrmc_base + ECCR1_UE_ADDR_LO_OFFSET);
303 	reghi = readl(ddrmc_base + ECCR1_UE_ADDR_HI_OFFSET);
304 	p->ueinfo[1].i = regval | reghi << 32;
305 
306 	edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
307 		 readl(ddrmc_base + ECCR1_UE_DATA_LO_OFFSET),
308 		 readl(ddrmc_base + ECCR1_UE_DATA_HI_OFFSET),
309 		 readl(ddrmc_base + ECCR1_UE_DATA_PAR_OFFSET));
310 }
311 
get_error_info(struct edac_priv * priv)312 static bool get_error_info(struct edac_priv *priv)
313 {
314 	u32 eccr0_ceval, eccr1_ceval, eccr0_ueval, eccr1_ueval;
315 	void __iomem *ddrmc_base;
316 	struct ecc_status *p;
317 
318 	ddrmc_base = priv->ddrmc_baseaddr;
319 	p = &priv->stat;
320 
321 	eccr0_ceval = readl(ddrmc_base + ECCR0_CERR_STAT_OFFSET);
322 	eccr1_ceval = readl(ddrmc_base + ECCR1_CERR_STAT_OFFSET);
323 	eccr0_ueval = readl(ddrmc_base + ECCR0_UERR_STAT_OFFSET);
324 	eccr1_ueval = readl(ddrmc_base + ECCR1_UERR_STAT_OFFSET);
325 
326 	if (!eccr0_ceval && !eccr1_ceval && !eccr0_ueval && !eccr1_ueval)
327 		return 1;
328 	if (!eccr0_ceval)
329 		p->channel = 1;
330 	else
331 		p->channel = 0;
332 
333 	if (eccr0_ceval || eccr1_ceval)
334 		get_ce_error_info(priv);
335 
336 	if (eccr0_ueval || eccr1_ueval) {
337 		if (!eccr0_ueval)
338 			p->channel = 1;
339 		else
340 			p->channel = 0;
341 		get_ue_error_info(priv);
342 	}
343 
344 	/* Unlock the PCSR registers */
345 	writel(PCSR_UNLOCK_VAL, ddrmc_base + XDDR_PCSR_OFFSET);
346 
347 	writel(0, ddrmc_base + ECCR0_CERR_STAT_OFFSET);
348 	writel(0, ddrmc_base + ECCR1_CERR_STAT_OFFSET);
349 	writel(0, ddrmc_base + ECCR0_UERR_STAT_OFFSET);
350 	writel(0, ddrmc_base + ECCR1_UERR_STAT_OFFSET);
351 
352 	/* Lock the PCSR registers */
353 	writel(1, ddrmc_base + XDDR_PCSR_OFFSET);
354 
355 	return 0;
356 }
357 
358 /**
359  * convert_to_physical - Convert to physical address.
360  * @priv:	DDR memory controller private instance data.
361  * @pinf:	ECC error info structure.
362  *
363  * Return: Physical address of the DDR memory.
364  */
convert_to_physical(struct edac_priv * priv,union ecc_error_info pinf)365 static unsigned long convert_to_physical(struct edac_priv *priv, union ecc_error_info pinf)
366 {
367 	unsigned long err_addr = 0;
368 	u32 index;
369 	u32 row;
370 
371 	row = pinf.rowhi << 10 | pinf.row;
372 	for (index = 0; index < XDDR_MAX_ROW_CNT; index++) {
373 		err_addr |= (row & BIT(0)) << priv->row_bit[index];
374 		row >>= 1;
375 	}
376 
377 	for (index = 0; index < XDDR_MAX_COL_CNT; index++) {
378 		err_addr |= (pinf.col & BIT(0)) << priv->col_bit[index];
379 		pinf.col >>= 1;
380 	}
381 
382 	for (index = 0; index < XDDR_MAX_BANK_CNT; index++) {
383 		err_addr |= (pinf.bank & BIT(0)) << priv->bank_bit[index];
384 		pinf.bank >>= 1;
385 	}
386 
387 	for (index = 0; index < XDDR_MAX_GRP_CNT; index++) {
388 		err_addr |= (pinf.group & BIT(0)) << priv->grp_bit[index];
389 		pinf.group >>= 1;
390 	}
391 
392 	for (index = 0; index < XDDR_MAX_RANK_CNT; index++) {
393 		err_addr |= (pinf.rank & BIT(0)) << priv->rank_bit[index];
394 		pinf.rank >>= 1;
395 	}
396 
397 	for (index = 0; index < XDDR_MAX_LRANK_CNT; index++) {
398 		err_addr |= (pinf.lrank & BIT(0)) << priv->lrank_bit[index];
399 		pinf.lrank >>= 1;
400 	}
401 
402 	err_addr |= (priv->stat.channel & BIT(0)) << priv->ch_bit;
403 
404 	return err_addr;
405 }
406 
407 /**
408  * handle_error - Handle Correctable and Uncorrectable errors.
409  * @mci:	EDAC memory controller instance.
410  * @stat:	ECC status structure.
411  *
412  * Handles ECC correctable and uncorrectable errors.
413  */
handle_error(struct mem_ctl_info * mci,struct ecc_status * stat)414 static void handle_error(struct mem_ctl_info *mci, struct ecc_status *stat)
415 {
416 	struct edac_priv *priv = mci->pvt_info;
417 	union ecc_error_info pinf;
418 
419 	if (stat->error_type == XDDR_ERR_TYPE_CE) {
420 		priv->ce_cnt++;
421 		pinf = stat->ceinfo[stat->channel];
422 		snprintf(priv->message, XDDR_EDAC_MSG_SIZE,
423 			 "Error type:%s MC ID: %d Addr at %lx Burst Pos: %d\n",
424 			 "CE", priv->mc_id,
425 			 convert_to_physical(priv, pinf), pinf.burstpos);
426 
427 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
428 				     1, 0, 0, 0, 0, 0, -1,
429 				     priv->message, "");
430 	}
431 
432 	if (stat->error_type == XDDR_ERR_TYPE_UE) {
433 		priv->ue_cnt++;
434 		pinf = stat->ueinfo[stat->channel];
435 		snprintf(priv->message, XDDR_EDAC_MSG_SIZE,
436 			 "Error type:%s MC ID: %d Addr at %lx Burst Pos: %d\n",
437 			 "UE", priv->mc_id,
438 			 convert_to_physical(priv, pinf), pinf.burstpos);
439 
440 		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
441 				     1, 0, 0, 0, 0, 0, -1,
442 				     priv->message, "");
443 	}
444 
445 	memset(stat, 0, sizeof(*stat));
446 }
447 
448 /**
449  * err_callback - Handle Correctable and Uncorrectable errors.
450  * @payload:	payload data.
451  * @data:	mci controller data.
452  *
453  * Handles ECC correctable and uncorrectable errors.
454  */
err_callback(const u32 * payload,void * data)455 static void err_callback(const u32 *payload, void *data)
456 {
457 	struct mem_ctl_info *mci = (struct mem_ctl_info *)data;
458 	struct edac_priv *priv;
459 	struct ecc_status *p;
460 	int regval;
461 
462 	priv = mci->pvt_info;
463 	p = &priv->stat;
464 
465 	regval = readl(priv->ddrmc_baseaddr + XDDR_ISR_OFFSET);
466 
467 	if (payload[EVENT] == XPM_EVENT_ERROR_MASK_DDRMC_CR)
468 		p->error_type = XDDR_ERR_TYPE_CE;
469 	if (payload[EVENT] == XPM_EVENT_ERROR_MASK_DDRMC_NCR)
470 		p->error_type = XDDR_ERR_TYPE_UE;
471 
472 	if (get_error_info(priv))
473 		return;
474 
475 	handle_error(mci, &priv->stat);
476 
477 	/* Unlock the PCSR registers */
478 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
479 
480 	/* Clear the ISR */
481 	writel(regval, priv->ddrmc_baseaddr + XDDR_ISR_OFFSET);
482 
483 	/* Lock the PCSR registers */
484 	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
485 	edac_dbg(3, "Total error count CE %d UE %d\n",
486 		 priv->ce_cnt, priv->ue_cnt);
487 }
488 
489 /**
490  * get_dwidth - Return the controller memory width.
491  * @base:	DDR memory controller base address.
492  *
493  * Get the EDAC device type width appropriate for the controller
494  * configuration.
495  *
496  * Return: a device type width enumeration.
497  */
/**
 * get_dwidth - Return the controller memory width.
 * @base:	DDR memory controller base address.
 *
 * Map the bus-width field of CONFIG0 to the corresponding EDAC device
 * type width.
 *
 * Return: a device type width enumeration.
 */
static enum dev_type get_dwidth(const void __iomem *base)
{
	u32 cfg0 = readl(base + XDDR_REG_CONFIG0_OFFSET);

	switch (FIELD_GET(XDDR_REG_CONFIG0_BUS_WIDTH_MASK, cfg0)) {
	case XDDR_BUS_WIDTH_16:
		return DEV_X2;
	case XDDR_BUS_WIDTH_32:
		return DEV_X4;
	case XDDR_BUS_WIDTH_64:
		return DEV_X8;
	default:
		return DEV_UNKNOWN;
	}
}
523 
524 /**
525  * get_ecc_state - Return the controller ECC enable/disable status.
526  * @base:	DDR memory controller base address.
527  *
528  * Get the ECC enable/disable status for the controller.
529  *
530  * Return: a ECC status boolean i.e true/false - enabled/disabled.
531  */
get_ecc_state(void __iomem * base)532 static bool get_ecc_state(void __iomem *base)
533 {
534 	enum dev_type dt;
535 	u32 ecctype;
536 
537 	dt = get_dwidth(base);
538 	if (dt == DEV_UNKNOWN)
539 		return false;
540 
541 	ecctype = readl(base + XDDR_REG_PINOUT_OFFSET);
542 	ecctype &= XDDR_REG_PINOUT_ECC_EN_MASK;
543 
544 	return !!ecctype;
545 }
546 
547 /**
548  * get_memsize - Get the size of the attached memory device.
549  * @priv:	DDR memory controller private instance data.
550  *
551  * Return: the memory size in bytes.
552  */
get_memsize(struct edac_priv * priv)553 static u64 get_memsize(struct edac_priv *priv)
554 {
555 	u32 regval;
556 	u64 size;
557 
558 	regval = readl(priv->ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
559 	regval  = FIELD_GET(XDDR_REG_CONFIG0_SIZE_MASK, regval);
560 
561 	switch (regval) {
562 	case XILINX_DRAM_SIZE_4G:
563 		size = 4U;      break;
564 	case XILINX_DRAM_SIZE_6G:
565 		size = 6U;      break;
566 	case XILINX_DRAM_SIZE_8G:
567 		size = 8U;      break;
568 	case XILINX_DRAM_SIZE_12G:
569 		size = 12U;     break;
570 	case XILINX_DRAM_SIZE_16G:
571 		size = 16U;     break;
572 	case XILINX_DRAM_SIZE_32G:
573 		size = 32U;     break;
574 	/* Invalid configuration */
575 	default:
576 		size = 0;	break;
577 	}
578 
579 	size *= SZ_1G;
580 	return size;
581 }
582 
583 /**
584  * init_csrows - Initialize the csrow data.
585  * @mci:	EDAC memory controller instance.
586  *
587  * Initialize the chip select rows associated with the EDAC memory
588  * controller instance.
589  */
init_csrows(struct mem_ctl_info * mci)590 static void init_csrows(struct mem_ctl_info *mci)
591 {
592 	struct edac_priv *priv = mci->pvt_info;
593 	struct csrow_info *csi;
594 	struct dimm_info *dimm;
595 	unsigned long size;
596 	u32 row;
597 	int ch;
598 
599 	size = get_memsize(priv);
600 	for (row = 0; row < mci->nr_csrows; row++) {
601 		csi = mci->csrows[row];
602 		for (ch = 0; ch < csi->nr_channels; ch++) {
603 			dimm = csi->channels[ch]->dimm;
604 			dimm->edac_mode	= EDAC_SECDED;
605 			dimm->mtype = MEM_DDR4;
606 			dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
607 			dimm->grain = XDDR_EDAC_ERR_GRAIN;
608 			dimm->dtype = get_dwidth(priv->ddrmc_baseaddr);
609 		}
610 	}
611 }
612 
613 /**
614  * mc_init - Initialize one driver instance.
615  * @mci:	EDAC memory controller instance.
616  * @pdev:	platform device.
617  *
618  * Perform initialization of the EDAC memory controller instance and
619  * related driver-private data associated with the memory controller the
620  * instance is bound to.
621  */
mc_init(struct mem_ctl_info * mci,struct platform_device * pdev)622 static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
623 {
624 	mci->pdev = &pdev->dev;
625 	platform_set_drvdata(pdev, mci);
626 
627 	/* Initialize controller capabilities and configuration */
628 	mci->mtype_cap = MEM_FLAG_DDR4;
629 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
630 	mci->scrub_cap = SCRUB_HW_SRC;
631 	mci->scrub_mode = SCRUB_NONE;
632 
633 	mci->edac_cap = EDAC_FLAG_SECDED;
634 	mci->ctl_name = "xlnx_ddr_controller";
635 	mci->dev_name = dev_name(&pdev->dev);
636 	mci->mod_name = "xlnx_edac";
637 
638 	edac_op_state = EDAC_OPSTATE_INT;
639 
640 	init_csrows(mci);
641 }
642 
enable_intr(struct edac_priv * priv)643 static void enable_intr(struct edac_priv *priv)
644 {
645 	/* Unlock the PCSR registers */
646 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
647 
648 	/* Enable UE and CE Interrupts to support the interrupt case */
649 	writel(XDDR_IRQ_CE_MASK | XDDR_IRQ_UE_MASK,
650 	       priv->ddrmc_baseaddr + XDDR_IRQ_EN_OFFSET);
651 
652 	writel(XDDR_IRQ_UE_MASK,
653 	       priv->ddrmc_baseaddr + XDDR_IRQ1_EN_OFFSET);
654 	/* Lock the PCSR registers */
655 	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
656 }
657 
disable_intr(struct edac_priv * priv)658 static void disable_intr(struct edac_priv *priv)
659 {
660 	/* Unlock the PCSR registers */
661 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
662 
663 	/* Disable UE/CE Interrupts */
664 	writel(XDDR_IRQ_CE_MASK | XDDR_IRQ_UE_MASK,
665 	       priv->ddrmc_baseaddr + XDDR_IRQ_DIS_OFFSET);
666 
667 	/* Lock the PCSR registers */
668 	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
669 }
670 
671 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
672 
673 #ifdef CONFIG_EDAC_DEBUG
674 /**
675  * poison_setup - Update poison registers.
676  * @priv:	DDR memory controller private instance data.
677  *
678  * Update poison registers as per DDR mapping upon write of the address
679  * location the fault is injected.
680  * Return: none.
681  */
poison_setup(struct edac_priv * priv)682 static void poison_setup(struct edac_priv *priv)
683 {
684 	u32 col = 0, row = 0, bank = 0, grp = 0, rank = 0, lrank = 0, ch = 0;
685 	u32 index, regval;
686 
687 	for (index = 0; index < XDDR_MAX_ROW_CNT; index++) {
688 		row |= (((priv->err_inject_addr >> priv->row_bit[index]) &
689 						BIT(0)) << index);
690 	}
691 
692 	for (index = 0; index < XDDR_MAX_COL_CNT; index++) {
693 		col |= (((priv->err_inject_addr >> priv->col_bit[index]) &
694 						BIT(0)) << index);
695 	}
696 
697 	for (index = 0; index < XDDR_MAX_BANK_CNT; index++) {
698 		bank |= (((priv->err_inject_addr >> priv->bank_bit[index]) &
699 						BIT(0)) << index);
700 	}
701 
702 	for (index = 0; index < XDDR_MAX_GRP_CNT; index++) {
703 		grp |= (((priv->err_inject_addr >> priv->grp_bit[index]) &
704 						BIT(0)) << index);
705 	}
706 
707 	for (index = 0; index < XDDR_MAX_RANK_CNT; index++) {
708 		rank |= (((priv->err_inject_addr >> priv->rank_bit[index]) &
709 						BIT(0)) << index);
710 	}
711 
712 	for (index = 0; index < XDDR_MAX_LRANK_CNT; index++) {
713 		lrank |= (((priv->err_inject_addr >> priv->lrank_bit[index]) &
714 						BIT(0)) << index);
715 	}
716 
717 	ch = (priv->err_inject_addr >> priv->ch_bit) & BIT(0);
718 	if (ch)
719 		writel(0xFF, priv->ddrmc_baseaddr + ECCW1_FLIP_CTRL);
720 	else
721 		writel(0xFF, priv->ddrmc_baseaddr + ECCW0_FLIP_CTRL);
722 
723 	writel(0, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC12_OFFSET);
724 	writel(0, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC13_OFFSET);
725 
726 	regval = row & XDDR_NOC_ROW_MATCH_MASK;
727 	regval |= FIELD_PREP(XDDR_NOC_COL_MATCH_MASK, col);
728 	regval |= FIELD_PREP(XDDR_NOC_BANK_MATCH_MASK, bank);
729 	regval |= FIELD_PREP(XDDR_NOC_GRP_MATCH_MASK, grp);
730 	writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC14_OFFSET);
731 
732 	regval = rank & XDDR_NOC_RANK_MATCH_MASK;
733 	regval |= FIELD_PREP(XDDR_NOC_LRANK_MATCH_MASK, lrank);
734 	regval |= FIELD_PREP(XDDR_NOC_CH_MATCH_MASK, ch);
735 	regval |= (XDDR_NOC_MOD_SEL_MASK | XDDR_NOC_MATCH_EN_MASK);
736 	writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC15_OFFSET);
737 }
738 
/*
 * Program the ECC FLIP registers to corrupt a single bit at @ce_bitpos.
 * Positions below ECCW0_FLIP0_BITS land in FLIP0, the rest in FLIP1; the
 * same pattern is mirrored into both ECC word banks.
 */
static void xddr_inject_data_ce_store(struct mem_ctl_info *mci, u8 ce_bitpos)
{
	struct edac_priv *priv = mci->pvt_info;
	u32 flip0 = 0, flip1 = 0;

	if (ce_bitpos < ECCW0_FLIP0_BITS)
		flip0 = BIT(ce_bitpos);
	else
		flip1 = BIT(ce_bitpos - ECCW0_FLIP0_BITS);

	writel(flip0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
	writel(flip0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
	writel(flip1, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET);
	writel(flip1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET);
}
762 
763 /*
764  * To inject a correctable error, the following steps are needed:
765  *
766  * - Write the correctable error bit position value:
767  *	echo <bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ce
768  *
769  * poison_setup() derives the row, column, bank, group and rank and
770  * writes to the ADEC registers based on the address given by the user.
771  *
772  * The ADEC12 and ADEC13 are mask registers; write 0 to make sure default
773  * configuration is there and no addresses are masked.
774  *
775  * The row, column, bank, group and rank registers are written to the
776  * match ADEC bit to generate errors at the particular address. ADEC14
777  * and ADEC15 have the match bits.
778  *
779  * xddr_inject_data_ce_store() updates the ECC FLIP registers with the
780  * bits to be corrupted based on the bit position given by the user.
781  *
782  * Upon doing a read to the address the errors are injected.
783  */
inject_data_ce_store(struct file * file,const char __user * data,size_t count,loff_t * ppos)784 static ssize_t inject_data_ce_store(struct file *file, const char __user *data,
785 				    size_t count, loff_t *ppos)
786 {
787 	struct device *dev = file->private_data;
788 	struct mem_ctl_info *mci = to_mci(dev);
789 	struct edac_priv *priv = mci->pvt_info;
790 	u8 ce_bitpos;
791 	int ret;
792 
793 	ret = kstrtou8_from_user(data, count, 0, &ce_bitpos);
794 	if (ret)
795 		return ret;
796 
797 	/* Unlock the PCSR registers */
798 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
799 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
800 
801 	poison_setup(priv);
802 
803 	xddr_inject_data_ce_store(mci, ce_bitpos);
804 	ret = count;
805 
806 	/* Lock the PCSR registers */
807 	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
808 	writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
809 
810 	return ret;
811 }
812 
/* debugfs "inject_ce" file: write-only, parsed by inject_data_ce_store() */
static const struct file_operations xddr_inject_ce_fops = {
	.open = simple_open,
	.write = inject_data_ce_store,
	.llseek = generic_file_llseek,
};
818 
/*
 * Program the UE flip masks: val0 carries the FLIP0 (low) bits and val1
 * the FLIP1 (high) bits, mirrored into both ECC word banks — matching
 * the CE path. The previous code wrote val0 into ECCW0_FLIP1, wrote
 * ECCW1_FLIP1 twice and never programmed ECCW1_FLIP0.
 */
static void xddr_inject_data_ue_store(struct mem_ctl_info *mci, u32 val0, u32 val1)
{
	struct edac_priv *priv = mci->pvt_info;

	writel(val0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
	writel(val0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
	writel(val1, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET);
	writel(val1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET);
}
828 
829 /*
830  * To inject an uncorrectable error, the following steps are needed:
831  *	echo <bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ue
832  *
833  * poison_setup() derives the row, column, bank, group and rank and
834  * writes to the ADEC registers based on the address given by the user.
835  *
836  * The ADEC12 and ADEC13 are mask registers; write 0 so that none of the
837  * addresses are masked. The row, column, bank, group and rank registers
838  * are written to the match ADEC bit to generate errors at the
839  * particular address. ADEC14 and ADEC15 have the match bits.
840  *
841  * xddr_inject_data_ue_store() updates the ECC FLIP registers with the
842  * bits to be corrupted based on the bit position given by the user. For
843  * uncorrectable errors
844  * 2 bit errors are injected.
845  *
846  * Upon doing a read to the address the errors are injected.
847  */
inject_data_ue_store(struct file * file,const char __user * data,size_t count,loff_t * ppos)848 static ssize_t inject_data_ue_store(struct file *file, const char __user *data,
849 				    size_t count, loff_t *ppos)
850 {
851 	struct device *dev = file->private_data;
852 	struct mem_ctl_info *mci = to_mci(dev);
853 	struct edac_priv *priv = mci->pvt_info;
854 	char buf[6], *pbuf, *token[2];
855 	u32 val0 = 0, val1 = 0;
856 	u8 len, ue0, ue1;
857 	int i, ret;
858 
859 	len = min_t(size_t, count, sizeof(buf));
860 	if (copy_from_user(buf, data, len))
861 		return -EFAULT;
862 
863 	buf[len] = '\0';
864 	pbuf = &buf[0];
865 	for (i = 0; i < NUM_UE_BITPOS; i++)
866 		token[i] = strsep(&pbuf, ",");
867 
868 	if (!token[0] || !token[1])
869 		return -EFAULT;
870 
871 	ret = kstrtou8(token[0], 0, &ue0);
872 	if (ret)
873 		return ret;
874 
875 	ret = kstrtou8(token[1], 0, &ue1);
876 	if (ret)
877 		return ret;
878 
879 	if (ue0 < ECCW0_FLIP0_BITS) {
880 		val0 = BIT(ue0);
881 	} else {
882 		ue0 = ue0 - ECCW0_FLIP0_BITS;
883 		val1 = BIT(ue0);
884 	}
885 
886 	if (ue1 < ECCW0_FLIP0_BITS) {
887 		val0 |= BIT(ue1);
888 	} else {
889 		ue1 = ue1 - ECCW0_FLIP0_BITS;
890 		val1 |= BIT(ue1);
891 	}
892 
893 	/* Unlock the PCSR registers */
894 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
895 	writel(PCSR_UNLOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
896 
897 	poison_setup(priv);
898 
899 	xddr_inject_data_ue_store(mci, val0, val1);
900 
901 	/* Lock the PCSR registers */
902 	writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
903 	writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
904 	return count;
905 }
906 
/* debugfs "inject_ue" file: write-only, parsed by inject_data_ue_store() */
static const struct file_operations xddr_inject_ue_fops = {
	.open = simple_open,
	.write = inject_data_ue_store,
	.llseek = generic_file_llseek,
};
912 
create_debugfs_attributes(struct mem_ctl_info * mci)913 static void create_debugfs_attributes(struct mem_ctl_info *mci)
914 {
915 	struct edac_priv *priv = mci->pvt_info;
916 
917 	priv->debugfs = edac_debugfs_create_dir(mci->dev_name);
918 	if (!priv->debugfs)
919 		return;
920 
921 	if (!edac_debugfs_create_file("inject_ce", 0200, priv->debugfs,
922 				      &mci->dev, &xddr_inject_ce_fops)) {
923 		debugfs_remove_recursive(priv->debugfs);
924 		return;
925 	}
926 
927 	if (!edac_debugfs_create_file("inject_ue", 0200, priv->debugfs,
928 				      &mci->dev, &xddr_inject_ue_fops)) {
929 		debugfs_remove_recursive(priv->debugfs);
930 		return;
931 	}
932 	debugfs_create_x64("address", 0600, priv->debugfs,
933 			   &priv->err_inject_addr);
934 	mci->debugfs = priv->debugfs;
935 }
936 
/* Unpack five 6-bit row bit positions from one ADEC register into row_bit[]. */
static inline void process_bit(struct edac_priv *priv, unsigned int start, u32 regval)
{
	union edac_info fields = { .i = regval };

	priv->row_bit[start + 0] = fields.row0;
	priv->row_bit[start + 1] = fields.row1;
	priv->row_bit[start + 2] = fields.row2;
	priv->row_bit[start + 3] = fields.row3;
	priv->row_bit[start + 4] = fields.row4;
}
948 
setup_row_address_map(struct edac_priv * priv)949 static void setup_row_address_map(struct edac_priv *priv)
950 {
951 	u32 regval;
952 	union edac_info rows;
953 
954 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC5_OFFSET);
955 	process_bit(priv, 0, regval);
956 
957 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC6_OFFSET);
958 	process_bit(priv, 5, regval);
959 
960 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC7_OFFSET);
961 	process_bit(priv, 10, regval);
962 
963 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC8_OFFSET);
964 	rows.i  = regval;
965 
966 	priv->row_bit[15] = rows.row0;
967 	priv->row_bit[16] = rows.row1;
968 	priv->row_bit[17] = rows.row2;
969 }
970 
setup_column_address_map(struct edac_priv * priv)971 static void setup_column_address_map(struct edac_priv *priv)
972 {
973 	u32 regval;
974 	union edac_info cols;
975 
976 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC8_OFFSET);
977 	priv->col_bit[0] = FIELD_GET(MASK_24, regval);
978 
979 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC9_OFFSET);
980 	cols.i  = regval;
981 	priv->col_bit[1] = cols.col1;
982 	priv->col_bit[2] = cols.col2;
983 	priv->col_bit[3] = cols.col3;
984 	priv->col_bit[4] = cols.col4;
985 	priv->col_bit[5] = cols.col5;
986 
987 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC10_OFFSET);
988 	cols.i  = regval;
989 	priv->col_bit[6] = cols.col1;
990 	priv->col_bit[7] = cols.col2;
991 	priv->col_bit[8] = cols.col3;
992 	priv->col_bit[9] = cols.col4;
993 }
994 
setup_bank_grp_ch_address_map(struct edac_priv * priv)995 static void setup_bank_grp_ch_address_map(struct edac_priv *priv)
996 {
997 	u32 regval;
998 
999 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC10_OFFSET);
1000 	priv->bank_bit[0] = FIELD_GET(MASK_24, regval);
1001 
1002 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC11_OFFSET);
1003 	priv->bank_bit[1] = (regval & MASK_0);
1004 	priv->grp_bit[0] = FIELD_GET(GRP_0_MASK, regval);
1005 	priv->grp_bit[1] = FIELD_GET(GRP_1_MASK, regval);
1006 	priv->ch_bit = FIELD_GET(CH_0_MASK, regval);
1007 }
1008 
setup_rank_lrank_address_map(struct edac_priv * priv)1009 static void setup_rank_lrank_address_map(struct edac_priv *priv)
1010 {
1011 	u32 regval;
1012 
1013 	regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC4_OFFSET);
1014 	priv->rank_bit[0] = (regval & MASK_0);
1015 	priv->rank_bit[1] = FIELD_GET(RANK_1_MASK, regval);
1016 	priv->lrank_bit[0] = FIELD_GET(LRANK_0_MASK, regval);
1017 	priv->lrank_bit[1] = FIELD_GET(LRANK_1_MASK, regval);
1018 	priv->lrank_bit[2] = FIELD_GET(MASK_24, regval);
1019 }
1020 
1021 /**
1022  * setup_address_map - Set Address Map by querying ADDRMAP registers.
1023  * @priv:	DDR memory controller private instance data.
1024  *
1025  * Set Address Map by querying ADDRMAP registers.
1026  *
1027  * Return: none.
1028  */
setup_address_map(struct edac_priv * priv)1029 static void setup_address_map(struct edac_priv *priv)
1030 {
1031 	setup_row_address_map(priv);
1032 
1033 	setup_column_address_map(priv);
1034 
1035 	setup_bank_grp_ch_address_map(priv);
1036 
1037 	setup_rank_lrank_address_map(priv);
1038 }
1039 #endif /* CONFIG_EDAC_DEBUG */
1040 
1041 static const struct of_device_id xlnx_edac_match[] = {
1042 	{ .compatible = "xlnx,versal-ddrmc", },
1043 	{
1044 		/* end of table */
1045 	}
1046 };
1047 
1048 MODULE_DEVICE_TABLE(of, xlnx_edac_match);
emif_get_id(struct device_node * node)1049 static u32 emif_get_id(struct device_node *node)
1050 {
1051 	u32 addr, my_addr, my_id = 0;
1052 	struct device_node *np;
1053 	const __be32 *addrp;
1054 
1055 	addrp = of_get_address(node, 0, NULL, NULL);
1056 	my_addr = (u32)of_translate_address(node, addrp);
1057 
1058 	for_each_matching_node(np, xlnx_edac_match) {
1059 		if (np == node)
1060 			continue;
1061 
1062 		addrp = of_get_address(np, 0, NULL, NULL);
1063 		addr = (u32)of_translate_address(np, addrp);
1064 
1065 		edac_printk(KERN_INFO, EDAC_MC,
1066 			    "addr=%x, my_addr=%x\n",
1067 			    addr, my_addr);
1068 
1069 		if (addr < my_addr)
1070 			my_id++;
1071 	}
1072 
1073 	return my_id;
1074 }
1075 
mc_probe(struct platform_device * pdev)1076 static int mc_probe(struct platform_device *pdev)
1077 {
1078 	void __iomem *ddrmc_baseaddr, *ddrmc_noc_baseaddr;
1079 	struct edac_mc_layer layers[2];
1080 	struct mem_ctl_info *mci;
1081 	u8 num_chans, num_csrows;
1082 	struct edac_priv *priv;
1083 	u32 edac_mc_id, regval;
1084 	int rc;
1085 
1086 	ddrmc_baseaddr = devm_platform_ioremap_resource_byname(pdev, "base");
1087 	if (IS_ERR(ddrmc_baseaddr))
1088 		return PTR_ERR(ddrmc_baseaddr);
1089 
1090 	ddrmc_noc_baseaddr = devm_platform_ioremap_resource_byname(pdev, "noc");
1091 	if (IS_ERR(ddrmc_noc_baseaddr))
1092 		return PTR_ERR(ddrmc_noc_baseaddr);
1093 
1094 	if (!get_ecc_state(ddrmc_baseaddr))
1095 		return -ENXIO;
1096 
1097 	/* Allocate ID number for the EMIF controller */
1098 	edac_mc_id = emif_get_id(pdev->dev.of_node);
1099 
1100 	regval = readl(ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
1101 	num_chans = FIELD_GET(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
1102 	num_chans++;
1103 
1104 	num_csrows = FIELD_GET(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
1105 	num_csrows *= 2;
1106 	if (!num_csrows)
1107 		num_csrows = 1;
1108 
1109 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1110 	layers[0].size = num_csrows;
1111 	layers[0].is_virt_csrow = true;
1112 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
1113 	layers[1].size = num_chans;
1114 	layers[1].is_virt_csrow = false;
1115 
1116 	mci = edac_mc_alloc(edac_mc_id, ARRAY_SIZE(layers), layers,
1117 			    sizeof(struct edac_priv));
1118 	if (!mci) {
1119 		edac_printk(KERN_ERR, EDAC_MC,
1120 			    "Failed memory allocation for mc instance\n");
1121 		return -ENOMEM;
1122 	}
1123 
1124 	priv = mci->pvt_info;
1125 	priv->ddrmc_baseaddr = ddrmc_baseaddr;
1126 	priv->ddrmc_noc_baseaddr = ddrmc_noc_baseaddr;
1127 	priv->ce_cnt = 0;
1128 	priv->ue_cnt = 0;
1129 	priv->mc_id = edac_mc_id;
1130 
1131 	mc_init(mci, pdev);
1132 
1133 	rc = edac_mc_add_mc(mci);
1134 	if (rc) {
1135 		edac_printk(KERN_ERR, EDAC_MC,
1136 			    "Failed to register with EDAC core\n");
1137 		goto free_edac_mc;
1138 	}
1139 
1140 	rc = xlnx_register_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
1141 				 XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR,
1142 				 false, err_callback, mci);
1143 	if (rc) {
1144 		if (rc == -EACCES)
1145 			rc = -EPROBE_DEFER;
1146 
1147 		goto del_mc;
1148 	}
1149 
1150 #ifdef CONFIG_EDAC_DEBUG
1151 	create_debugfs_attributes(mci);
1152 	setup_address_map(priv);
1153 #endif
1154 	enable_intr(priv);
1155 	return rc;
1156 
1157 del_mc:
1158 	edac_mc_del_mc(&pdev->dev);
1159 free_edac_mc:
1160 	edac_mc_free(mci);
1161 
1162 	return rc;
1163 }
1164 
mc_remove(struct platform_device * pdev)1165 static void mc_remove(struct platform_device *pdev)
1166 {
1167 	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
1168 	struct edac_priv *priv = mci->pvt_info;
1169 
1170 	disable_intr(priv);
1171 
1172 #ifdef CONFIG_EDAC_DEBUG
1173 	debugfs_remove_recursive(priv->debugfs);
1174 #endif
1175 
1176 	xlnx_unregister_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
1177 			      XPM_EVENT_ERROR_MASK_DDRMC_CR |
1178 			      XPM_EVENT_ERROR_MASK_DDRMC_NCR, err_callback, mci);
1179 	edac_mc_del_mc(&pdev->dev);
1180 	edac_mc_free(mci);
1181 }
1182 
1183 static struct platform_driver xilinx_ddr_edac_mc_driver = {
1184 	.driver = {
1185 		.name = "xilinx-ddrmc-edac",
1186 		.of_match_table = xlnx_edac_match,
1187 	},
1188 	.probe = mc_probe,
1189 	.remove_new = mc_remove,
1190 };
1191 
1192 module_platform_driver(xilinx_ddr_edac_mc_driver);
1193 
1194 MODULE_AUTHOR("AMD Inc");
1195 MODULE_DESCRIPTION("Xilinx DDRMC ECC driver");
1196 MODULE_LICENSE("GPL");
1197