xref: /linux/drivers/edac/synopsys_edac.c (revision d7223aed30cd77be31dabd635e709828f3255366)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Synopsys DDR ECC Driver
4  * This driver is based on ppc4xx_edac.c drivers
5  *
6  * Copyright (C) 2012 - 2014 Xilinx, Inc.
7  */
8 
9 #include <linux/edac.h>
10 #include <linux/module.h>
11 #include <linux/platform_device.h>
12 #include <linux/spinlock.h>
13 #include <linux/sizes.h>
14 #include <linux/interrupt.h>
15 #include <linux/of.h>
16 
17 #include "edac_module.h"
18 
19 /* Number of cs_rows needed per memory controller */
20 #define SYNPS_EDAC_NR_CSROWS		1
21 
22 /* Number of channels per memory controller */
23 #define SYNPS_EDAC_NR_CHANS		1
24 
25 /* Granularity of reported error in bytes */
26 #define SYNPS_EDAC_ERR_GRAIN		1
27 
28 #define SYNPS_EDAC_MSG_SIZE		256
29 
30 #define SYNPS_EDAC_MOD_STRING		"synps_edac"
31 #define SYNPS_EDAC_MOD_VER		"1"
32 
33 /* Synopsys DDR memory controller registers that are relevant to ECC */
34 #define CTRL_OFST			0x0
35 #define T_ZQ_OFST			0xA4
36 
37 /* ECC control register */
38 #define ECC_CTRL_OFST			0xC4
39 /* ECC log register */
40 #define CE_LOG_OFST			0xC8
41 /* ECC address register */
42 #define CE_ADDR_OFST			0xCC
43 /* ECC data[31:0] register */
44 #define CE_DATA_31_0_OFST		0xD0
45 
46 /* Uncorrectable error info registers */
47 #define UE_LOG_OFST			0xDC
48 #define UE_ADDR_OFST			0xE0
49 #define UE_DATA_31_0_OFST		0xE4
50 
51 #define STAT_OFST			0xF0
52 #define SCRUB_OFST			0xF4
53 
54 /* Control register bit field definitions */
55 #define CTRL_BW_MASK			0xC
56 #define CTRL_BW_SHIFT			2
57 
58 #define DDRCTL_WDTH_16			1
59 #define DDRCTL_WDTH_32			0
60 
61 /* ZQ register bit field definitions */
62 #define T_ZQ_DDRMODE_MASK		0x2
63 
64 /* ECC control register bit field definitions */
65 #define ECC_CTRL_CLR_CE_ERR		0x2
66 #define ECC_CTRL_CLR_UE_ERR		0x1
67 
68 /* ECC correctable/uncorrectable error log register definitions */
69 #define LOG_VALID			0x1
70 #define CE_LOG_BITPOS_MASK		0xFE
71 #define CE_LOG_BITPOS_SHIFT		1
72 
73 /* ECC correctable/uncorrectable error address register definitions */
74 #define ADDR_COL_MASK			0xFFF
75 #define ADDR_ROW_MASK			0xFFFF000
76 #define ADDR_ROW_SHIFT			12
77 #define ADDR_BANK_MASK			0x70000000
78 #define ADDR_BANK_SHIFT			28
79 
80 /* ECC statistic register definitions */
81 #define STAT_UECNT_MASK			0xFF
82 #define STAT_CECNT_MASK			0xFF00
83 #define STAT_CECNT_SHIFT		8
84 
85 /* ECC scrub register definitions */
86 #define SCRUB_MODE_MASK			0x7
87 #define SCRUB_MODE_SECDED		0x4
88 
89 /* DDR ECC Quirks */
90 #define DDR_ECC_INTR_SUPPORT		BIT(0)
91 #define DDR_ECC_DATA_POISON_SUPPORT	BIT(1)
92 #define DDR_ECC_INTR_SELF_CLEAR		BIT(2)
93 
94 /* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
95 /* ECC Configuration Registers */
96 #define ECC_CFG0_OFST			0x70
97 #define ECC_CFG1_OFST			0x74
98 
99 /* ECC Status Register */
100 #define ECC_STAT_OFST			0x78
101 
102 /* ECC Clear Register */
103 #define ECC_CLR_OFST			0x7C
104 
105 /* ECC Error count Register */
106 #define ECC_ERRCNT_OFST			0x80
107 
108 /* ECC Corrected Error Address Register */
109 #define ECC_CEADDR0_OFST		0x84
110 #define ECC_CEADDR1_OFST		0x88
111 
112 /* ECC Syndrome Registers */
113 #define ECC_CSYND0_OFST			0x8C
114 #define ECC_CSYND1_OFST			0x90
115 #define ECC_CSYND2_OFST			0x94
116 
117 /* ECC Bit Mask0 Address Register */
118 #define ECC_BITMASK0_OFST		0x98
119 #define ECC_BITMASK1_OFST		0x9C
120 #define ECC_BITMASK2_OFST		0xA0
121 
122 /* ECC UnCorrected Error Address Register */
123 #define ECC_UEADDR0_OFST		0xA4
124 #define ECC_UEADDR1_OFST		0xA8
125 
126 /* ECC Syndrome Registers */
127 #define ECC_UESYND0_OFST		0xAC
128 #define ECC_UESYND1_OFST		0xB0
129 #define ECC_UESYND2_OFST		0xB4
130 
131 /* ECC Poison Address Reg */
132 #define ECC_POISON0_OFST		0xB8
133 #define ECC_POISON1_OFST		0xBC
134 
135 #define ECC_ADDRMAP0_OFFSET		0x200
136 
137 /* Control register bitfield definitions */
138 #define ECC_CTRL_BUSWIDTH_MASK		0x3000
139 #define ECC_CTRL_BUSWIDTH_SHIFT		12
140 #define ECC_CTRL_CLR_CE_ERRCNT		BIT(2)
141 #define ECC_CTRL_CLR_UE_ERRCNT		BIT(3)
142 
143 /* DDR Control Register width definitions  */
144 #define DDRCTL_EWDTH_16			2
145 #define DDRCTL_EWDTH_32			1
146 #define DDRCTL_EWDTH_64			0
147 
148 /* ECC status register definitions */
149 #define ECC_STAT_UECNT_MASK		0xF0000
150 #define ECC_STAT_UECNT_SHIFT		16
151 #define ECC_STAT_CECNT_MASK		0xF00
152 #define ECC_STAT_CECNT_SHIFT		8
153 #define ECC_STAT_BITNUM_MASK		0x7F
154 
155 /* ECC error count register definitions */
156 #define ECC_ERRCNT_UECNT_MASK		0xFFFF0000
157 #define ECC_ERRCNT_UECNT_SHIFT		16
158 #define ECC_ERRCNT_CECNT_MASK		0xFFFF
159 
160 /* DDR QOS Interrupt register definitions */
161 #define DDR_QOS_IRQ_STAT_OFST		0x20200
162 #define DDR_QOSUE_MASK			0x4
163 #define	DDR_QOSCE_MASK			0x2
164 #define	ECC_CE_UE_INTR_MASK		0x6
165 #define DDR_QOS_IRQ_EN_OFST		0x20208
166 #define DDR_QOS_IRQ_DB_OFST		0x2020C
167 
168 /* DDR QOS Interrupt register definitions */
169 #define DDR_UE_MASK			BIT(9)
170 #define DDR_CE_MASK			BIT(8)
171 
172 /* ECC Corrected Error Register Mask and Shifts*/
173 #define ECC_CEADDR0_RW_MASK		0x3FFFF
174 #define ECC_CEADDR0_RNK_MASK		BIT(24)
175 #define ECC_CEADDR1_BNKGRP_MASK		0x3000000
176 #define ECC_CEADDR1_BNKNR_MASK		0x70000
177 #define ECC_CEADDR1_BLKNR_MASK		0xFFF
178 #define ECC_CEADDR1_BNKGRP_SHIFT	24
179 #define ECC_CEADDR1_BNKNR_SHIFT		16
180 
181 /* ECC Poison register shifts */
182 #define ECC_POISON0_RANK_SHIFT		24
183 #define ECC_POISON0_RANK_MASK		BIT(24)
184 #define ECC_POISON0_COLUMN_SHIFT	0
185 #define ECC_POISON0_COLUMN_MASK		0xFFF
186 #define ECC_POISON1_BG_SHIFT		28
187 #define ECC_POISON1_BG_MASK		0x30000000
188 #define ECC_POISON1_BANKNR_SHIFT	24
189 #define ECC_POISON1_BANKNR_MASK		0x7000000
190 #define ECC_POISON1_ROW_SHIFT		0
191 #define ECC_POISON1_ROW_MASK		0x3FFFF
192 
193 /* DDR Memory type defines */
194 #define MEM_TYPE_DDR3			0x1
195 #define MEM_TYPE_LPDDR3			0x8
196 #define MEM_TYPE_DDR2			0x4
197 #define MEM_TYPE_DDR4			0x10
198 #define MEM_TYPE_LPDDR4			0x20
199 
200 /* DDRC Software control register */
201 #define DDRC_SWCTL			0x320
202 
203 /* DDRC ECC CE & UE poison mask */
204 #define ECC_CEPOISON_MASK		0x3
205 #define ECC_UEPOISON_MASK		0x1
206 
207 /* DDRC Device config masks */
208 #define DDRC_MSTR_CFG_MASK		0xC0000000
209 #define DDRC_MSTR_CFG_SHIFT		30
210 #define DDRC_MSTR_CFG_X4_MASK		0x0
211 #define DDRC_MSTR_CFG_X8_MASK		0x1
212 #define DDRC_MSTR_CFG_X16_MASK		0x2
213 #define DDRC_MSTR_CFG_X32_MASK		0x3
214 
215 #define DDR_MAX_ROW_SHIFT		18
216 #define DDR_MAX_COL_SHIFT		14
217 #define DDR_MAX_BANK_SHIFT		3
218 #define DDR_MAX_BANKGRP_SHIFT		2
219 
220 #define ROW_MAX_VAL_MASK		0xF
221 #define COL_MAX_VAL_MASK		0xF
222 #define BANK_MAX_VAL_MASK		0x1F
223 #define BANKGRP_MAX_VAL_MASK		0x1F
224 #define RANK_MAX_VAL_MASK		0x1F
225 
226 #define ROW_B0_BASE			6
227 #define ROW_B1_BASE			7
228 #define ROW_B2_BASE			8
229 #define ROW_B3_BASE			9
230 #define ROW_B4_BASE			10
231 #define ROW_B5_BASE			11
232 #define ROW_B6_BASE			12
233 #define ROW_B7_BASE			13
234 #define ROW_B8_BASE			14
235 #define ROW_B9_BASE			15
236 #define ROW_B10_BASE			16
237 #define ROW_B11_BASE			17
238 #define ROW_B12_BASE			18
239 #define ROW_B13_BASE			19
240 #define ROW_B14_BASE			20
241 #define ROW_B15_BASE			21
242 #define ROW_B16_BASE			22
243 #define ROW_B17_BASE			23
244 
245 #define COL_B2_BASE			2
246 #define COL_B3_BASE			3
247 #define COL_B4_BASE			4
248 #define COL_B5_BASE			5
249 #define COL_B6_BASE			6
250 #define COL_B7_BASE			7
251 #define COL_B8_BASE			8
252 #define COL_B9_BASE			9
253 #define COL_B10_BASE			10
254 #define COL_B11_BASE			11
255 #define COL_B12_BASE			12
256 #define COL_B13_BASE			13
257 
258 #define BANK_B0_BASE			2
259 #define BANK_B1_BASE			3
260 #define BANK_B2_BASE			4
261 
262 #define BANKGRP_B0_BASE			2
263 #define BANKGRP_B1_BASE			3
264 
265 #define RANK_B0_BASE			6
266 
267 /**
268  * struct ecc_error_info - ECC error log information.
269  * @row:	Row number.
270  * @col:	Column number.
271  * @bank:	Bank number.
272  * @bitpos:	Bit position.
273  * @data:	Data causing the error.
274  * @bankgrpnr:	Bank group number.
275  * @blknr:	Block number.
276  */
277 struct ecc_error_info {
278 	u32 row;
279 	u32 col;
280 	u32 bank;
281 	u32 bitpos;
282 	u32 data;
283 	u32 bankgrpnr;
284 	u32 blknr;
285 };
286 
287 /**
288  * struct synps_ecc_status - ECC status information to report.
289  * @ce_cnt:	Correctable error count.
290  * @ue_cnt:	Uncorrectable error count.
291  * @ceinfo:	Correctable error log information.
292  * @ueinfo:	Uncorrectable error log information.
293  */
294 struct synps_ecc_status {
295 	u32 ce_cnt;
296 	u32 ue_cnt;
297 	struct ecc_error_info ceinfo;
298 	struct ecc_error_info ueinfo;
299 };
300 
301 /**
302  * struct synps_edac_priv - DDR memory controller private instance data.
303  * @baseaddr:		Base address of the DDR controller.
304  * @reglock:		Concurrent CSRs access lock.
305  * @message:		Buffer for framing the event specific info.
306  * @stat:		ECC status information.
307  * @p_data:		Platform data.
308  * @ce_cnt:		Correctable Error count.
309  * @ue_cnt:		Uncorrectable Error count.
310  * @poison_addr:	Data poison address.
311  * @row_shift:		Bit shifts for row bit.
312  * @col_shift:		Bit shifts for column bit.
313  * @bank_shift:		Bit shifts for bank bit.
314  * @bankgrp_shift:	Bit shifts for bank group bit.
315  * @rank_shift:		Bit shifts for rank bit.
316  */
317 struct synps_edac_priv {
318 	void __iomem *baseaddr;
319 	spinlock_t reglock;
320 	char message[SYNPS_EDAC_MSG_SIZE];
321 	struct synps_ecc_status stat;
322 	const struct synps_platform_data *p_data;
323 	u32 ce_cnt;
324 	u32 ue_cnt;
325 #ifdef CONFIG_EDAC_DEBUG
326 	ulong poison_addr;
327 	u32 row_shift[18];
328 	u32 col_shift[14];
329 	u32 bank_shift[3];
330 	u32 bankgrp_shift[2];
331 	u32 rank_shift[1];
332 #endif
333 };
334 
335 enum synps_platform_type {
336 	ZYNQ,
337 	ZYNQMP,
338 	SYNPS,
339 };
340 
341 /**
342  * struct synps_platform_data -  synps platform data structure.
343  * @platform:		Identifies the target hardware platform
344  * @get_error_info:	Get EDAC error info.
345  * @get_mtype:		Get mtype.
346  * @get_dtype:		Get dtype.
347  * @get_mem_info:	Get EDAC memory info
348  * @quirks:		To differentiate IPs.
349  */
350 struct synps_platform_data {
351 	enum synps_platform_type platform;
352 	int (*get_error_info)(struct synps_edac_priv *priv);
353 	enum mem_type (*get_mtype)(const void __iomem *base);
354 	enum dev_type (*get_dtype)(const void __iomem *base);
355 #ifdef CONFIG_EDAC_DEBUG
356 	u64 (*get_mem_info)(struct synps_edac_priv *priv);
357 #endif
358 	int quirks;
359 };
360 
361 /**
362  * zynq_get_error_info - Get the current ECC error info.
363  * @priv:	DDR memory controller private instance data.
364  *
365  * Return: one if there is no error, otherwise zero.
366  */
static int zynq_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval = 0;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	/* Raw CE/UE counters; a zero status register means nothing pending. */
	regval = readl(base + STAT_OFST);
	if (!regval)
		return 1;

	p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT;
	p->ue_cnt = regval & STAT_UECNT_MASK;

	/* Decode the CE log only when a CE was counted AND the log is valid. */
	regval = readl(base + CE_LOG_OFST);
	if (!(p->ce_cnt && (regval & LOG_VALID)))
		goto ue_err;

	p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT;
	regval = readl(base + CE_ADDR_OFST);
	p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ceinfo.col = regval & ADDR_COL_MASK;
	p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
	edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
		 p->ceinfo.data);
	/* Remember that the CE log must be cleared on the way out. */
	clearval = ECC_CTRL_CLR_CE_ERR;

ue_err:
	/* Same valid-log gating for the uncorrectable error side. */
	regval = readl(base + UE_LOG_OFST);
	if (!(p->ue_cnt && (regval & LOG_VALID)))
		goto out;

	regval = readl(base + UE_ADDR_OFST);
	p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT;
	p->ueinfo.col = regval & ADDR_COL_MASK;
	p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
	p->ueinfo.data = readl(base + UE_DATA_31_0_OFST);
	clearval |= ECC_CTRL_CLR_UE_ERR;

out:
	/* Pulse the clear bits: set them, then write 0 to re-arm logging. */
	writel(clearval, base + ECC_CTRL_OFST);
	writel(0x0, base + ECC_CTRL_OFST);

	return 0;
}
415 
416 #ifdef CONFIG_EDAC_DEBUG
417 /**
418  * zynqmp_get_mem_info - Get the current memory info.
419  * @priv:	DDR memory controller private instance data.
420  *
421  * Return: host interface address.
422  */
zynqmp_get_mem_info(struct synps_edac_priv * priv)423 static u64 zynqmp_get_mem_info(struct synps_edac_priv *priv)
424 {
425 	u64 hif_addr = 0, linear_addr;
426 
427 	linear_addr = priv->poison_addr;
428 	if (linear_addr >= SZ_32G)
429 		linear_addr = linear_addr - SZ_32G + SZ_2G;
430 	hif_addr = linear_addr >> 3;
431 	return hif_addr;
432 }
433 #endif
434 
435 /**
436  * zynqmp_get_error_info - Get the current ECC error info.
437  * @priv:	DDR memory controller private instance data.
438  *
439  * Return: one if there is no error otherwise returns zero.
440  */
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
	struct synps_ecc_status *p;
	u32 regval, clearval;
	unsigned long flags;
	void __iomem *base;

	base = priv->baseaddr;
	p = &priv->stat;

	/* CE count sits in the low half, UE count in the high half. */
	regval = readl(base + ECC_ERRCNT_OFST);
	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
	if (!p->ce_cnt)
		goto ue_err;

	regval = readl(base + ECC_STAT_OFST);
	if (!regval)
		return 1;

	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);

	/* CEADDR0/1 hold row, bank, bank group and block of the last CE. */
	regval = readl(base + ECC_CEADDR0_OFST);
	p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_CEADDR1_OFST);
	p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
	edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
		 readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
		 readl(base + ECC_CSYND2_OFST));
ue_err:
	if (!p->ue_cnt)
		goto out;

	/* UE side reuses the CEADDR layout masks/shifts. */
	regval = readl(base + ECC_UEADDR0_OFST);
	p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
	regval = readl(base + ECC_UEADDR1_OFST);
	p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
					ECC_CEADDR1_BNKGRP_SHIFT;
	p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
					ECC_CEADDR1_BNKNR_SHIFT;
	p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
	p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
	/*
	 * ECC_CLR also carries the interrupt-enable bits on v3.x parts
	 * (see enable_intr()), so the read-modify-write must be serialized
	 * against the enable/disable paths with the register lock.
	 */
	spin_lock_irqsave(&priv->reglock, flags);

	clearval = readl(base + ECC_CLR_OFST) |
		   ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
		   ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
	writel(clearval, base + ECC_CLR_OFST);

	spin_unlock_irqrestore(&priv->reglock, flags);

	return 0;
}
500 
501 /**
502  * handle_error - Handle Correctable and Uncorrectable errors.
503  * @mci:	EDAC memory controller instance.
504  * @p:		Synopsys ECC status structure.
505  *
506  * Handles ECC correctable and uncorrectable errors.
507  */
handle_error(struct mem_ctl_info * mci,struct synps_ecc_status * p)508 static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
509 {
510 	struct synps_edac_priv *priv = mci->pvt_info;
511 	struct ecc_error_info *pinf;
512 
513 	if (p->ce_cnt) {
514 		pinf = &p->ceinfo;
515 		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
516 			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
517 				 "DDR ECC error type:%s Row %d Bank %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
518 				 "CE", pinf->row, pinf->bank,
519 				 pinf->bankgrpnr, pinf->blknr,
520 				 pinf->bitpos, pinf->data);
521 		} else {
522 			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
523 				 "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
524 				 "CE", pinf->row, pinf->bank, pinf->col,
525 				 pinf->bitpos, pinf->data);
526 		}
527 
528 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
529 				     p->ce_cnt, 0, 0, 0, 0, 0, -1,
530 				     priv->message, "");
531 	}
532 
533 	if (p->ue_cnt) {
534 		pinf = &p->ueinfo;
535 		if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
536 			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
537 				 "DDR ECC error type :%s Row %d Bank %d BankGroup Number %d Block Number %d",
538 				 "UE", pinf->row, pinf->bank,
539 				 pinf->bankgrpnr, pinf->blknr);
540 		} else {
541 			snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
542 				 "DDR ECC error type :%s Row %d Bank %d Col %d ",
543 				 "UE", pinf->row, pinf->bank, pinf->col);
544 		}
545 
546 		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
547 				     p->ue_cnt, 0, 0, 0, 0, 0, -1,
548 				     priv->message, "");
549 	}
550 
551 	memset(p, 0, sizeof(*p));
552 }
553 
enable_intr(struct synps_edac_priv * priv)554 static void enable_intr(struct synps_edac_priv *priv)
555 {
556 	unsigned long flags;
557 
558 	/* Enable UE/CE Interrupts */
559 	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
560 		writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
561 		       priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
562 
563 		return;
564 	}
565 
566 	spin_lock_irqsave(&priv->reglock, flags);
567 
568 	writel(DDR_UE_MASK | DDR_CE_MASK,
569 	       priv->baseaddr + ECC_CLR_OFST);
570 
571 	spin_unlock_irqrestore(&priv->reglock, flags);
572 }
573 
disable_intr(struct synps_edac_priv * priv)574 static void disable_intr(struct synps_edac_priv *priv)
575 {
576 	unsigned long flags;
577 
578 	/* Disable UE/CE Interrupts */
579 	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
580 		writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
581 		       priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
582 
583 		return;
584 	}
585 
586 	spin_lock_irqsave(&priv->reglock, flags);
587 
588 	writel(0, priv->baseaddr + ECC_CLR_OFST);
589 
590 	spin_unlock_irqrestore(&priv->reglock, flags);
591 }
592 
593 /**
594  * intr_handler - Interrupt Handler for ECC interrupts.
595  * @irq:        IRQ number.
596  * @dev_id:     Device ID.
597  *
598  * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise.
599  */
static irqreturn_t intr_handler(int irq, void *dev_id)
{
	const struct synps_platform_data *p_data;
	struct mem_ctl_info *mci = dev_id;
	struct synps_edac_priv *priv;
	int status, regval;

	priv = mci->pvt_info;
	p_data = priv->p_data;

	/*
	 * v3.0 of the controller has the ce/ue bits cleared automatically,
	 * so this condition does not apply.
	 */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
		/* Bail out early if this shared IRQ was not a CE/UE event. */
		regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
		regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
		if (!(regval & ECC_CE_UE_INTR_MASK))
			return IRQ_NONE;
	}

	/* Non-zero status means no valid error info was captured. */
	status = p_data->get_error_info(priv);
	if (status)
		return IRQ_NONE;

	/* Accumulate running totals before the per-event state is reset. */
	priv->ce_cnt += priv->stat.ce_cnt;
	priv->ue_cnt += priv->stat.ue_cnt;
	handle_error(mci, &priv->stat);

	edac_dbg(3, "Total error count CE %d UE %d\n",
		 priv->ce_cnt, priv->ue_cnt);
	/* v3.0 of the controller does not have this register */
	if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
		writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);

	return IRQ_HANDLED;
}
637 
638 /**
639  * check_errors - Check controller for ECC errors.
640  * @mci:	EDAC memory controller instance.
641  *
642  * Check and post ECC errors. Called by the polling thread.
643  */
check_errors(struct mem_ctl_info * mci)644 static void check_errors(struct mem_ctl_info *mci)
645 {
646 	const struct synps_platform_data *p_data;
647 	struct synps_edac_priv *priv;
648 	int status;
649 
650 	priv = mci->pvt_info;
651 	p_data = priv->p_data;
652 
653 	status = p_data->get_error_info(priv);
654 	if (status)
655 		return;
656 
657 	priv->ce_cnt += priv->stat.ce_cnt;
658 	priv->ue_cnt += priv->stat.ue_cnt;
659 	handle_error(mci, &priv->stat);
660 
661 	edac_dbg(3, "Total error count CE %d UE %d\n",
662 		 priv->ce_cnt, priv->ue_cnt);
663 }
664 
665 /**
666  * zynq_get_dtype - Return the controller memory width.
667  * @base:	DDR memory controller base address.
668  *
669  * Get the EDAC device type width appropriate for the current controller
670  * configuration.
671  *
672  * Return: a device type width enumeration.
673  */
static enum dev_type zynq_get_dtype(const void __iomem *base)
{
	u32 bw;

	/* Bus-width field of the master control register. */
	bw = (readl(base + CTRL_OFST) & CTRL_BW_MASK) >> CTRL_BW_SHIFT;

	if (bw == DDRCTL_WDTH_16)
		return DEV_X2;
	if (bw == DDRCTL_WDTH_32)
		return DEV_X4;

	return DEV_UNKNOWN;
}
695 
696 /**
697  * zynqmp_get_dtype - Return the controller memory width.
698  * @base:	DDR memory controller base address.
699  *
700  * Get the EDAC device type width appropriate for the current controller
701  * configuration.
702  *
703  * Return: a device type width enumeration.
704  */
static enum dev_type zynqmp_get_dtype(const void __iomem *base)
{
	u32 bw;

	/* Bus-width field of the enhanced controller's master register. */
	bw = (readl(base + CTRL_OFST) & ECC_CTRL_BUSWIDTH_MASK) >>
	     ECC_CTRL_BUSWIDTH_SHIFT;

	if (bw == DDRCTL_EWDTH_16)
		return DEV_X2;
	if (bw == DDRCTL_EWDTH_32)
		return DEV_X4;
	if (bw == DDRCTL_EWDTH_64)
		return DEV_X8;

	return DEV_UNKNOWN;
}
728 
/*
 * get_ecc_state - Check whether SECDED ECC is active on this controller.
 * @priv:	DDR memory controller private instance data.
 *
 * Verifies the device width is one the driver supports and the scrub mode
 * is SECDED; when so, clears any stale error logs so reporting starts
 * from a clean state.
 *
 * Return: true if ECC is enabled, otherwise false.
 */
static bool get_ecc_state(struct synps_edac_priv *priv)
{
	u32 ecctype, clearval;
	enum dev_type dt;

	if (priv->p_data->platform == ZYNQ) {
		dt = zynq_get_dtype(priv->baseaddr);
		if (dt == DEV_UNKNOWN)
			return false;

		ecctype = readl(priv->baseaddr + SCRUB_OFST) & SCRUB_MODE_MASK;
		if (ecctype == SCRUB_MODE_SECDED && dt == DEV_X2) {
			/* Pulse the clear bits to drop stale CE/UE logs. */
			clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_UE_ERR;
			writel(clearval, priv->baseaddr + ECC_CTRL_OFST);
			writel(0x0, priv->baseaddr + ECC_CTRL_OFST);
			return true;
		}
	} else {
		dt = zynqmp_get_dtype(priv->baseaddr);
		if (dt == DEV_UNKNOWN)
			return false;

		ecctype = readl(priv->baseaddr + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
		if (ecctype == SCRUB_MODE_SECDED &&
		    (dt == DEV_X2 || dt == DEV_X4 || dt == DEV_X8)) {
			/* Preserve ECC_CLR contents (holds intr enables on v3.x). */
			clearval = readl(priv->baseaddr + ECC_CLR_OFST) |
			ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
			ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
			writel(clearval, priv->baseaddr + ECC_CLR_OFST);
			return true;
		}
	}

	return false;
}
764 
765 /**
766  * get_memsize - Read the size of the attached memory device.
767  *
768  * Return: the memory size in bytes.
769  */
get_memsize(void)770 static u32 get_memsize(void)
771 {
772 	struct sysinfo inf;
773 
774 	si_meminfo(&inf);
775 
776 	return inf.totalram * inf.mem_unit;
777 }
778 
779 /**
780  * zynq_get_mtype - Return the controller memory type.
781  * @base:	Synopsys ECC status structure.
782  *
783  * Get the EDAC memory type appropriate for the current controller
784  * configuration.
785  *
786  * Return: a memory type enumeration.
787  */
static enum mem_type zynq_get_mtype(const void __iomem *base)
{
	u32 regval = readl(base + T_ZQ_OFST);

	/* The DDR-mode bit distinguishes DDR3 from DDR2 operation. */
	return (regval & T_ZQ_DDRMODE_MASK) ? MEM_DDR3 : MEM_DDR2;
}
802 
803 /**
804  * zynqmp_get_mtype - Returns controller memory type.
805  * @base:	Synopsys ECC status structure.
806  *
807  * Get the EDAC memory type appropriate for the current controller
808  * configuration.
809  *
810  * Return: a memory type enumeration.
811  */
static enum mem_type zynqmp_get_mtype(const void __iomem *base)
{
	u32 regval = readl(base + CTRL_OFST);

	/* Memory-type bits of the master register, tested most-likely first. */
	if (regval & (MEM_TYPE_DDR3 | MEM_TYPE_LPDDR3))
		return MEM_DDR3;
	if (regval & MEM_TYPE_DDR2)
		return MEM_RDDR2;
	if (regval & (MEM_TYPE_LPDDR4 | MEM_TYPE_DDR4))
		return MEM_DDR4;

	return MEM_EMPTY;
}
830 
831 /**
832  * init_csrows - Initialize the csrow data.
833  * @mci:	EDAC memory controller instance.
834  *
835  * Initialize the chip select rows associated with the EDAC memory
836  * controller instance.
837  */
init_csrows(struct mem_ctl_info * mci)838 static void init_csrows(struct mem_ctl_info *mci)
839 {
840 	struct synps_edac_priv *priv = mci->pvt_info;
841 	const struct synps_platform_data *p_data;
842 	struct csrow_info *csi;
843 	struct dimm_info *dimm;
844 	u32 size, row;
845 	int j;
846 
847 	p_data = priv->p_data;
848 
849 	for (row = 0; row < mci->nr_csrows; row++) {
850 		csi = mci->csrows[row];
851 		size = get_memsize();
852 
853 		for (j = 0; j < csi->nr_channels; j++) {
854 			dimm		= csi->channels[j]->dimm;
855 			dimm->edac_mode	= EDAC_SECDED;
856 			dimm->mtype	= p_data->get_mtype(priv->baseaddr);
857 			dimm->nr_pages	= (size >> PAGE_SHIFT) / csi->nr_channels;
858 			dimm->grain	= SYNPS_EDAC_ERR_GRAIN;
859 			dimm->dtype	= p_data->get_dtype(priv->baseaddr);
860 		}
861 	}
862 }
863 
864 /**
865  * mc_init - Initialize one driver instance.
866  * @mci:	EDAC memory controller instance.
867  * @pdev:	platform device.
868  *
869  * Perform initialization of the EDAC memory controller instance and
870  * related driver-private data associated with the memory controller the
871  * instance is bound to.
872  */
mc_init(struct mem_ctl_info * mci,struct platform_device * pdev)873 static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
874 {
875 	struct synps_edac_priv *priv;
876 
877 	mci->pdev = &pdev->dev;
878 	priv = mci->pvt_info;
879 	platform_set_drvdata(pdev, mci);
880 
881 	/* Initialize controller capabilities and configuration */
882 	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
883 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
884 	mci->scrub_cap = SCRUB_HW_SRC;
885 	mci->scrub_mode = SCRUB_NONE;
886 
887 	mci->edac_cap = EDAC_FLAG_SECDED;
888 	mci->ctl_name = "synps_ddr_controller";
889 	mci->dev_name = SYNPS_EDAC_MOD_STRING;
890 	mci->mod_name = SYNPS_EDAC_MOD_VER;
891 
892 	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
893 		edac_op_state = EDAC_OPSTATE_INT;
894 	} else {
895 		edac_op_state = EDAC_OPSTATE_POLL;
896 		mci->edac_check = check_errors;
897 	}
898 
899 	mci->ctl_page_to_phys = NULL;
900 
901 	init_csrows(mci);
902 }
903 
setup_irq(struct mem_ctl_info * mci,struct platform_device * pdev)904 static int setup_irq(struct mem_ctl_info *mci,
905 		     struct platform_device *pdev)
906 {
907 	struct synps_edac_priv *priv = mci->pvt_info;
908 	int ret, irq;
909 
910 	irq = platform_get_irq(pdev, 0);
911 	if (irq < 0) {
912 		edac_printk(KERN_ERR, EDAC_MC,
913 			    "No IRQ %d in DT\n", irq);
914 		return irq;
915 	}
916 
917 	ret = devm_request_irq(&pdev->dev, irq, intr_handler,
918 			       0, dev_name(&pdev->dev), mci);
919 	if (ret < 0) {
920 		edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
921 		return ret;
922 	}
923 
924 	enable_intr(priv);
925 
926 	return 0;
927 }
928 
/* Zynq-7000 A05 DDRC: polled reporting, no interrupt or poison support. */
static const struct synps_platform_data zynq_edac_def = {
	.platform = ZYNQ,
	.get_error_info	= zynq_get_error_info,
	.get_mtype	= zynq_get_mtype,
	.get_dtype	= zynq_get_dtype,
	.quirks		= 0,
};
936 
/* ZynqMP DDRC 2.40a: interrupt-driven; data poisoning on debug builds only. */
static const struct synps_platform_data zynqmp_edac_def = {
	.platform = ZYNQMP,
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
#ifdef CONFIG_EDAC_DEBUG
	.get_mem_info	= zynqmp_get_mem_info,
#endif
	.quirks         = (DDR_ECC_INTR_SUPPORT
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
951 
/* Generic Synopsys DDRC 3.80a: v3.x IP whose interrupts self-clear. */
static const struct synps_platform_data synopsys_edac_def = {
	.platform = SYNPS,
	.get_error_info	= zynqmp_get_error_info,
	.get_mtype	= zynqmp_get_mtype,
	.get_dtype	= zynqmp_get_dtype,
	.quirks         = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
			  | DDR_ECC_DATA_POISON_SUPPORT
#endif
			  ),
};
963 
964 
/* OF match table; .data selects the per-IP synps_platform_data variant. */
static const struct of_device_id synps_edac_match[] = {
	{
		.compatible = "xlnx,zynq-ddrc-a05",
		.data = (void *)&zynq_edac_def
	},
	{
		.compatible = "xlnx,zynqmp-ddrc-2.40a",
		.data = (void *)&zynqmp_edac_def
	},
	{
		.compatible = "snps,ddrc-3.80a",
		.data = (void *)&synopsys_edac_def
	},
	{
		/* end of table */
	}
};

MODULE_DEVICE_TABLE(of, synps_edac_match);
984 
985 #ifdef CONFIG_EDAC_DEBUG
986 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
987 
988 /**
989  * ddr_poison_setup -	Update poison registers.
990  * @priv:		DDR memory controller private instance data.
991  *
992  * Update poison registers as per DDR mapping.
993  * Return: none.
994  */
static void ddr_poison_setup(struct synps_edac_priv *priv)
{
	int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
	const struct synps_platform_data *p_data;
	int index;
	ulong hif_addr = 0;

	p_data = priv->p_data;

	/* Translate the user-supplied linear address into a HIF address. */
	if (p_data->get_mem_info)
		hif_addr = p_data->get_mem_info(priv);
	else
		hif_addr = priv->poison_addr >> 3;

	/*
	 * Gather each DRAM coordinate by picking the HIF address bit that
	 * the ADDRMAP-derived shift tables route to it.  A zero shift
	 * entry marks the end of the used bits for that coordinate.
	 */
	for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
		if (priv->row_shift[index])
			row |= (((hif_addr >> priv->row_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	/* Column bits 0-2 are always mapped, hence the index < 3 special case. */
	for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
		if (priv->col_shift[index] || index < 3)
			col |= (((hif_addr >> priv->col_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
		if (priv->bank_shift[index])
			bank |= (((hif_addr >> priv->bank_shift[index]) &
						BIT(0)) << index);
		else
			break;
	}

	for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
		if (priv->bankgrp_shift[index])
			bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
						& BIT(0)) << index);
		else
			break;
	}

	if (priv->rank_shift[0])
		rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);

	/* Program POISON0 (rank + column) and POISON1 (bank group/bank/row). */
	regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
	regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
	writel(regval, priv->baseaddr + ECC_POISON0_OFST);

	regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
	regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
	regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
	writel(regval, priv->baseaddr + ECC_POISON1_OFST);
}
1053 
inject_data_error_show(struct device * dev,struct device_attribute * mattr,char * data)1054 static ssize_t inject_data_error_show(struct device *dev,
1055 				      struct device_attribute *mattr,
1056 				      char *data)
1057 {
1058 	struct mem_ctl_info *mci = to_mci(dev);
1059 	struct synps_edac_priv *priv = mci->pvt_info;
1060 
1061 	return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
1062 			"Error injection Address: 0x%lx\n\r",
1063 			readl(priv->baseaddr + ECC_POISON0_OFST),
1064 			readl(priv->baseaddr + ECC_POISON1_OFST),
1065 			priv->poison_addr);
1066 }
1067 
inject_data_error_store(struct device * dev,struct device_attribute * mattr,const char * data,size_t count)1068 static ssize_t inject_data_error_store(struct device *dev,
1069 				       struct device_attribute *mattr,
1070 				       const char *data, size_t count)
1071 {
1072 	struct mem_ctl_info *mci = to_mci(dev);
1073 	struct synps_edac_priv *priv = mci->pvt_info;
1074 
1075 	if (kstrtoul(data, 0, &priv->poison_addr))
1076 		return -EINVAL;
1077 
1078 	ddr_poison_setup(priv);
1079 
1080 	return count;
1081 }
1082 
inject_data_poison_show(struct device * dev,struct device_attribute * mattr,char * data)1083 static ssize_t inject_data_poison_show(struct device *dev,
1084 				       struct device_attribute *mattr,
1085 				       char *data)
1086 {
1087 	struct mem_ctl_info *mci = to_mci(dev);
1088 	struct synps_edac_priv *priv = mci->pvt_info;
1089 
1090 	return sprintf(data, "Data Poisoning: %s\n\r",
1091 			(((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
1092 			? ("Correctable Error") : ("UnCorrectable Error"));
1093 }
1094 
inject_data_poison_store(struct device * dev,struct device_attribute * mattr,const char * data,size_t count)1095 static ssize_t inject_data_poison_store(struct device *dev,
1096 					struct device_attribute *mattr,
1097 					const char *data, size_t count)
1098 {
1099 	struct mem_ctl_info *mci = to_mci(dev);
1100 	struct synps_edac_priv *priv = mci->pvt_info;
1101 
1102 	writel(0, priv->baseaddr + DDRC_SWCTL);
1103 	if (strncmp(data, "CE", 2) == 0)
1104 		writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1105 	else
1106 		writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
1107 	writel(1, priv->baseaddr + DDRC_SWCTL);
1108 
1109 	return count;
1110 }
1111 
/* Read/write sysfs attributes backed by the inject_data_*_show/_store pairs. */
static DEVICE_ATTR_RW(inject_data_error);
static DEVICE_ATTR_RW(inject_data_poison);
1114 
edac_create_sysfs_attributes(struct mem_ctl_info * mci)1115 static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
1116 {
1117 	int rc;
1118 
1119 	rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
1120 	if (rc < 0)
1121 		return rc;
1122 	rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
1123 	if (rc < 0)
1124 		return rc;
1125 	return 0;
1126 }
1127 
/**
 * edac_remove_sysfs_attributes - Remove the error-injection sysfs files.
 * @mci:	EDAC memory controller instance.
 *
 * Counterpart of edac_create_sysfs_attributes(); removes both attribute
 * files from the MCI device.
 */
static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
{
	device_remove_file(&mci->dev, &dev_attr_inject_data_error);
	device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
}
1133 
/**
 * setup_row_address_map - Derive row-bit positions from ADDRMAP registers.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Array of ADDRMAP0..ADDRMAP11 register values.
 *
 * Fills priv->row_shift[0..17] with the HIF-address bit position of each
 * row bit.  For the optional high row bits (11-17), the field value
 * ROW_MAX_VAL_MASK means "unused" and the shift is recorded as 0.
 */
static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 addrmap_row_b2_10;
	int index;

	/* Row bits 0 and 1 come from ADDRMAP5 bytes 0 and 1. */
	priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
	priv->row_shift[1] = ((addrmap[5] >> 8) &
			ROW_MAX_VAL_MASK) + ROW_B1_BASE;

	/*
	 * ADDRMAP5 byte 2 either gives one common offset for row bits 2-10,
	 * or (when set to ROW_MAX_VAL_MASK) defers to the per-bit fields in
	 * ADDRMAP9..ADDRMAP11.
	 */
	addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
	if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
		for (index = 2; index < 11; index++)
			priv->row_shift[index] = addrmap_row_b2_10 +
				index + ROW_B0_BASE;

	} else {
		priv->row_shift[2] = (addrmap[9] &
				ROW_MAX_VAL_MASK) + ROW_B2_BASE;
		priv->row_shift[3] = ((addrmap[9] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B3_BASE;
		priv->row_shift[4] = ((addrmap[9] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B4_BASE;
		priv->row_shift[5] = ((addrmap[9] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B5_BASE;
		priv->row_shift[6] = (addrmap[10] &
				ROW_MAX_VAL_MASK) + ROW_B6_BASE;
		priv->row_shift[7] = ((addrmap[10] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B7_BASE;
		priv->row_shift[8] = ((addrmap[10] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B8_BASE;
		priv->row_shift[9] = ((addrmap[10] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B9_BASE;
		priv->row_shift[10] = (addrmap[11] &
				ROW_MAX_VAL_MASK) + ROW_B10_BASE;
	}

	/* Optional row bits 11-17: ROW_MAX_VAL_MASK marks the bit unused. */
	priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B11_BASE);
	priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
				ROW_MAX_VAL_MASK) + ROW_B12_BASE);
	priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B13_BASE);
	priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
				ROW_MAX_VAL_MASK) + ROW_B14_BASE);
	priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
				ROW_MAX_VAL_MASK) + ROW_B15_BASE);
	priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
				ROW_MAX_VAL_MASK) + ROW_B16_BASE);
	priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
				ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
				ROW_MAX_VAL_MASK) + ROW_B17_BASE);
}
1192 
/**
 * setup_column_address_map - Derive column-bit positions from ADDRMAP regs.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Array of ADDRMAP0..ADDRMAP11 register values.
 *
 * Fills priv->col_shift[] with the HIF-address bit position of each column
 * bit.  The mapping of the high column bits depends on the controller bus
 * width and on whether the memory is LPDDR3.  A field value of
 * COL_MAX_VAL_MASK marks an unused bit (shift recorded as 0).
 */
static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 width, memtype;
	int index;

	/* Bus width is read from the controller's main control register. */
	memtype = readl(priv->baseaddr + CTRL_OFST);
	width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;

	/* Column bits 0 and 1 are fixed; 2-9 come from ADDRMAP2/ADDRMAP3. */
	priv->col_shift[0] = 0;
	priv->col_shift[1] = 1;
	priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
	priv->col_shift[3] = ((addrmap[2] >> 8) &
			COL_MAX_VAL_MASK) + COL_B3_BASE;
	priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
					COL_MAX_VAL_MASK) + COL_B4_BASE);
	priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
					COL_MAX_VAL_MASK) + COL_B5_BASE);
	priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
					COL_MAX_VAL_MASK) + COL_B6_BASE);
	priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
					COL_MAX_VAL_MASK) + COL_B7_BASE);
	priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
					COL_MAX_VAL_MASK) + COL_B8_BASE);
	priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
			COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
					COL_MAX_VAL_MASK) + COL_B9_BASE);
	/*
	 * High column bits: which ADDRMAP field feeds which col_shift index
	 * depends on bus width and LPDDR3.
	 * NOTE(review): in the non-LPDDR3 branches col_shift[10] and [12]
	 * are deliberately left at their prior values and indices jump to
	 * [11]/[13]; presumably those column bits are reserved in that
	 * configuration -- confirm against the DDR controller databook.
	 */
	if (width == DDRCTL_EWDTH_64) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[11] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		} else {
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
			priv->col_shift[13] = (((addrmap[4] >> 8) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
				 COL_B11_BASE);
		}
	} else if (width == DDRCTL_EWDTH_32) {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[11] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		}
	} else {
		if (memtype & MEM_TYPE_LPDDR3) {
			priv->col_shift[10] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[11] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
			priv->col_shift[13] = ((addrmap[4] &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				((addrmap[4] & COL_MAX_VAL_MASK) +
				 COL_B10_BASE);
		} else {
			priv->col_shift[11] = (((addrmap[3] >> 16) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
				 COL_B8_BASE);
			priv->col_shift[13] = (((addrmap[3] >> 24) &
				COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
				(((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
				 COL_B9_BASE);
		}
	}

	/*
	 * For narrower buses, shift the table down by the width value so the
	 * low entries (cleared to 0) account for the reduced column count.
	 */
	if (width) {
		for (index = 9; index > width; index--) {
			priv->col_shift[index] = priv->col_shift[index - width];
			priv->col_shift[index - width] = 0;
		}
	}

}
1298 
/**
 * setup_bank_address_map - Derive bank-bit positions from ADDRMAP1.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Array of ADDRMAP0..ADDRMAP11 register values.
 *
 * Bank bits 0/1/2 come from bytes 0/1/2 of ADDRMAP1; a byte-2 value of
 * BANK_MAX_VAL_MASK marks bank bit 2 as unused (shift recorded as 0).
 */
static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 b0_field, b1_field, b2_field;

	b0_field = addrmap[1] & BANK_MAX_VAL_MASK;
	b1_field = (addrmap[1] >> 8) & BANK_MAX_VAL_MASK;
	b2_field = (addrmap[1] >> 16) & BANK_MAX_VAL_MASK;

	priv->bank_shift[0] = b0_field + BANK_B0_BASE;
	priv->bank_shift[1] = b1_field + BANK_B1_BASE;
	priv->bank_shift[2] = (b2_field == BANK_MAX_VAL_MASK) ?
				0 : b2_field + BANK_B2_BASE;
}
1310 
/**
 * setup_bg_address_map - Derive bank-group bit positions from ADDRMAP8.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Array of ADDRMAP0..ADDRMAP11 register values.
 *
 * Bank-group bits 0/1 come from bytes 0/1 of ADDRMAP8; a byte-1 value of
 * BANKGRP_MAX_VAL_MASK marks bank-group bit 1 as unused (shift 0).
 */
static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 bg0_field, bg1_field;

	bg0_field = addrmap[8] & BANKGRP_MAX_VAL_MASK;
	bg1_field = (addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK;

	priv->bankgrp_shift[0] = bg0_field + BANKGRP_B0_BASE;
	priv->bankgrp_shift[1] = (bg1_field == BANKGRP_MAX_VAL_MASK) ?
				0 : bg1_field + BANKGRP_B1_BASE;
}
1320 
/**
 * setup_rank_address_map - Derive the rank bit position from ADDRMAP0.
 * @priv:	DDR memory controller private instance data.
 * @addrmap:	Array of ADDRMAP0..ADDRMAP11 register values.
 *
 * A field value of RANK_MAX_VAL_MASK means no rank bit is mapped (shift 0).
 */
static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
{
	u32 rank_field = addrmap[0] & RANK_MAX_VAL_MASK;

	if (rank_field == RANK_MAX_VAL_MASK)
		priv->rank_shift[0] = 0;
	else
		priv->rank_shift[0] = rank_field + RANK_B0_BASE;
}
1327 
1328 /**
1329  * setup_address_map -	Set Address Map by querying ADDRMAP registers.
1330  * @priv:		DDR memory controller private instance data.
1331  *
1332  * Set Address Map by querying ADDRMAP registers.
1333  *
1334  * Return: none.
1335  */
setup_address_map(struct synps_edac_priv * priv)1336 static void setup_address_map(struct synps_edac_priv *priv)
1337 {
1338 	u32 addrmap[12];
1339 	int index;
1340 
1341 	for (index = 0; index < 12; index++) {
1342 		u32 addrmap_offset;
1343 
1344 		addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
1345 		addrmap[index] = readl(priv->baseaddr + addrmap_offset);
1346 	}
1347 
1348 	setup_row_address_map(priv, addrmap);
1349 
1350 	setup_column_address_map(priv, addrmap);
1351 
1352 	setup_bank_address_map(priv, addrmap);
1353 
1354 	setup_bg_address_map(priv, addrmap);
1355 
1356 	setup_rank_address_map(priv, addrmap);
1357 }
1358 #endif /* CONFIG_EDAC_DEBUG */
1359 
1360 /**
1361  * mc_probe - Check controller and bind driver.
1362  * @pdev:	platform device.
1363  *
1364  * Probe a specific controller instance for binding with the driver.
1365  *
1366  * Return: 0 if the controller instance was successfully bound to the
1367  * driver; otherwise, < 0 on error.
1368  */
mc_probe(struct platform_device * pdev)1369 static int mc_probe(struct platform_device *pdev)
1370 {
1371 	const struct synps_platform_data *p_data;
1372 	struct edac_mc_layer layers[2];
1373 	struct synps_edac_priv *priv;
1374 	struct mem_ctl_info *mci;
1375 	void __iomem *baseaddr;
1376 	int rc;
1377 
1378 	baseaddr = devm_platform_ioremap_resource(pdev, 0);
1379 	if (IS_ERR(baseaddr))
1380 		return PTR_ERR(baseaddr);
1381 
1382 	p_data = of_device_get_match_data(&pdev->dev);
1383 	if (!p_data)
1384 		return -ENODEV;
1385 
1386 
1387 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1388 	layers[0].size = SYNPS_EDAC_NR_CSROWS;
1389 	layers[0].is_virt_csrow = true;
1390 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
1391 	layers[1].size = SYNPS_EDAC_NR_CHANS;
1392 	layers[1].is_virt_csrow = false;
1393 
1394 	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
1395 			    sizeof(struct synps_edac_priv));
1396 	if (!mci) {
1397 		edac_printk(KERN_ERR, EDAC_MC,
1398 			    "Failed memory allocation for mc instance\n");
1399 		return -ENOMEM;
1400 	}
1401 
1402 	priv = mci->pvt_info;
1403 	priv->baseaddr = baseaddr;
1404 	priv->p_data = p_data;
1405 	if (!get_ecc_state(priv)) {
1406 		edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
1407 		rc = -ENODEV;
1408 		goto free_edac_mc;
1409 	}
1410 
1411 	spin_lock_init(&priv->reglock);
1412 
1413 	mc_init(mci, pdev);
1414 
1415 	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
1416 		rc = setup_irq(mci, pdev);
1417 		if (rc)
1418 			goto free_edac_mc;
1419 	}
1420 
1421 	rc = edac_mc_add_mc(mci);
1422 	if (rc) {
1423 		edac_printk(KERN_ERR, EDAC_MC,
1424 			    "Failed to register with EDAC core\n");
1425 		goto free_edac_mc;
1426 	}
1427 
1428 #ifdef CONFIG_EDAC_DEBUG
1429 	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
1430 		rc = edac_create_sysfs_attributes(mci);
1431 		if (rc) {
1432 			edac_printk(KERN_ERR, EDAC_MC,
1433 					"Failed to create sysfs entries\n");
1434 			goto free_edac_mc;
1435 		}
1436 	}
1437 
1438 	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
1439 		setup_address_map(priv);
1440 #endif
1441 
1442 	/*
1443 	 * Start capturing the correctable and uncorrectable errors. A write of
1444 	 * 0 starts the counters.
1445 	 */
1446 	if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
1447 		writel(0x0, baseaddr + ECC_CTRL_OFST);
1448 
1449 	return rc;
1450 
1451 free_edac_mc:
1452 	edac_mc_free(mci);
1453 
1454 	return rc;
1455 }
1456 
/**
 * mc_remove - Unbind driver from controller.
 * @pdev:	Platform device.
 *
 * Disable error interrupts (when supported), remove the debug sysfs
 * attributes, then unregister and free the EDAC memory controller
 * instance.  (The callback returns void; the old "Return: Unconditionally
 * 0" comment was stale.)
 */
static void mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);
	struct synps_edac_priv *priv = mci->pvt_info;

	/* Quiesce the hardware before tearing down the MCI. */
	if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
		disable_intr(priv);

#ifdef CONFIG_EDAC_DEBUG
	if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
		edac_remove_sysfs_attributes(mci);
#endif

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
}
1479 
/* Platform driver glue; synps_edac_match is defined earlier in this file. */
static struct platform_driver synps_edac_mc_driver = {
	.driver = {
		   .name = "synopsys-edac",
		   .of_match_table = synps_edac_match,
		   },
	.probe = mc_probe,
	.remove = mc_remove,
};

module_platform_driver(synps_edac_mc_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Synopsys DDR ECC driver");
MODULE_LICENSE("GPL v2");
1494