// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011-2012 Calxeda, Inc.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>

#include "edac_module.h"

/* DDR Ctrlr Error Registers */

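/*
 * Register block base offsets from the controller's MMIO region: the HB_*
 * bases are used for the Highbank controller ("calxeda,hb-ddr-ctrl") and
 * the MW_* bases for the ECX-2000 controller ("calxeda,ecx-2000-ddr-ctrl"),
 * as selected by the OF match data below. The per-register offsets that
 * follow are relative to the selected base.
 */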
#define HB_DDR_ECC_ERR_BASE		0x128
#define MW_DDR_ECC_ERR_BASE		0x1b4

#define HB_DDR_ECC_OPT			0x00
#define HB_DDR_ECC_U_ERR_ADDR		0x08
#define HB_DDR_ECC_U_ERR_STAT		0x0c
#define HB_DDR_ECC_U_ERR_DATAL		0x10
#define HB_DDR_ECC_U_ERR_DATAH		0x14
#define HB_DDR_ECC_C_ERR_ADDR		0x18
#define HB_DDR_ECC_C_ERR_STAT		0x1c
#define HB_DDR_ECC_C_ERR_DATAL		0x20
#define HB_DDR_ECC_C_ERR_DATAH		0x24

#define HB_DDR_ECC_OPT_MODE_MASK	0x3
#define HB_DDR_ECC_OPT_FWC		0x100
#define HB_DDR_ECC_OPT_XOR_SHIFT	16

/* DDR Ctrlr Interrupt Registers */

#define HB_DDR_ECC_INT_BASE		0x180
#define MW_DDR_ECC_INT_BASE		0x218

#define HB_DDR_ECC_INT_STATUS		0x00
#define HB_DDR_ECC_INT_ACK		0x04

#define HB_DDR_ECC_INT_STAT_CE		0x8
#define HB_DDR_ECC_INT_STAT_DOUBLE_CE	0x10
#define HB_DDR_ECC_INT_STAT_UE		0x20
#define HB_DDR_ECC_INT_STAT_DOUBLE_UE	0x40

struct hb_mc_drvdata {
	void __iomem *mc_err_base;
	void __iomem *mc_int_base;
};

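/*
 * Error interrupt handler: read the interrupt status, report any
 * uncorrectable and correctable ECC errors to the EDAC core, then ack the
 * status bits, which also clears the interrupt.
 */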
static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct hb_mc_drvdata *drvdata = mci->pvt_info;
	u32 status, err_addr;

	/* Read the interrupt status register */
	status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS);

	if (status & HB_DDR_ECC_INT_STAT_UE) {
		err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR);
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, 0,
				     0, 0, -1,
				     mci->ctl_name, "");
	}
	if (status & HB_DDR_ECC_INT_STAT_CE) {
		/* The syndrome is reported in bits [15:8] of the CE status */
		u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT);

		syndrome = (syndrome >> 8) & 0xff;
		err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR);
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, syndrome,
				     0, 0, -1,
				     mci->ctl_name, "");
	}

	/* Clear the error; this also clears the interrupt */
	writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK);
	return IRQ_HANDLED;
}

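/*
 * Inject an error: keep the ECC mode bits, program the caller's syndrome
 * into the XOR field and set the FWC bit of the ECC options register.
 * Presumably the controller applies the XOR mask to a subsequent write so
 * that the error-reporting path can be exercised.
 */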
static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd)
{
	struct hb_mc_drvdata *pdata = mci->pvt_info;
	u32 reg;

	reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT);
	reg &= HB_DDR_ECC_OPT_MODE_MASK;
	reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
	writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT);
}

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

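/* sysfs "inject_ctrl" store: write a hex syndrome value to inject an error */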
static ssize_t highbank_mc_inject_ctrl(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	u8 synd;

	if (kstrtou8(buf, 16, &synd))
		return -EINVAL;

	highbank_mc_err_inject(mci, synd);

	return count;
}

static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl);

static struct attribute *highbank_dev_attrs[] = {
	&dev_attr_inject_ctrl.attr,
	NULL
};

ATTRIBUTE_GROUPS(highbank_dev);

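/* Per-SoC register block offsets, selected through the OF match data */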
struct hb_mc_settings {
	int	err_offset;
	int	int_offset;
};

static struct hb_mc_settings hb_settings = {
	.err_offset = HB_DDR_ECC_ERR_BASE,
	.int_offset = HB_DDR_ECC_INT_BASE,
};

static struct hb_mc_settings mw_settings = {
	.err_offset = MW_DDR_ECC_ERR_BASE,
	.int_offset = MW_DDR_ECC_INT_BASE,
};

static const struct of_device_id hb_ddr_ctrl_of_match[] = {
	{ .compatible = "calxeda,hb-ddr-ctrl",		.data = &hb_settings },
	{ .compatible = "calxeda,ecx-2000-ddr-ctrl",	.data = &mw_settings },
	{},
};
MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);

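/*
 * Probe: pick the per-SoC register offsets from the OF match data, allocate
 * a one-csrow/one-channel MCI, map the controller registers, bail out if
 * the ECC mode bits show ECC absent or disabled, describe the single
 * supported DIMM, register the MCI with the inject_ctrl attribute group and
 * hook up the error interrupt.
 */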
static int highbank_mc_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	const struct hb_mc_settings *settings;
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct hb_mc_drvdata *drvdata;
	struct dimm_info *dimm;
	struct resource *r;
	void __iomem *base;
	u32 control;
	int irq;
	int res = 0;

	id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev);
	if (!id)
		return -ENODEV;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 1;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct hb_mc_drvdata));
	if (!mci)
		return -ENOMEM;

	mci->pdev = &pdev->dev;
	drvdata = mci->pvt_info;
	platform_set_drvdata(pdev, mci);

	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
		res = -ENOMEM;
		goto free;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "Unable to get mem resource\n");
		res = -ENODEV;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, r->start,
				     resource_size(r), dev_name(&pdev->dev))) {
		dev_err(&pdev->dev, "Error while requesting mem region\n");
		res = -EBUSY;
		goto err;
	}

	base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!base) {
		dev_err(&pdev->dev, "Unable to map regs\n");
		res = -ENOMEM;
		goto err;
	}

	settings = id->data;
	drvdata->mc_err_base = base + settings->err_offset;
	drvdata->mc_int_base = base + settings->int_offset;

	control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) &
		  HB_DDR_ECC_OPT_MODE_MASK;
	if (!control || (control == 0x2)) {
		dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
		res = -ENODEV;
		goto err;
	}

	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = pdev->dev.driver->name;
	mci->ctl_name = id->compatible;
	mci->dev_name = dev_name(&pdev->dev);
	mci->scrub_mode = SCRUB_SW_SRC;

	/* Only a single 4GB DIMM is supported */
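	/*
	 * (~0UL >> PAGE_SHIFT) + 1 is the page count of the full 4GB
	 * space, since unsigned long is 32 bits on these SoCs.
	 */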
	dimm = *mci->dimms;
	dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
	dimm->grain = 8;
	dimm->dtype = DEV_X8;
	dimm->mtype = MEM_DDR3;
	dimm->edac_mode = EDAC_SECDED;

	res = edac_mc_add_mc_with_groups(mci, highbank_dev_groups);
	if (res < 0)
		goto err;

	irq = platform_get_irq(pdev, 0);
	res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
			       0, dev_name(&pdev->dev), mci);
	if (res < 0) {
		dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
		goto err2;
	}

	devres_close_group(&pdev->dev, NULL);
	return 0;
err2:
	edac_mc_del_mc(&pdev->dev);
err:
	devres_release_group(&pdev->dev, NULL);
free:
	edac_mc_free(mci);
	return res;
}

static int highbank_mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
	return 0;
}

static struct platform_driver highbank_mc_edac_driver = {
	.probe = highbank_mc_probe,
	.remove = highbank_mc_remove,
	.driver = {
		.name = "hb_mc_edac",
		.of_match_table = hb_ddr_ctrl_of_match,
	},
};

module_platform_driver(highbank_mc_edac_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");