// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine Error Management
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel, Henry <henry.mitchel@intel.com>
 */

#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>

#include "dfl.h"
#include "dfl-fme.h"

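/*
 * Register offsets below are relative to the ioaddr of the FME global
 * error reporting private feature (FME_FEATURE_ID_GLOBAL_ERR).
 */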
#define FME_ERROR_MASK		0x8
#define FME_ERROR		0x10
#define MBP_ERROR		BIT_ULL(6)
#define PCIE0_ERROR_MASK	0x18
#define PCIE0_ERROR		0x20
#define PCIE1_ERROR_MASK	0x28
#define PCIE1_ERROR		0x30
#define FME_FIRST_ERROR		0x38
#define FME_NEXT_ERROR		0x40
#define RAS_NONFAT_ERROR_MASK	0x48
#define RAS_NONFAT_ERROR	0x50
#define RAS_CATFAT_ERROR_MASK	0x58
#define RAS_CATFAT_ERROR	0x60
#define RAS_ERROR_INJECT	0x68
#define INJECT_ERROR_MASK	GENMASK_ULL(2, 0)

#define ERROR_MASK		GENMASK_ULL(63, 0)

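/*
 * pcie0_errors is read/write: reads return the logged PCIe0 error
 * status; a write must match the value currently logged, which is then
 * written back to hardware to clear it. Error reporting is masked for
 * the duration of the clear and unmasked again afterwards.
 */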
static ssize_t pcie0_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&fdata->lock);
	value = readq(base + PCIE0_ERROR);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie0_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&fdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);

	v = readq(base + PCIE0_ERROR);
	if (val == v)
		writeq(v, base + PCIE0_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE0_ERROR_MASK);
	mutex_unlock(&fdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);

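/* pcie1_errors follows the same read and write-to-clear protocol. */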
static ssize_t pcie1_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&fdata->lock);
	value = readq(base + PCIE1_ERROR);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie1_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&fdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);

	v = readq(base + PCIE1_ERROR);
	if (val == v)
		writeq(v, base + PCIE1_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE1_ERROR_MASK);
	mutex_unlock(&fdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);

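/*
 * nonfatal_errors and catfatal_errors are read-only views of the RAS
 * non-fatal and catastrophic/fatal error status registers.
 */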
static ssize_t nonfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_NONFAT_ERROR));
}
static DEVICE_ATTR_RO(nonfatal_errors);

static ssize_t catfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_CATFAT_ERROR));
}
static DEVICE_ATTR_RO(catfatal_errors);

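/*
 * inject_errors drives the low three bits of RAS_ERROR_INJECT; writes
 * with any bit set outside INJECT_ERROR_MASK are rejected with -EINVAL.
 */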
static ssize_t inject_errors_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&fdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
}

static ssize_t inject_errors_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u8 inject_error;
	u64 v;

	if (kstrtou8(buf, 0, &inject_error))
		return -EINVAL;

	if (inject_error & ~INJECT_ERROR_MASK)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&fdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	v &= ~INJECT_ERROR_MASK;
	v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
	writeq(v, base + RAS_ERROR_INJECT);
	mutex_unlock(&fdata->lock);

	return count;
}
static DEVICE_ATTR_RW(inject_errors);

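/*
 * fme_errors uses the same write-to-clear protocol as pcie0_errors,
 * except that unmasking keeps MBP_ERROR masked on revision 0 hardware
 * (see the workaround below and in fme_err_mask()).
 */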
static ssize_t fme_errors_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&fdata->lock);
	value = readq(base + FME_ERROR);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t fme_errors_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 v, val;
	int ret = 0;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&fdata->lock);
	writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);

	v = readq(base + FME_ERROR);
	if (val == v)
		writeq(v, base + FME_ERROR);
	else
		ret = -EINVAL;

	/* Workaround: disable MBP_ERROR if feature revision is 0 */
	writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
	       base + FME_ERROR_MASK);
	mutex_unlock(&fdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);

static ssize_t first_error_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&fdata->lock);
	value = readq(base + FME_FIRST_ERROR);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(first_error);

static ssize_t next_error_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&fdata->lock);
	value = readq(base + FME_NEXT_ERROR);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(next_error);

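/* All of the attributes above are exposed under the "errors" sysfs directory. */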
static struct attribute *fme_global_err_attrs[] = {
	&dev_attr_pcie0_errors.attr,
	&dev_attr_pcie1_errors.attr,
	&dev_attr_nonfatal_errors.attr,
	&dev_attr_catfatal_errors.attr,
	&dev_attr_inject_errors.attr,
	&dev_attr_fme_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_next_error.attr,
	NULL,
};

static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct dfl_feature_dev_data *fdata;

	fdata = to_dfl_feature_dev_data(dev);
	/*
	 * sysfs entries are visible only if the related private feature
	 * is enumerated.
	 */
	if (!dfl_get_feature_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR))
		return 0;

	return attr->mode;
}

const struct attribute_group fme_global_err_group = {
	.name       = "errors",
	.attrs      = fme_global_err_attrs,
	.is_visible = fme_global_err_attrs_visible,
};

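/*
 * Mask (all ones) or unmask (zero) every error reporting block. On
 * revision 0 hardware, MBP_ERROR stays masked even when unmasking.
 */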
static void fme_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&fdata->lock);

	/* Workaround: keep MBP_ERROR always masked if revision is 0 */
	if (dfl_feature_revision(base))
		writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
	else
		writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);

	writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);

	mutex_unlock(&fdata->lock);
}

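/* Unmask errors on sub-feature init; mask them again on teardown. */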
static int fme_global_err_init(struct platform_device *pdev,
			       struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, false);

	return 0;
}

static void fme_global_err_uinit(struct platform_device *pdev,
				 struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, true);
}

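/*
 * ioctl interface for the error interrupt: query the number of IRQs
 * available and set up IRQ delivery to userspace.
 */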
static long
fme_global_error_ioctl(struct platform_device *pdev,
		       struct dfl_feature *feature,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_FME_ERR_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_FME_ERR_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled\n", cmd);
		return -ENODEV;
	}
}

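/* Feature ID match table and callbacks for the global error sub-feature. */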
const struct dfl_feature_id fme_global_err_id_table[] = {
	{.id = FME_FEATURE_ID_GLOBAL_ERR,},
	{0,}
};

const struct dfl_feature_ops fme_global_err_ops = {
	.init = fme_global_err_init,
	.uinit = fme_global_err_uinit,
	.ioctl = fme_global_error_ioctl,
};
382