xref: /linux/drivers/edac/edac_mc_sysfs.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
/*
 * edac_mc kernel module
 * (C) 2005-2007 Linux Networx (http://lnxi.com)
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
 *
 * (c) 2012-2013 - Mauro Carvalho Chehab
 *	The entire API was rewritten and ported to use struct device
 *
 */

#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/bug.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include "edac_mc.h"
#include "edac_module.h"

/* MC EDAC Controls, settable by module parameter, and sysfs */
static int edac_mc_log_ue = 1;
static int edac_mc_log_ce = 1;
static int edac_mc_panic_on_ue;
static int edac_mc_poll_msec = 1000;

/* Getter functions for above */
int edac_mc_get_log_ue(void)
{
	return edac_mc_log_ue;
}

int edac_mc_get_log_ce(void)
{
	return edac_mc_log_ce;
}

int edac_mc_get_panic_on_ue(void)
{
	return edac_mc_panic_on_ue;
}

/* this is temporary */
int edac_mc_get_poll_msec(void)
{
	return edac_mc_poll_msec;
}

static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
{
	unsigned long l;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtoul(val, 0, &l);
	if (ret)
		return ret;

	if (l < 1000 || l > INT_MAX)
		return -EINVAL;

	/* kp->arg points at edac_mc_poll_msec, which is an int */
	*((int *)kp->arg) = l;

	/* notify edac_mc engine to reset the poll period */
	edac_mc_reset_delay_period(l);

	return 0;
}

/* Parameter declarations for above */
module_param(edac_mc_panic_on_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
module_param(edac_mc_log_ue, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ue,
		 "Log uncorrectable error to console: 0=off 1=on");
module_param(edac_mc_log_ce, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ce,
		 "Log correctable error to console: 0=off 1=on");
module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
		  &edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
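
/*
 * Illustrative usage (assuming the EDAC core is built as the edac_core
 * module): the parameters can be given at load time or changed at run time
 * through /sys/module/edac_core/parameters/, e.g.
 *
 *	echo 1    > /sys/module/edac_core/parameters/edac_mc_panic_on_ue
 *	echo 3000 > /sys/module/edac_core/parameters/edac_mc_poll_msec
 *
 * Writes to edac_mc_poll_msec go through edac_set_poll_msec() above, which
 * rejects values below 1000 ms with -EINVAL.
 */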

static struct device *mci_pdev;

/*
 * various constants for Memory Controllers
 */
static const char * const dev_types[] = {
	[DEV_UNKNOWN] = "Unknown",
	[DEV_X1] = "x1",
	[DEV_X2] = "x2",
	[DEV_X4] = "x4",
	[DEV_X8] = "x8",
	[DEV_X16] = "x16",
	[DEV_X32] = "x32",
	[DEV_X64] = "x64"
};

static const char * const edac_caps[] = {
	[EDAC_UNKNOWN] = "Unknown",
	[EDAC_NONE] = "None",
	[EDAC_RESERVED] = "Reserved",
	[EDAC_PARITY] = "PARITY",
	[EDAC_EC] = "EC",
	[EDAC_SECDED] = "SECDED",
	[EDAC_S2ECD2ED] = "S2ECD2ED",
	[EDAC_S4ECD4ED] = "S4ECD4ED",
	[EDAC_S8ECD8ED] = "S8ECD8ED",
	[EDAC_S16ECD16ED] = "S16ECD16ED"
};

#ifdef CONFIG_EDAC_LEGACY_SYSFS
/*
 * EDAC sysfs CSROW data structures and methods
 */

#define to_csrow(k) container_of(k, struct csrow_info, dev)

/*
 * We need it to avoid namespace conflicts between the legacy API
 * and the per-dimm/per-rank one
 */
#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
	static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
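
/*
 * Illustrative expansion: DEVICE_ATTR_LEGACY(size_mb, S_IRUGO,
 * csrow_size_show, NULL) defines a struct device_attribute named
 * dev_attr_legacy_size_mb, so it cannot collide with the dev_attr_<name>
 * symbols produced by the plain DEVICE_ATTR() uses further down in this
 * file (both the legacy csrow and the mci object expose size_mb, ue_count
 * and ce_count).
 */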

struct dev_ch_attribute {
	struct device_attribute attr;
	int channel;
};

#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
	static struct dev_ch_attribute dev_attr_legacy_##_name = \
		{ __ATTR(_name, _mode, _show, _store), (_var) }

#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)

/* Default csrow<id> attribute show/store functions */
static ssize_t csrow_ue_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%u\n", csrow->ue_count);
}

static ssize_t csrow_ce_count_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%u\n", csrow->ce_count);
}

static ssize_t csrow_size_show(struct device *dev,
			       struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	int i;
	u32 nr_pages = 0;

	for (i = 0; i < csrow->nr_channels; i++)
		nr_pages += csrow->channels[i]->dimm->nr_pages;
	return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
}

static ssize_t csrow_mem_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", edac_mem_types[csrow->channels[0]->dimm->mtype]);
}

static ssize_t csrow_dev_type_show(struct device *dev,
				   struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
}

static ssize_t csrow_edac_mode_show(struct device *dev,
				    struct device_attribute *mattr,
				    char *data)
{
	struct csrow_info *csrow = to_csrow(dev);

	return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
}

/* show/store functions for DIMM Label attributes */
static ssize_t channel_dimm_label_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	/* if field has not been initialized, there is nothing to send */
	if (!rank->dimm->label[0])
		return 0;

	return snprintf(data, sizeof(rank->dimm->label) + 1, "%s\n",
			rank->dimm->label);
}

static ssize_t channel_dimm_label_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];
	size_t copy_count = count;

	if (count == 0)
		return -EINVAL;

	if (data[count - 1] == '\0' || data[count - 1] == '\n')
		copy_count -= 1;

	if (copy_count == 0 || copy_count >= sizeof(rank->dimm->label))
		return -EINVAL;

	strncpy(rank->dimm->label, data, copy_count);
	rank->dimm->label[copy_count] = '\0';

	return count;
}

/* show function for dynamic chX_ce_count attribute */
static ssize_t channel_ce_count_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct csrow_info *csrow = to_csrow(dev);
	unsigned chan = to_channel(mattr);
	struct rank_info *rank = csrow->channels[chan];

	return sprintf(data, "%u\n", rank->ce_count);
}

/* csrow<id> attribute files */
DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);

/* default attributes of the CSROW<id> object */
static struct attribute *csrow_attrs[] = {
	&dev_attr_legacy_dev_type.attr,
	&dev_attr_legacy_mem_type.attr,
	&dev_attr_legacy_edac_mode.attr,
	&dev_attr_legacy_size_mb.attr,
	&dev_attr_legacy_ue_count.attr,
	&dev_attr_legacy_ce_count.attr,
	NULL,
};

static const struct attribute_group csrow_attr_grp = {
	.attrs	= csrow_attrs,
};

static const struct attribute_group *csrow_attr_groups[] = {
	&csrow_attr_grp,
	NULL
};

static void csrow_attr_release(struct device *dev)
{
	struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);

	edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
	kfree(csrow);
}

static const struct device_type csrow_attr_type = {
	.groups		= csrow_attr_groups,
	.release	= csrow_attr_release,
};

/*
 * possible dynamic channel DIMM Label attribute files
 */
DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 0);
DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 1);
DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 2);
DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 3);
DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 4);
DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 5);
DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 6);
DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
	channel_dimm_label_show, channel_dimm_label_store, 7);

/* Total possible dynamic DIMM Label attribute file table */
static struct attribute *dynamic_csrow_dimm_attr[] = {
	&dev_attr_legacy_ch0_dimm_label.attr.attr,
	&dev_attr_legacy_ch1_dimm_label.attr.attr,
	&dev_attr_legacy_ch2_dimm_label.attr.attr,
	&dev_attr_legacy_ch3_dimm_label.attr.attr,
	&dev_attr_legacy_ch4_dimm_label.attr.attr,
	&dev_attr_legacy_ch5_dimm_label.attr.attr,
	&dev_attr_legacy_ch6_dimm_label.attr.attr,
	&dev_attr_legacy_ch7_dimm_label.attr.attr,
	NULL
};

/* possible dynamic channel ce_count attribute files */
DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 0);
DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 1);
DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 2);
DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 3);
DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 4);
DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 5);
DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 6);
DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
		   channel_ce_count_show, NULL, 7);

/* Total possible dynamic ce_count attribute file table */
static struct attribute *dynamic_csrow_ce_count_attr[] = {
	&dev_attr_legacy_ch0_ce_count.attr.attr,
	&dev_attr_legacy_ch1_ce_count.attr.attr,
	&dev_attr_legacy_ch2_ce_count.attr.attr,
	&dev_attr_legacy_ch3_ce_count.attr.attr,
	&dev_attr_legacy_ch4_ce_count.attr.attr,
	&dev_attr_legacy_ch5_ce_count.attr.attr,
	&dev_attr_legacy_ch6_ce_count.attr.attr,
	&dev_attr_legacy_ch7_ce_count.attr.attr,
	NULL
};

static umode_t csrow_dev_is_visible(struct kobject *kobj,
				    struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);

	if (idx >= csrow->nr_channels)
		return 0;

	if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) {
		WARN_ONCE(1, "idx: %d\n", idx);
		return 0;
	}

	/* Only expose populated DIMMs */
	if (!csrow->channels[idx]->dimm->nr_pages)
		return 0;

	return attr->mode;
}

static const struct attribute_group csrow_dev_dimm_group = {
	.attrs = dynamic_csrow_dimm_attr,
	.is_visible = csrow_dev_is_visible,
};

static const struct attribute_group csrow_dev_ce_count_group = {
	.attrs = dynamic_csrow_ce_count_attr,
	.is_visible = csrow_dev_is_visible,
};

static const struct attribute_group *csrow_dev_groups[] = {
	&csrow_dev_dimm_group,
	&csrow_dev_ce_count_group,
	NULL
};

static inline int nr_pages_per_csrow(struct csrow_info *csrow)
{
	int chan, nr_pages = 0;

	for (chan = 0; chan < csrow->nr_channels; chan++)
		nr_pages += csrow->channels[chan]->dimm->nr_pages;

	return nr_pages;
}

/* Create a CSROW object under the specified edac_mc device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
				    struct csrow_info *csrow, int index)
{
	csrow->dev.type = &csrow_attr_type;
	csrow->dev.groups = csrow_dev_groups;
	device_initialize(&csrow->dev);
	csrow->dev.parent = &mci->dev;
	csrow->mci = mci;
	dev_set_name(&csrow->dev, "csrow%d", index);
	dev_set_drvdata(&csrow->dev, csrow);

	edac_dbg(0, "creating (virtual) csrow node %s\n",
		 dev_name(&csrow->dev));

	return device_add(&csrow->dev);
}

/* Create all CSROW objects under the specified edac_mc device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
	int err, i;
	struct csrow_info *csrow;

	for (i = 0; i < mci->nr_csrows; i++) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		err = edac_create_csrow_object(mci, mci->csrows[i], i);
		if (err < 0) {
			edac_dbg(1,
				 "failure: create csrow objects for csrow %d\n",
				 i);
			goto error;
		}
	}
	return 0;

error:
	for (--i; i >= 0; i--) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		put_device(&mci->csrows[i]->dev);
	}

	return err;
}

static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
{
	int i;
	struct csrow_info *csrow;

	for (i = mci->nr_csrows - 1; i >= 0; i--) {
		csrow = mci->csrows[i];
		if (!nr_pages_per_csrow(csrow))
			continue;
		device_unregister(&mci->csrows[i]->dev);
	}
}
#endif

/*
 * Per-dimm (or per-rank) devices
 */

#define to_dimm(k) container_of(k, struct dimm_info, dev)

/* show/store functions for DIMM Label attributes */
static ssize_t dimmdev_location_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return edac_dimm_info_location(dimm, data, PAGE_SIZE);
}

static ssize_t dimmdev_label_show(struct device *dev,
				  struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	/* if field has not been initialized, there is nothing to send */
	if (!dimm->label[0])
		return 0;

	return snprintf(data, sizeof(dimm->label) + 1, "%s\n", dimm->label);
}

static ssize_t dimmdev_label_store(struct device *dev,
				   struct device_attribute *mattr,
				   const char *data,
				   size_t count)
{
	struct dimm_info *dimm = to_dimm(dev);
	size_t copy_count = count;

	if (count == 0)
		return -EINVAL;

	if (data[count - 1] == '\0' || data[count - 1] == '\n')
		copy_count -= 1;

	if (copy_count == 0 || copy_count >= sizeof(dimm->label))
		return -EINVAL;

	strncpy(dimm->label, data, copy_count);
	dimm->label[copy_count] = '\0';

	return count;
}
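
/*
 * Illustrative usage (path assumes the default sysfs layout created by this
 * file, i.e. mci_pdev named "mc" on the edac subsystem):
 *
 *	echo "CPU_SrcA_Chan0_DIMM1" > \
 *		/sys/devices/system/edac/mc/mc0/dimm0/dimm_label
 *
 * A trailing newline from echo is stripped, and labels that do not fit in
 * dimm->label are rejected with -EINVAL.
 */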

static ssize_t dimmdev_size_show(struct device *dev,
				 struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
}

static ssize_t dimmdev_mem_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", edac_mem_types[dimm->mtype]);
}

static ssize_t dimmdev_dev_type_show(struct device *dev,
				     struct device_attribute *mattr, char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", dev_types[dimm->dtype]);
}

static ssize_t dimmdev_edac_mode_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct dimm_info *dimm = to_dimm(dev);

	return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
}

static ssize_t dimmdev_ce_count_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct dimm_info *dimm = to_dimm(dev);
	u32 count;
	int off;

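	/*
	 * EDAC_DIMM_OFF() linearizes this DIMM's per-layer location into a
	 * flat index used for the counters kept at the lowest (most
	 * specific) layer, i.e. the ce_per_layer[n_layers - 1] lookup below.
	 */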
	off = EDAC_DIMM_OFF(dimm->mci->layers,
			    dimm->mci->n_layers,
			    dimm->location[0],
			    dimm->location[1],
			    dimm->location[2]);
	count = dimm->mci->ce_per_layer[dimm->mci->n_layers-1][off];
	return sprintf(data, "%u\n", count);
}

static ssize_t dimmdev_ue_count_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *data)
{
	struct dimm_info *dimm = to_dimm(dev);
	u32 count;
	int off;

	off = EDAC_DIMM_OFF(dimm->mci->layers,
			    dimm->mci->n_layers,
			    dimm->location[0],
			    dimm->location[1],
			    dimm->location[2]);
	count = dimm->mci->ue_per_layer[dimm->mci->n_layers-1][off];
	return sprintf(data, "%u\n", count);
}

/* dimm/rank attribute files */
static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
		   dimmdev_label_show, dimmdev_label_store);
static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
static DEVICE_ATTR(dimm_ce_count, S_IRUGO, dimmdev_ce_count_show, NULL);
static DEVICE_ATTR(dimm_ue_count, S_IRUGO, dimmdev_ue_count_show, NULL);

/* attributes of the dimm<id>/rank<id> object */
static struct attribute *dimm_attrs[] = {
	&dev_attr_dimm_label.attr,
	&dev_attr_dimm_location.attr,
	&dev_attr_size.attr,
	&dev_attr_dimm_mem_type.attr,
	&dev_attr_dimm_dev_type.attr,
	&dev_attr_dimm_edac_mode.attr,
	&dev_attr_dimm_ce_count.attr,
	&dev_attr_dimm_ue_count.attr,
	NULL,
};

static const struct attribute_group dimm_attr_grp = {
	.attrs	= dimm_attrs,
};

static const struct attribute_group *dimm_attr_groups[] = {
	&dimm_attr_grp,
	NULL
};

static void dimm_attr_release(struct device *dev)
{
	struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);

	edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
	kfree(dimm);
}

static const struct device_type dimm_attr_type = {
	.groups		= dimm_attr_groups,
	.release	= dimm_attr_release,
};

/* Create a DIMM object under the specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
				   struct dimm_info *dimm,
				   int index)
{
	int err;

	dimm->mci = mci;

	dimm->dev.type = &dimm_attr_type;
	device_initialize(&dimm->dev);

	dimm->dev.parent = &mci->dev;
	if (mci->csbased)
		dev_set_name(&dimm->dev, "rank%d", index);
	else
		dev_set_name(&dimm->dev, "dimm%d", index);
	dev_set_drvdata(&dimm->dev, dimm);
	pm_runtime_forbid(&mci->dev);

	err = device_add(&dimm->dev);

	edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));

	return err;
}

/*
 * Memory controller device
 */

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static ssize_t mci_reset_counters_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int cnt, row, chan, i;

	mci->ue_mc = 0;
	mci->ce_mc = 0;
	mci->ue_noinfo_count = 0;
	mci->ce_noinfo_count = 0;

	for (row = 0; row < mci->nr_csrows; row++) {
		struct csrow_info *ri = mci->csrows[row];

		ri->ue_count = 0;
		ri->ce_count = 0;

		for (chan = 0; chan < ri->nr_channels; chan++)
			ri->channels[chan]->ce_count = 0;
	}

	cnt = 1;
	for (i = 0; i < mci->n_layers; i++) {
		cnt *= mci->layers[i].size;
		memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
		memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
	}

	mci->start_time = jiffies;
	return count;
}
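
/*
 * All of the counters cleared above are exposed read-only further down;
 * user space can reset them by writing anything to reset_counters, e.g.
 * (illustrative path):
 *
 *	echo 1 > /sys/devices/system/edac/mc/mc0/reset_counters
 */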

/* Memory scrubbing interface:
 *
 * An MC driver can limit the scrubbing bandwidth based on the CPU type.
 * Therefore, ->set_sdram_scrub_rate should return the bandwidth that was
 * actually accepted, or 0 when scrubbing has been disabled.
 *
 * A negative return value still means that an error occurred while setting
 * the scrub rate. See the illustrative driver-side sketch after
 * mci_sdram_scrub_rate_show() below.
 */
static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
					  struct device_attribute *mattr,
					  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	unsigned long bandwidth = 0;
	int new_bw = 0;

	if (kstrtoul(data, 10, &bandwidth) < 0)
		return -EINVAL;

	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
	if (new_bw < 0) {
		edac_printk(KERN_WARNING, EDAC_MC,
			    "Error setting scrub rate to: %lu\n", bandwidth);
		return -EINVAL;
	}

	return count;
}

/*
 * ->get_sdram_scrub_rate() return value semantics same as above.
 */
static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
					 struct device_attribute *mattr,
					 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int bandwidth = 0;

	bandwidth = mci->get_sdram_scrub_rate(mci);
	if (bandwidth < 0) {
		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
		return bandwidth;
	}

	return sprintf(data, "%d\n", bandwidth);
}
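
/*
 * Illustrative driver-side sketch of the contract described above (not part
 * of this file; mydrv_write_scrub_reg() and MYDRV_MAX_SCRUB_BW are made-up
 * names):
 *
 *	static int mydrv_set_sdram_scrub_rate(struct mem_ctl_info *mci,
 *					      u32 new_bw)
 *	{
 *		u32 applied = min_t(u32, new_bw, MYDRV_MAX_SCRUB_BW);
 *
 *		if (mydrv_write_scrub_reg(mci, applied))
 *			return -ENODEV;
 *
 *		return applied;
 *	}
 *
 * Returning 0 would indicate that scrubbing ended up disabled.
 * mci_sdram_scrub_rate_store() only checks for a negative return value;
 * reads always go through ->get_sdram_scrub_rate().
 */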

/* default attribute files for the MCI object */
static ssize_t mci_ue_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ue_mc);
}

static ssize_t mci_ce_count_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ce_mc);
}

static ssize_t mci_ce_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ce_noinfo_count);
}

static ssize_t mci_ue_noinfo_show(struct device *dev,
				  struct device_attribute *mattr,
				  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%d\n", mci->ue_noinfo_count);
}

static ssize_t mci_seconds_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
}

static ssize_t mci_ctl_name_show(struct device *dev,
				 struct device_attribute *mattr,
				 char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	return sprintf(data, "%s\n", mci->ctl_name);
}

static ssize_t mci_size_mb_show(struct device *dev,
				struct device_attribute *mattr,
				char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int total_pages = 0, csrow_idx, j;

	for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
		struct csrow_info *csrow = mci->csrows[csrow_idx];

		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;

			total_pages += dimm->nr_pages;
		}
	}

	return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
}

static ssize_t mci_max_location_show(struct device *dev,
				     struct device_attribute *mattr,
				     char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	int i;
	char *p = data;

	for (i = 0; i < mci->n_layers; i++) {
		p += sprintf(p, "%s %d ",
			     edac_layer_name[mci->layers[i].type],
			     mci->layers[i].size - 1);
	}

	return p - data;
}
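
/*
 * Example output (illustrative): for a controller described by two layers,
 * channel and slot, with four channels and two slots per channel,
 * max_location would read "channel 3 slot 1 ", i.e. the highest valid
 * index of each layer in layer order, using the names from
 * edac_layer_name[].
 */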

/* default Control file */
static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);

/* default Attribute files */
static DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
static DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
static DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
static DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
static DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
static DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
static DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
static DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);

/* memory scrubber attribute file */
static DEVICE_ATTR(sdram_scrub_rate, 0, mci_sdram_scrub_rate_show,
	    mci_sdram_scrub_rate_store); /* umode set later in is_visible */

static struct attribute *mci_attrs[] = {
	&dev_attr_reset_counters.attr,
	&dev_attr_mc_name.attr,
	&dev_attr_size_mb.attr,
	&dev_attr_seconds_since_reset.attr,
	&dev_attr_ue_noinfo_count.attr,
	&dev_attr_ce_noinfo_count.attr,
	&dev_attr_ue_count.attr,
	&dev_attr_ce_count.attr,
	&dev_attr_max_location.attr,
	&dev_attr_sdram_scrub_rate.attr,
	NULL
};

static umode_t mci_attr_is_visible(struct kobject *kobj,
				   struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mem_ctl_info *mci = to_mci(dev);
	umode_t mode = 0;

	if (attr != &dev_attr_sdram_scrub_rate.attr)
		return attr->mode;
	if (mci->get_sdram_scrub_rate)
		mode |= S_IRUGO;
	if (mci->set_sdram_scrub_rate)
		mode |= S_IWUSR;
	return mode;
}

static const struct attribute_group mci_attr_grp = {
	.attrs	= mci_attrs,
	.is_visible = mci_attr_is_visible,
};

static const struct attribute_group *mci_attr_groups[] = {
	&mci_attr_grp,
	NULL
};

static void mci_attr_release(struct device *dev)
{
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);

	edac_dbg(1, "Releasing mci device %s\n", dev_name(dev));
	kfree(mci);
}

static const struct device_type mci_attr_type = {
	.groups		= mci_attr_groups,
	.release	= mci_attr_release,
};

/*
 * Create a new Memory Controller kobject instance,
 *	mc<id> under the 'mc' directory
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
				 const struct attribute_group **groups)
{
	int i, err;

	/* get the /sys/devices/system/edac subsys reference */
	mci->dev.type = &mci_attr_type;
	device_initialize(&mci->dev);

	mci->dev.parent = mci_pdev;
	mci->dev.groups = groups;
	dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
	dev_set_drvdata(&mci->dev, mci);
	pm_runtime_forbid(&mci->dev);

	edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
	err = device_add(&mci->dev);
	if (err < 0) {
		edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
		goto out;
	}

	/*
	 * Create the dimm/rank devices
	 */
	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];
		/* Only expose populated DIMMs */
		if (!dimm->nr_pages)
			continue;

#ifdef CONFIG_EDAC_DEBUG
		edac_dbg(1, "creating dimm%d, located at ", i);
		if (edac_debug_level >= 1) {
			int lay;

			for (lay = 0; lay < mci->n_layers; lay++)
				printk(KERN_CONT "%s %d ",
					edac_layer_name[mci->layers[lay].type],
					dimm->location[lay]);
			printk(KERN_CONT "\n");
		}
#endif
		err = edac_create_dimm_object(mci, dimm, i);
		if (err) {
			edac_dbg(1, "failure: create dimm %d obj\n", i);
			goto fail_unregister_dimm;
		}
	}

#ifdef CONFIG_EDAC_LEGACY_SYSFS
	err = edac_create_csrow_objects(mci);
	if (err < 0)
		goto fail_unregister_dimm;
#endif

	edac_create_debugfs_nodes(mci);
	return 0;

fail_unregister_dimm:
	for (i--; i >= 0; i--) {
		struct dimm_info *dimm = mci->dimms[i];
		if (!dimm->nr_pages)
			continue;

		device_unregister(&dimm->dev);
	}
	device_unregister(&mci->dev);

out:
	return err;
}
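
/*
 * Resulting sysfs layout (illustrative, for a single controller with the
 * legacy API enabled; exact entries depend on the driver and topology):
 *
 *	/sys/devices/system/edac/mc/mc0/	ce_count, ue_count, mc_name, ...
 *	/sys/devices/system/edac/mc/mc0/dimm0/	dimm_label, dimm_ce_count, ...
 *		(named rank<N> instead of dimm<N> when mci->csbased is set)
 *	/sys/devices/system/edac/mc/mc0/csrow0/	size_mb, ch0_dimm_label, ...
 *		(CONFIG_EDAC_LEGACY_SYSFS only)
 */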

/*
 * remove a Memory Controller instance
 */
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
	int i;

	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	edac_debugfs_remove_recursive(mci->debugfs);
#endif
#ifdef CONFIG_EDAC_LEGACY_SYSFS
	edac_delete_csrow_objects(mci);
#endif

	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];
		if (dimm->nr_pages == 0)
			continue;
		edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
		device_unregister(&dimm->dev);
	}
}

void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
	device_unregister(&mci->dev);
}

static void mc_attr_release(struct device *dev)
{
	/*
	 * There's no container structure here, as this is just the mci
	 * parent device, used to create the /sys/devices/system/edac/mc
	 * sysfs node. So, there are no attributes on it.
	 */
	edac_dbg(1, "Releasing device %s\n", dev_name(dev));
	kfree(dev);
}

static const struct device_type mc_attr_type = {
	.release	= mc_attr_release,
};

/*
 * Init/exit code for the module. Basically, creates/removes
 * /sys/devices/system/edac/mc.
 */
int __init edac_mc_sysfs_init(void)
{
	int err;

	mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
	if (!mci_pdev) {
		err = -ENOMEM;
		goto out;
	}

	mci_pdev->bus = edac_get_sysfs_subsys();
	mci_pdev->type = &mc_attr_type;
	device_initialize(mci_pdev);
	dev_set_name(mci_pdev, "mc");

	err = device_add(mci_pdev);
	if (err < 0)
		goto out_put_device;

	edac_dbg(0, "device %s created\n", dev_name(mci_pdev));

	return 0;

 out_put_device:
	put_device(mci_pdev);
 out:
	return err;
}

void edac_mc_sysfs_exit(void)
{
	device_unregister(mci_pdev);
}