xref: /linux/drivers/edac/edac_device.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 
2 /*
3  * edac_device.c
4  * (C) 2007 www.douglaskthompson.com
5  *
6  * This file may be distributed under the terms of the
7  * GNU General Public License.
8  *
9  * Written by Doug Thompson <norsk5@xmission.com>
10  *
11  * edac_device API implementation
12  * 19 Jan 2007
13  */
14 
15 #include <asm/page.h>
16 #include <linux/uaccess.h>
17 #include <linux/ctype.h>
18 #include <linux/highmem.h>
19 #include <linux/init.h>
20 #include <linux/jiffies.h>
21 #include <linux/module.h>
22 #include <linux/slab.h>
23 #include <linux/smp.h>
24 #include <linux/spinlock.h>
25 #include <linux/sysctl.h>
26 #include <linux/timer.h>
27 
28 #include "edac_device.h"
29 #include "edac_module.h"
30 
/* Global list of control structures: 'edac_device_list'; all
 * manipulation of this list is protected by 'device_ctls_mutex'.
 */
34 static DEFINE_MUTEX(device_ctls_mutex);
35 static LIST_HEAD(edac_device_list);
36 
37 /* Default workqueue processing interval on this instance, in msecs */
38 #define DEFAULT_POLL_INTERVAL 1000
39 
40 #ifdef CONFIG_EDAC_DEBUG
/*
 * edac_device_dump_device - dump basic fields of a control structure
 * @edac_dev: the structure to dump
 *
 * Debug-only helper (compiled under CONFIG_EDAC_DEBUG): prints the
 * structure pointer and index, the check-routine pointer, the owning
 * 'struct device', the module/controller names and the private data
 * pointer, at edac debug levels 3 and 4.
 */
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
	edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n",
		 edac_dev, edac_dev->dev_idx);
	edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
	edac_dbg(3, "\tdev = %p\n", edac_dev->dev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 edac_dev->mod_name, edac_dev->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
51 #endif				/* CONFIG_EDAC_DEBUG */
52 
/*
 * @off_val: base offset (zero, 1, or other) added to the block number
 *	     when naming each block
 */
56 struct edac_device_ctl_info *
57 edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instances,
58 			   char *blk_name, unsigned nr_blocks, unsigned off_val,
59 			   int device_index)
60 {
61 	struct edac_device_block *dev_blk, *blk_p, *blk;
62 	struct edac_device_instance *dev_inst, *inst;
63 	struct edac_device_ctl_info *dev_ctl;
64 	unsigned instance, block;
65 	void *pvt;
66 	int err;
67 
68 	edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);
69 
70 	dev_ctl = kzalloc_obj(struct edac_device_ctl_info, GFP_KERNEL);
71 	if (!dev_ctl)
72 		return NULL;
73 
74 	dev_inst = kzalloc_objs(struct edac_device_instance, nr_instances,
75 				GFP_KERNEL);
76 	if (!dev_inst)
77 		goto free;
78 
79 	dev_ctl->instances = dev_inst;
80 
81 	dev_blk = kzalloc_objs(struct edac_device_block,
82 			       nr_instances * nr_blocks, GFP_KERNEL);
83 	if (!dev_blk)
84 		goto free;
85 
86 	dev_ctl->blocks = dev_blk;
87 
88 	if (pvt_sz) {
89 		pvt = kzalloc(pvt_sz, GFP_KERNEL);
90 		if (!pvt)
91 			goto free;
92 
93 		dev_ctl->pvt_info = pvt;
94 	}
95 
96 	dev_ctl->dev_idx	= device_index;
97 	dev_ctl->nr_instances	= nr_instances;
98 
99 	/* Default logging of CEs and UEs */
100 	dev_ctl->log_ce = 1;
101 	dev_ctl->log_ue = 1;
102 
103 	/* Name of this edac device */
104 	snprintf(dev_ctl->name, sizeof(dev_ctl->name),"%s", dev_name);
105 
106 	/* Initialize every Instance */
107 	for (instance = 0; instance < nr_instances; instance++) {
108 		inst = &dev_inst[instance];
109 		inst->ctl = dev_ctl;
110 		inst->nr_blocks = nr_blocks;
111 		blk_p = &dev_blk[instance * nr_blocks];
112 		inst->blocks = blk_p;
113 
114 		/* name of this instance */
115 		snprintf(inst->name, sizeof(inst->name), "%s%u", dev_name, instance);
116 
117 		/* Initialize every block in each instance */
118 		for (block = 0; block < nr_blocks; block++) {
119 			blk = &blk_p[block];
120 			blk->instance = inst;
121 			snprintf(blk->name, sizeof(blk->name),
122 				 "%s%d", blk_name, block + off_val);
123 
124 			edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
125 				 instance, inst, block, blk, blk->name);
126 		}
127 	}
128 
129 	/* Mark this instance as merely ALLOCATED */
130 	dev_ctl->op_state = OP_ALLOC;
131 
132 	/*
133 	 * Initialize the 'root' kobj for the edac_device controller
134 	 */
135 	err = edac_device_register_sysfs_main_kobj(dev_ctl);
136 	if (err)
137 		goto free;
138 
139 	/* at this point, the root kobj is valid, and in order to
140 	 * 'free' the object, then the function:
141 	 *	edac_device_unregister_sysfs_main_kobj() must be called
142 	 * which will perform kobj unregistration and the actual free
143 	 * will occur during the kobject callback operation
144 	 */
145 
146 	return dev_ctl;
147 
148 free:
149 	__edac_device_free_ctl_info(dev_ctl);
150 
151 	return NULL;
152 }
153 EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
154 
/*
 * edac_device_free_ctl_info - release a control structure
 * @ctl_info: structure previously obtained from edac_device_alloc_ctl_info()
 *
 * Only unregisters the main sysfs kobject; the actual memory free
 * happens later from the kobject release callback (see the comment at
 * the end of edac_device_alloc_ctl_info()).
 */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
	edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);
160 
161 /*
162  * find_edac_device_by_dev
163  *	scans the edac_device list for a specific 'struct device *'
164  *
165  *	lock to be held prior to call:	device_ctls_mutex
166  *
167  *	Return:
168  *		pointer to control structure managing 'dev'
169  *		NULL if not found on list
170  */
171 static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
172 {
173 	struct edac_device_ctl_info *edac_dev;
174 	struct list_head *item;
175 
176 	edac_dbg(0, "\n");
177 
178 	list_for_each(item, &edac_device_list) {
179 		edac_dev = list_entry(item, struct edac_device_ctl_info, link);
180 
181 		if (edac_dev->dev == dev)
182 			return edac_dev;
183 	}
184 
185 	return NULL;
186 }
187 
188 /*
189  * add_edac_dev_to_global_list
190  *	Before calling this function, caller must
191  *	assign a unique value to edac_dev->dev_idx.
192  *
193  *	lock to be held prior to call:	device_ctls_mutex
194  *
195  *	Return:
196  *		0 on success
197  *		1 on failure.
198  */
199 static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
200 {
201 	struct list_head *item, *insert_before;
202 	struct edac_device_ctl_info *rover;
203 
204 	insert_before = &edac_device_list;
205 
206 	/* Determine if already on the list */
207 	rover = find_edac_device_by_dev(edac_dev->dev);
208 	if (unlikely(rover != NULL))
209 		goto fail0;
210 
211 	/* Insert in ascending order by 'dev_idx', so find position */
212 	list_for_each(item, &edac_device_list) {
213 		rover = list_entry(item, struct edac_device_ctl_info, link);
214 
215 		if (rover->dev_idx >= edac_dev->dev_idx) {
216 			if (unlikely(rover->dev_idx == edac_dev->dev_idx))
217 				goto fail1;
218 
219 			insert_before = item;
220 			break;
221 		}
222 	}
223 
224 	list_add_tail_rcu(&edac_dev->link, insert_before);
225 	return 0;
226 
227 fail0:
228 	edac_printk(KERN_WARNING, EDAC_MC,
229 			"%s (%s) %s %s already assigned %d\n",
230 			dev_name(rover->dev), edac_dev_name(rover),
231 			rover->mod_name, rover->ctl_name, rover->dev_idx);
232 	return 1;
233 
234 fail1:
235 	edac_printk(KERN_WARNING, EDAC_MC,
236 			"bug in low-level driver: attempt to assign\n"
237 			"    duplicate dev_idx %d in %s()\n", rover->dev_idx,
238 			__func__);
239 	return 1;
240 }
241 
242 /*
243  * del_edac_device_from_global_list
244  */
static void del_edac_device_from_global_list(struct edac_device_ctl_info
						*edac_device)
{
	/* RCU removal: readers traversing the list see either the old
	 * or the new list, never a half-unlinked node
	 */
	list_del_rcu(&edac_device->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	/* re-initialize the link so the node is in a defined (empty)
	 * state after removal, rather than carrying list poison values
	 */
	INIT_LIST_HEAD(&edac_device->link);
}
256 
257 /*
258  * edac_device_workq_function
259  *	performs the operation scheduled by a workq request
260  *
261  *	this workq is embedded within an edac_device_ctl_info
262  *	structure, that needs to be polled for possible error events.
263  *
264  *	This operation is to acquire the list mutex lock
 *	(thus preventing insertion or deletion)
266  *	and then call the device's poll function IFF this device is
267  *	running polled and there is a poll function defined.
268  */
269 static void edac_device_workq_function(struct work_struct *work_req)
270 {
271 	struct delayed_work *d_work = to_delayed_work(work_req);
272 	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);
273 
274 	mutex_lock(&device_ctls_mutex);
275 
276 	/* If we are being removed, bail out immediately */
277 	if (edac_dev->op_state == OP_OFFLINE) {
278 		mutex_unlock(&device_ctls_mutex);
279 		return;
280 	}
281 
282 	/* Only poll controllers that are running polled and have a check */
283 	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
284 		(edac_dev->edac_check != NULL)) {
285 			edac_dev->edac_check(edac_dev);
286 	}
287 
288 	mutex_unlock(&device_ctls_mutex);
289 
290 	/* Reschedule the workq for the next time period to start again
291 	 * if the number of msec is for 1 sec, then adjust to the next
292 	 * whole one second to save timers firing all over the period
293 	 * between integral seconds
294 	 */
295 	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
296 		edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
297 	else
298 		edac_queue_work(&edac_dev->work, edac_dev->delay);
299 }
300 
301 /*
302  * edac_device_workq_setup
303  *	initialize a workq item for this edac_device instance
304  *	passing in the new delay period in msec
305  */
306 static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
307 				    unsigned msec)
308 {
309 	edac_dbg(0, "\n");
310 
311 	/* take the arg 'msec' and set it into the control structure
312 	 * to used in the time period calculation
313 	 * then calc the number of jiffies that represents
314 	 */
315 	edac_dev->poll_msec = msec;
316 	edac_dev->delay = msecs_to_jiffies(msec);
317 
318 	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
319 
320 	/* optimize here for the 1 second case, which will be normal value, to
321 	 * fire ON the 1 second time event. This helps reduce all sorts of
322 	 * timers firing on sub-second basis, while they are happy
323 	 * to fire together on the 1 second exactly
324 	 */
325 	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
326 		edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
327 	else
328 		edac_queue_work(&edac_dev->work, edac_dev->delay);
329 }
330 
331 /*
332  * edac_device_workq_teardown
333  *	stop the workq processing on this edac_dev
334  */
335 static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
336 {
337 	if (!edac_dev->edac_check)
338 		return;
339 
340 	edac_dev->op_state = OP_OFFLINE;
341 
342 	edac_stop_work(&edac_dev->work);
343 }
344 
345 /*
346  * edac_device_reset_delay_period
347  *
348  *	need to stop any outstanding workq queued up at this time
349  *	because we will be resetting the sleep time.
350  *	Then restart the workq on the new delay
351  */
352 void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
353 				    unsigned long msec)
354 {
355 	edac_dev->poll_msec = msec;
356 	edac_dev->delay	    = msecs_to_jiffies(msec);
357 
358 	/* See comment in edac_device_workq_setup() above */
359 	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
360 		edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
361 	else
362 		edac_mod_work(&edac_dev->work, edac_dev->delay);
363 }
364 
365 int edac_device_alloc_index(void)
366 {
367 	static atomic_t device_indexes = ATOMIC_INIT(0);
368 
369 	return atomic_inc_return(&device_indexes) - 1;
370 }
371 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
372 
/*
 * edac_device_add_device - insert a control structure into the EDAC core
 * @edac_dev: structure from edac_device_alloc_ctl_info(); caller must
 *	      have assigned a unique ->dev_idx beforehand
 *
 * Adds the device to the global list, creates its sysfs entries, and
 * starts polling if the driver supplied an ->edac_check routine
 * (otherwise the device is assumed to be interrupt driven).
 *
 * Return: 0 on success, 1 on any failure.
 */
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_device_dump_device(edac_dev);
#endif
	mutex_lock(&device_ctls_mutex);

	if (add_edac_dev_to_global_list(edac_dev))
		goto fail0;

	/* set load time so that error rate can be tracked */
	edac_dev->start_time = jiffies;

	/* create this instance's sysfs entries */
	if (edac_device_create_sysfs(edac_dev)) {
		edac_device_printk(edac_dev, KERN_WARNING,
					"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (edac_dev->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		edac_dev->op_state = OP_RUNNING_POLL;

		/* fall back to the default interval if the driver set none */
		edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
	} else {
		edac_dev->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_device_printk(edac_dev, KERN_INFO,
		"Giving out device to module %s controller %s: DEV %s (%s)\n",
		edac_dev->mod_name, edac_dev->ctl_name, edac_dev->dev_name,
		edac_op_state_to_string(edac_dev->op_state));

	mutex_unlock(&device_ctls_mutex);
	return 0;

fail1:
	/* Some error, so remove the entry from the list */
	del_edac_device_from_global_list(edac_dev);

fail0:
	mutex_unlock(&device_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_device_add_device);
424 
/*
 * edac_device_del_device - remove a control structure from the EDAC core
 * @dev: the 'struct device *' the control structure was registered with
 *
 * Marks the instance OFFLINE, unlinks it from the global list (under
 * device_ctls_mutex), then stops its polling work and removes its sysfs
 * entries.
 *
 * Return: the removed control structure, or NULL if @dev was not found.
 *	   The caller is then expected to release it (presumably via
 *	   edac_device_free_ctl_info() — confirm against callers).
 */
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;

	edac_dbg(0, "\n");

	mutex_lock(&device_ctls_mutex);

	/* Find the structure on the list, if not there, then leave */
	edac_dev = find_edac_device_by_dev(dev);
	if (edac_dev == NULL) {
		mutex_unlock(&device_ctls_mutex);
		return NULL;
	}

	/* mark this instance as OFFLINE */
	edac_dev->op_state = OP_OFFLINE;

	/* deregister from global list */
	del_edac_device_from_global_list(edac_dev);

	mutex_unlock(&device_ctls_mutex);

	/* clear workq processing on this instance */
	edac_device_workq_teardown(edac_dev);

	/* Tear down the sysfs entries for this instance */
	edac_device_remove_sysfs(edac_dev);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n",
		edac_dev->dev_idx,
		edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));

	return edac_dev;
}
EXPORT_SYMBOL_GPL(edac_device_del_device);
462 
/* Whether corrected errors should be logged for this controller */
static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ce;
}
467 
/* Whether uncorrected errors should be logged for this controller */
static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ue;
}
472 
/* Whether an uncorrected error should panic the system */
static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
					*edac_dev)
{
	return edac_dev->panic_on_ue;
}
478 
479 void edac_device_handle_ce_count(struct edac_device_ctl_info *edac_dev,
480 				 unsigned int count, int inst_nr, int block_nr,
481 				 const char *msg)
482 {
483 	struct edac_device_instance *instance;
484 	struct edac_device_block *block = NULL;
485 
486 	if (!count)
487 		return;
488 
489 	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
490 		edac_device_printk(edac_dev, KERN_ERR,
491 				"INTERNAL ERROR: 'instance' out of range "
492 				"(%d >= %d)\n", inst_nr,
493 				edac_dev->nr_instances);
494 		return;
495 	}
496 
497 	instance = edac_dev->instances + inst_nr;
498 
499 	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
500 		edac_device_printk(edac_dev, KERN_ERR,
501 				"INTERNAL ERROR: instance %d 'block' "
502 				"out of range (%d >= %d)\n",
503 				inst_nr, block_nr,
504 				instance->nr_blocks);
505 		return;
506 	}
507 
508 	if (instance->nr_blocks > 0) {
509 		block = instance->blocks + block_nr;
510 		block->counters.ce_count += count;
511 	}
512 
513 	/* Propagate the count up the 'totals' tree */
514 	instance->counters.ce_count += count;
515 	edac_dev->counters.ce_count += count;
516 
517 	if (edac_device_get_log_ce(edac_dev))
518 		edac_device_printk(edac_dev, KERN_WARNING,
519 				   "CE: %s instance: %s block: %s count: %d '%s'\n",
520 				   edac_dev->ctl_name, instance->name,
521 				   block ? block->name : "N/A", count, msg);
522 }
523 EXPORT_SYMBOL_GPL(edac_device_handle_ce_count);
524 
525 void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
526 				 unsigned int count, int inst_nr, int block_nr,
527 				 const char *msg)
528 {
529 	struct edac_device_instance *instance;
530 	struct edac_device_block *block = NULL;
531 
532 	if (!count)
533 		return;
534 
535 	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
536 		edac_device_printk(edac_dev, KERN_ERR,
537 				"INTERNAL ERROR: 'instance' out of range "
538 				"(%d >= %d)\n", inst_nr,
539 				edac_dev->nr_instances);
540 		return;
541 	}
542 
543 	instance = edac_dev->instances + inst_nr;
544 
545 	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
546 		edac_device_printk(edac_dev, KERN_ERR,
547 				"INTERNAL ERROR: instance %d 'block' "
548 				"out of range (%d >= %d)\n",
549 				inst_nr, block_nr,
550 				instance->nr_blocks);
551 		return;
552 	}
553 
554 	if (instance->nr_blocks > 0) {
555 		block = instance->blocks + block_nr;
556 		block->counters.ue_count += count;
557 	}
558 
559 	/* Propagate the count up the 'totals' tree */
560 	instance->counters.ue_count += count;
561 	edac_dev->counters.ue_count += count;
562 
563 	if (edac_device_get_log_ue(edac_dev))
564 		edac_device_printk(edac_dev, KERN_EMERG,
565 				   "UE: %s instance: %s block: %s count: %d '%s'\n",
566 				   edac_dev->ctl_name, instance->name,
567 				   block ? block->name : "N/A", count, msg);
568 
569 	if (edac_device_get_panic_on_ue(edac_dev))
570 		panic("EDAC %s: UE instance: %s block %s count: %d '%s'\n",
571 		      edac_dev->ctl_name, instance->name,
572 		      block ? block->name : "N/A", count, msg);
573 }
574 EXPORT_SYMBOL_GPL(edac_device_handle_ue_count);
575 
576 static void edac_dev_release(struct device *dev)
577 {
578 	struct edac_dev_feat_ctx *ctx = container_of(dev, struct edac_dev_feat_ctx, dev);
579 
580 	kfree(ctx->mem_repair);
581 	kfree(ctx->scrub);
582 	kfree(ctx->dev.groups);
583 	kfree(ctx);
584 }
585 
/* Device type for RAS feature devices; release frees the whole context */
static const struct device_type edac_dev_type = {
	.name = "edac_dev",
	.release = edac_dev_release,
};
590 
/* devm action: unregister the RAS feature device when the parent goes away */
static void edac_dev_unreg(void *data)
{
	struct device *dev = data;

	device_unregister(dev);
}
595 
596 /**
597  * edac_dev_register - register device for RAS features with EDAC
598  * @parent: parent device.
599  * @name: name for the folder in the /sys/bus/edac/devices/,
600  *	  which is derived from the parent device.
601  *	  For e.g. /sys/bus/edac/devices/cxl_mem0/
602  * @private: parent driver's data to store in the context if any.
603  * @num_features: number of RAS features to register.
604  * @ras_features: list of RAS features to register.
605  *
606  * Return:
607  *  * %0       - Success.
608  *  * %-EINVAL - Invalid parameters passed.
609  *  * %-ENOMEM - Dynamic memory allocation failed.
610  *
611  */
int edac_dev_register(struct device *parent, char *name,
		      void *private, int num_features,
		      const struct edac_dev_feature *ras_features)
{
	const struct attribute_group **ras_attr_groups;
	struct edac_dev_data *dev_data;
	struct edac_dev_feat_ctx *ctx;
	int mem_repair_cnt = 0;
	int attr_gcnt = 0;
	int ret = -ENOMEM;
	int scrub_cnt = 0;
	int feat;

	if (!parent || !name || !num_features || !ras_features)
		return -EINVAL;

	/*
	 * Pass 1: count attribute groups and per-feature slots so the
	 * arrays below can be sized before any descriptors are built.
	 */
	for (feat = 0; feat < num_features; feat++) {
		switch (ras_features[feat].ft_type) {
		case RAS_FEAT_SCRUB:
			attr_gcnt++;
			scrub_cnt++;
			break;
		case RAS_FEAT_ECS:
			/* ECS contributes one group per media FRU */
			attr_gcnt += ras_features[feat].ecs_info.num_media_frus;
			break;
		case RAS_FEAT_MEM_REPAIR:
			attr_gcnt++;
			mem_repair_cnt++;
			break;
		default:
			return -EINVAL;
		}
	}

	ctx = kzalloc_obj(*ctx, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* +1 slot for the NULL terminator the driver core expects */
	ras_attr_groups = kzalloc_objs(*ras_attr_groups, attr_gcnt + 1,
				       GFP_KERNEL);
	if (!ras_attr_groups)
		goto ctx_free;

	if (scrub_cnt) {
		ctx->scrub = kzalloc_objs(*ctx->scrub, scrub_cnt, GFP_KERNEL);
		if (!ctx->scrub)
			goto groups_free;
	}

	if (mem_repair_cnt) {
		ctx->mem_repair = kzalloc_objs(*ctx->mem_repair, mem_repair_cnt,
					       GFP_KERNEL);
		if (!ctx->mem_repair)
			goto data_mem_free;
	}

	/*
	 * Pass 2: fill the per-feature context slots and build the sysfs
	 * attribute groups.  The counters double as instance indices and
	 * must match each feature's declared ->instance.
	 */
	attr_gcnt = 0;
	scrub_cnt = 0;
	mem_repair_cnt = 0;
	for (feat = 0; feat < num_features; feat++, ras_features++) {
		switch (ras_features->ft_type) {
		case RAS_FEAT_SCRUB:
			if (!ras_features->scrub_ops || scrub_cnt != ras_features->instance) {
				ret = -EINVAL;
				goto data_mem_free;
			}

			dev_data = &ctx->scrub[scrub_cnt];
			dev_data->instance = scrub_cnt;
			dev_data->scrub_ops = ras_features->scrub_ops;
			dev_data->private = ras_features->ctx;
			ret = edac_scrub_get_desc(parent, &ras_attr_groups[attr_gcnt],
						  ras_features->instance);
			if (ret)
				goto data_mem_free;

			scrub_cnt++;
			attr_gcnt++;
			break;
		case RAS_FEAT_ECS:
			if (!ras_features->ecs_ops) {
				ret = -EINVAL;
				goto data_mem_free;
			}

			/* only a single ECS context slot exists in ctx */
			dev_data = &ctx->ecs;
			dev_data->ecs_ops = ras_features->ecs_ops;
			dev_data->private = ras_features->ctx;
			ret = edac_ecs_get_desc(parent, &ras_attr_groups[attr_gcnt],
						ras_features->ecs_info.num_media_frus);
			if (ret)
				goto data_mem_free;

			attr_gcnt += ras_features->ecs_info.num_media_frus;
			break;
		case RAS_FEAT_MEM_REPAIR:
			if (!ras_features->mem_repair_ops ||
			    mem_repair_cnt != ras_features->instance) {
				ret = -EINVAL;
				goto data_mem_free;
			}

			dev_data = &ctx->mem_repair[mem_repair_cnt];
			dev_data->instance = mem_repair_cnt;
			dev_data->mem_repair_ops = ras_features->mem_repair_ops;
			dev_data->private = ras_features->ctx;
			ret = edac_mem_repair_get_desc(parent, &ras_attr_groups[attr_gcnt],
						       ras_features->instance);
			if (ret)
				goto data_mem_free;

			mem_repair_cnt++;
			attr_gcnt++;
			break;
		default:
			ret = -EINVAL;
			goto data_mem_free;
		}
	}

	ctx->dev.parent = parent;
	ctx->dev.bus = edac_get_sysfs_subsys();
	ctx->dev.type = &edac_dev_type;
	ctx->dev.groups = ras_attr_groups;
	ctx->private = private;
	dev_set_drvdata(&ctx->dev, ctx);

	ret = dev_set_name(&ctx->dev, "%s", name);
	if (ret)
		goto data_mem_free;

	/*
	 * From device_register() on, ownership of ctx moves to the device
	 * core: cleanup happens via edac_dev_release() when the last
	 * reference is dropped, so no manual freeing below this point.
	 * NOTE(review): descriptors built by the *_get_desc() helpers are
	 * presumably devm-managed on @parent — confirm in their sources.
	 */
	ret = device_register(&ctx->dev);
	if (ret) {
		put_device(&ctx->dev);
		return ret;
	}

	return devm_add_action_or_reset(parent, edac_dev_unreg, &ctx->dev);

data_mem_free:
	kfree(ctx->mem_repair);
	kfree(ctx->scrub);
groups_free:
	kfree(ras_attr_groups);
ctx_free:
	kfree(ctx);
	return ret;
}
EXPORT_SYMBOL_GPL(edac_dev_register);
762