xref: /linux/drivers/edac/edac_device.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 
2 /*
3  * edac_device.c
4  * (C) 2007 www.douglaskthompson.com
5  *
6  * This file may be distributed under the terms of the
7  * GNU General Public License.
8  *
9  * Written by Doug Thompson <norsk5@xmission.com>
10  *
11  * edac_device API implementation
12  * 19 Jan 2007
13  */
14 
15 #include <asm/page.h>
16 #include <linux/uaccess.h>
17 #include <linux/ctype.h>
18 #include <linux/highmem.h>
19 #include <linux/init.h>
20 #include <linux/jiffies.h>
21 #include <linux/module.h>
22 #include <linux/slab.h>
23 #include <linux/smp.h>
24 #include <linux/spinlock.h>
25 #include <linux/sysctl.h>
26 #include <linux/timer.h>
27 
28 #include "edac_device.h"
29 #include "edac_module.h"
30 
/*
 * 'edac_device_list' is protected by 'device_ctls_mutex': hold the
 * mutex across any traversal or manipulation of the list.
 */
34 static DEFINE_MUTEX(device_ctls_mutex);
35 static LIST_HEAD(edac_device_list);
36 
37 /* Default workqueue processing interval on this instance, in msecs */
38 #define DEFAULT_POLL_INTERVAL 1000
39 
40 #ifdef CONFIG_EDAC_DEBUG
edac_device_dump_device(struct edac_device_ctl_info * edac_dev)41 static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
42 {
43 	edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n",
44 		 edac_dev, edac_dev->dev_idx);
45 	edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
46 	edac_dbg(3, "\tdev = %p\n", edac_dev->dev);
47 	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
48 		 edac_dev->mod_name, edac_dev->ctl_name);
49 	edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info);
50 }
51 #endif				/* CONFIG_EDAC_DEBUG */
52 
53 /*
54  * @off_val: zero, 1, or other based offset
55  */
56 struct edac_device_ctl_info *
edac_device_alloc_ctl_info(unsigned pvt_sz,char * dev_name,unsigned nr_instances,char * blk_name,unsigned nr_blocks,unsigned off_val,int device_index)57 edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instances,
58 			   char *blk_name, unsigned nr_blocks, unsigned off_val,
59 			   int device_index)
60 {
61 	struct edac_device_block *dev_blk, *blk_p, *blk;
62 	struct edac_device_instance *dev_inst, *inst;
63 	struct edac_device_ctl_info *dev_ctl;
64 	unsigned instance, block;
65 	void *pvt;
66 	int err;
67 
68 	edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);
69 
70 	dev_ctl = kzalloc_obj(struct edac_device_ctl_info);
71 	if (!dev_ctl)
72 		return NULL;
73 
74 	dev_inst = kzalloc_objs(struct edac_device_instance, nr_instances);
75 	if (!dev_inst)
76 		goto free;
77 
78 	dev_ctl->instances = dev_inst;
79 
80 	dev_blk = kzalloc_objs(struct edac_device_block,
81 			       nr_instances * nr_blocks);
82 	if (!dev_blk)
83 		goto free;
84 
85 	dev_ctl->blocks = dev_blk;
86 
87 	if (pvt_sz) {
88 		pvt = kzalloc(pvt_sz, GFP_KERNEL);
89 		if (!pvt)
90 			goto free;
91 
92 		dev_ctl->pvt_info = pvt;
93 	}
94 
95 	dev_ctl->dev_idx	= device_index;
96 	dev_ctl->nr_instances	= nr_instances;
97 
98 	/* Default logging of CEs and UEs */
99 	dev_ctl->log_ce = 1;
100 	dev_ctl->log_ue = 1;
101 
102 	/* Name of this edac device */
103 	snprintf(dev_ctl->name, sizeof(dev_ctl->name),"%s", dev_name);
104 
105 	/* Initialize every Instance */
106 	for (instance = 0; instance < nr_instances; instance++) {
107 		inst = &dev_inst[instance];
108 		inst->ctl = dev_ctl;
109 		inst->nr_blocks = nr_blocks;
110 		blk_p = &dev_blk[instance * nr_blocks];
111 		inst->blocks = blk_p;
112 
113 		/* name of this instance */
114 		snprintf(inst->name, sizeof(inst->name), "%s%u", dev_name, instance);
115 
116 		/* Initialize every block in each instance */
117 		for (block = 0; block < nr_blocks; block++) {
118 			blk = &blk_p[block];
119 			blk->instance = inst;
120 			snprintf(blk->name, sizeof(blk->name),
121 				 "%s%d", blk_name, block + off_val);
122 
123 			edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
124 				 instance, inst, block, blk, blk->name);
125 		}
126 	}
127 
128 	/* Mark this instance as merely ALLOCATED */
129 	dev_ctl->op_state = OP_ALLOC;
130 
131 	/*
132 	 * Initialize the 'root' kobj for the edac_device controller
133 	 */
134 	err = edac_device_register_sysfs_main_kobj(dev_ctl);
135 	if (err)
136 		goto free;
137 
138 	/* at this point, the root kobj is valid, and in order to
139 	 * 'free' the object, then the function:
140 	 *	edac_device_unregister_sysfs_main_kobj() must be called
141 	 * which will perform kobj unregistration and the actual free
142 	 * will occur during the kobject callback operation
143 	 */
144 
145 	return dev_ctl;
146 
147 free:
148 	__edac_device_free_ctl_info(dev_ctl);
149 
150 	return NULL;
151 }
152 EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
153 
/*
 * Release a control structure: tear down its main sysfs kobject; the
 * memory itself is freed by the kobject release callback.
 */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
	edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);
159 
160 /*
161  * find_edac_device_by_dev
162  *	scans the edac_device list for a specific 'struct device *'
163  *
164  *	lock to be held prior to call:	device_ctls_mutex
165  *
166  *	Return:
167  *		pointer to control structure managing 'dev'
168  *		NULL if not found on list
169  */
find_edac_device_by_dev(struct device * dev)170 static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
171 {
172 	struct edac_device_ctl_info *edac_dev;
173 	struct list_head *item;
174 
175 	edac_dbg(0, "\n");
176 
177 	list_for_each(item, &edac_device_list) {
178 		edac_dev = list_entry(item, struct edac_device_ctl_info, link);
179 
180 		if (edac_dev->dev == dev)
181 			return edac_dev;
182 	}
183 
184 	return NULL;
185 }
186 
187 /*
188  * add_edac_dev_to_global_list
189  *	Before calling this function, caller must
190  *	assign a unique value to edac_dev->dev_idx.
191  *
192  *	lock to be held prior to call:	device_ctls_mutex
193  *
194  *	Return:
195  *		0 on success
196  *		1 on failure.
197  */
add_edac_dev_to_global_list(struct edac_device_ctl_info * edac_dev)198 static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
199 {
200 	struct list_head *item, *insert_before;
201 	struct edac_device_ctl_info *rover;
202 
203 	insert_before = &edac_device_list;
204 
205 	/* Determine if already on the list */
206 	rover = find_edac_device_by_dev(edac_dev->dev);
207 	if (unlikely(rover != NULL))
208 		goto fail0;
209 
210 	/* Insert in ascending order by 'dev_idx', so find position */
211 	list_for_each(item, &edac_device_list) {
212 		rover = list_entry(item, struct edac_device_ctl_info, link);
213 
214 		if (rover->dev_idx >= edac_dev->dev_idx) {
215 			if (unlikely(rover->dev_idx == edac_dev->dev_idx))
216 				goto fail1;
217 
218 			insert_before = item;
219 			break;
220 		}
221 	}
222 
223 	list_add_tail_rcu(&edac_dev->link, insert_before);
224 	return 0;
225 
226 fail0:
227 	edac_printk(KERN_WARNING, EDAC_MC,
228 			"%s (%s) %s %s already assigned %d\n",
229 			dev_name(rover->dev), edac_dev_name(rover),
230 			rover->mod_name, rover->ctl_name, rover->dev_idx);
231 	return 1;
232 
233 fail1:
234 	edac_printk(KERN_WARNING, EDAC_MC,
235 			"bug in low-level driver: attempt to assign\n"
236 			"    duplicate dev_idx %d in %s()\n", rover->dev_idx,
237 			__func__);
238 	return 1;
239 }
240 
241 /*
242  * del_edac_device_from_global_list
243  */
del_edac_device_from_global_list(struct edac_device_ctl_info * edac_device)244 static void del_edac_device_from_global_list(struct edac_device_ctl_info
245 						*edac_device)
246 {
247 	list_del_rcu(&edac_device->link);
248 
249 	/* these are for safe removal of devices from global list while
250 	 * NMI handlers may be traversing list
251 	 */
252 	synchronize_rcu();
253 	INIT_LIST_HEAD(&edac_device->link);
254 }
255 
256 /*
257  * edac_device_workq_function
258  *	performs the operation scheduled by a workq request
259  *
260  *	this workq is embedded within an edac_device_ctl_info
261  *	structure, that needs to be polled for possible error events.
262  *
263  *	This operation is to acquire the list mutex lock
264  *	(thus preventing insertation or deletion)
265  *	and then call the device's poll function IFF this device is
266  *	running polled and there is a poll function defined.
267  */
edac_device_workq_function(struct work_struct * work_req)268 static void edac_device_workq_function(struct work_struct *work_req)
269 {
270 	struct delayed_work *d_work = to_delayed_work(work_req);
271 	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);
272 
273 	mutex_lock(&device_ctls_mutex);
274 
275 	/* If we are being removed, bail out immediately */
276 	if (edac_dev->op_state == OP_OFFLINE) {
277 		mutex_unlock(&device_ctls_mutex);
278 		return;
279 	}
280 
281 	/* Only poll controllers that are running polled and have a check */
282 	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
283 		(edac_dev->edac_check != NULL)) {
284 			edac_dev->edac_check(edac_dev);
285 	}
286 
287 	mutex_unlock(&device_ctls_mutex);
288 
289 	/* Reschedule the workq for the next time period to start again
290 	 * if the number of msec is for 1 sec, then adjust to the next
291 	 * whole one second to save timers firing all over the period
292 	 * between integral seconds
293 	 */
294 	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
295 		edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
296 	else
297 		edac_queue_work(&edac_dev->work, edac_dev->delay);
298 }
299 
300 /*
301  * edac_device_workq_setup
302  *	initialize a workq item for this edac_device instance
303  *	passing in the new delay period in msec
304  */
edac_device_workq_setup(struct edac_device_ctl_info * edac_dev,unsigned msec)305 static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
306 				    unsigned msec)
307 {
308 	edac_dbg(0, "\n");
309 
310 	/* take the arg 'msec' and set it into the control structure
311 	 * to used in the time period calculation
312 	 * then calc the number of jiffies that represents
313 	 */
314 	edac_dev->poll_msec = msec;
315 	edac_dev->delay = msecs_to_jiffies(msec);
316 
317 	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
318 
319 	/* optimize here for the 1 second case, which will be normal value, to
320 	 * fire ON the 1 second time event. This helps reduce all sorts of
321 	 * timers firing on sub-second basis, while they are happy
322 	 * to fire together on the 1 second exactly
323 	 */
324 	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
325 		edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
326 	else
327 		edac_queue_work(&edac_dev->work, edac_dev->delay);
328 }
329 
330 /*
331  * edac_device_workq_teardown
332  *	stop the workq processing on this edac_dev
333  */
edac_device_workq_teardown(struct edac_device_ctl_info * edac_dev)334 static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
335 {
336 	if (!edac_dev->edac_check)
337 		return;
338 
339 	edac_dev->op_state = OP_OFFLINE;
340 
341 	edac_stop_work(&edac_dev->work);
342 }
343 
344 /*
345  * edac_device_reset_delay_period
346  *
347  *	need to stop any outstanding workq queued up at this time
348  *	because we will be resetting the sleep time.
349  *	Then restart the workq on the new delay
350  */
edac_device_reset_delay_period(struct edac_device_ctl_info * edac_dev,unsigned long msec)351 void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
352 				    unsigned long msec)
353 {
354 	edac_dev->poll_msec = msec;
355 	edac_dev->delay	    = msecs_to_jiffies(msec);
356 
357 	/* See comment in edac_device_workq_setup() above */
358 	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
359 		edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
360 	else
361 		edac_mod_work(&edac_dev->work, edac_dev->delay);
362 }
363 
edac_device_alloc_index(void)364 int edac_device_alloc_index(void)
365 {
366 	static atomic_t device_indexes = ATOMIC_INIT(0);
367 
368 	return atomic_inc_return(&device_indexes) - 1;
369 }
370 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
371 
edac_device_add_device(struct edac_device_ctl_info * edac_dev)372 int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
373 {
374 	edac_dbg(0, "\n");
375 
376 #ifdef CONFIG_EDAC_DEBUG
377 	if (edac_debug_level >= 3)
378 		edac_device_dump_device(edac_dev);
379 #endif
380 	mutex_lock(&device_ctls_mutex);
381 
382 	if (add_edac_dev_to_global_list(edac_dev))
383 		goto fail0;
384 
385 	/* set load time so that error rate can be tracked */
386 	edac_dev->start_time = jiffies;
387 
388 	/* create this instance's sysfs entries */
389 	if (edac_device_create_sysfs(edac_dev)) {
390 		edac_device_printk(edac_dev, KERN_WARNING,
391 					"failed to create sysfs device\n");
392 		goto fail1;
393 	}
394 
395 	/* If there IS a check routine, then we are running POLLED */
396 	if (edac_dev->edac_check != NULL) {
397 		/* This instance is NOW RUNNING */
398 		edac_dev->op_state = OP_RUNNING_POLL;
399 
400 		edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
401 	} else {
402 		edac_dev->op_state = OP_RUNNING_INTERRUPT;
403 	}
404 
405 	/* Report action taken */
406 	edac_device_printk(edac_dev, KERN_INFO,
407 		"Giving out device to module %s controller %s: DEV %s (%s)\n",
408 		edac_dev->mod_name, edac_dev->ctl_name, edac_dev->dev_name,
409 		edac_op_state_to_string(edac_dev->op_state));
410 
411 	mutex_unlock(&device_ctls_mutex);
412 	return 0;
413 
414 fail1:
415 	/* Some error, so remove the entry from the lsit */
416 	del_edac_device_from_global_list(edac_dev);
417 
418 fail0:
419 	mutex_unlock(&device_ctls_mutex);
420 	return 1;
421 }
422 EXPORT_SYMBOL_GPL(edac_device_add_device);
423 
edac_device_del_device(struct device * dev)424 struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
425 {
426 	struct edac_device_ctl_info *edac_dev;
427 
428 	edac_dbg(0, "\n");
429 
430 	mutex_lock(&device_ctls_mutex);
431 
432 	/* Find the structure on the list, if not there, then leave */
433 	edac_dev = find_edac_device_by_dev(dev);
434 	if (edac_dev == NULL) {
435 		mutex_unlock(&device_ctls_mutex);
436 		return NULL;
437 	}
438 
439 	/* mark this instance as OFFLINE */
440 	edac_dev->op_state = OP_OFFLINE;
441 
442 	/* deregister from global list */
443 	del_edac_device_from_global_list(edac_dev);
444 
445 	mutex_unlock(&device_ctls_mutex);
446 
447 	/* clear workq processing on this instance */
448 	edac_device_workq_teardown(edac_dev);
449 
450 	/* Tear down the sysfs entries for this instance */
451 	edac_device_remove_sysfs(edac_dev);
452 
453 	edac_printk(KERN_INFO, EDAC_MC,
454 		"Removed device %d for %s %s: DEV %s\n",
455 		edac_dev->dev_idx,
456 		edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));
457 
458 	return edac_dev;
459 }
460 EXPORT_SYMBOL_GPL(edac_device_del_device);
461 
edac_device_get_log_ce(struct edac_device_ctl_info * edac_dev)462 static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
463 {
464 	return edac_dev->log_ce;
465 }
466 
edac_device_get_log_ue(struct edac_device_ctl_info * edac_dev)467 static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
468 {
469 	return edac_dev->log_ue;
470 }
471 
edac_device_get_panic_on_ue(struct edac_device_ctl_info * edac_dev)472 static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
473 					*edac_dev)
474 {
475 	return edac_dev->panic_on_ue;
476 }
477 
edac_device_handle_ce_count(struct edac_device_ctl_info * edac_dev,unsigned int count,int inst_nr,int block_nr,const char * msg)478 void edac_device_handle_ce_count(struct edac_device_ctl_info *edac_dev,
479 				 unsigned int count, int inst_nr, int block_nr,
480 				 const char *msg)
481 {
482 	struct edac_device_instance *instance;
483 	struct edac_device_block *block = NULL;
484 
485 	if (!count)
486 		return;
487 
488 	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
489 		edac_device_printk(edac_dev, KERN_ERR,
490 				"INTERNAL ERROR: 'instance' out of range "
491 				"(%d >= %d)\n", inst_nr,
492 				edac_dev->nr_instances);
493 		return;
494 	}
495 
496 	instance = edac_dev->instances + inst_nr;
497 
498 	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
499 		edac_device_printk(edac_dev, KERN_ERR,
500 				"INTERNAL ERROR: instance %d 'block' "
501 				"out of range (%d >= %d)\n",
502 				inst_nr, block_nr,
503 				instance->nr_blocks);
504 		return;
505 	}
506 
507 	if (instance->nr_blocks > 0) {
508 		block = instance->blocks + block_nr;
509 		block->counters.ce_count += count;
510 	}
511 
512 	/* Propagate the count up the 'totals' tree */
513 	instance->counters.ce_count += count;
514 	edac_dev->counters.ce_count += count;
515 
516 	if (edac_device_get_log_ce(edac_dev))
517 		edac_device_printk(edac_dev, KERN_WARNING,
518 				   "CE: %s instance: %s block: %s count: %d '%s'\n",
519 				   edac_dev->ctl_name, instance->name,
520 				   block ? block->name : "N/A", count, msg);
521 }
522 EXPORT_SYMBOL_GPL(edac_device_handle_ce_count);
523 
edac_device_handle_ue_count(struct edac_device_ctl_info * edac_dev,unsigned int count,int inst_nr,int block_nr,const char * msg)524 void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
525 				 unsigned int count, int inst_nr, int block_nr,
526 				 const char *msg)
527 {
528 	struct edac_device_instance *instance;
529 	struct edac_device_block *block = NULL;
530 
531 	if (!count)
532 		return;
533 
534 	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
535 		edac_device_printk(edac_dev, KERN_ERR,
536 				"INTERNAL ERROR: 'instance' out of range "
537 				"(%d >= %d)\n", inst_nr,
538 				edac_dev->nr_instances);
539 		return;
540 	}
541 
542 	instance = edac_dev->instances + inst_nr;
543 
544 	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
545 		edac_device_printk(edac_dev, KERN_ERR,
546 				"INTERNAL ERROR: instance %d 'block' "
547 				"out of range (%d >= %d)\n",
548 				inst_nr, block_nr,
549 				instance->nr_blocks);
550 		return;
551 	}
552 
553 	if (instance->nr_blocks > 0) {
554 		block = instance->blocks + block_nr;
555 		block->counters.ue_count += count;
556 	}
557 
558 	/* Propagate the count up the 'totals' tree */
559 	instance->counters.ue_count += count;
560 	edac_dev->counters.ue_count += count;
561 
562 	if (edac_device_get_log_ue(edac_dev))
563 		edac_device_printk(edac_dev, KERN_EMERG,
564 				   "UE: %s instance: %s block: %s count: %d '%s'\n",
565 				   edac_dev->ctl_name, instance->name,
566 				   block ? block->name : "N/A", count, msg);
567 
568 	if (edac_device_get_panic_on_ue(edac_dev))
569 		panic("EDAC %s: UE instance: %s block %s count: %d '%s'\n",
570 		      edac_dev->ctl_name, instance->name,
571 		      block ? block->name : "N/A", count, msg);
572 }
573 EXPORT_SYMBOL_GPL(edac_device_handle_ue_count);
574 
edac_dev_release(struct device * dev)575 static void edac_dev_release(struct device *dev)
576 {
577 	struct edac_dev_feat_ctx *ctx = container_of(dev, struct edac_dev_feat_ctx, dev);
578 
579 	kfree(ctx->mem_repair);
580 	kfree(ctx->scrub);
581 	kfree(ctx->dev.groups);
582 	kfree(ctx);
583 }
584 
585 static const struct device_type edac_dev_type = {
586 	.name = "edac_dev",
587 	.release = edac_dev_release,
588 };
589 
/* devm action: tear down the EDAC feature device when the parent goes away */
static void edac_dev_unreg(void *data)
{
	device_unregister(data);
}
594 
595 /**
596  * edac_dev_register - register device for RAS features with EDAC
597  * @parent: parent device.
598  * @name: name for the folder in the /sys/bus/edac/devices/,
599  *	  which is derived from the parent device.
600  *	  For e.g. /sys/bus/edac/devices/cxl_mem0/
601  * @private: parent driver's data to store in the context if any.
602  * @num_features: number of RAS features to register.
603  * @ras_features: list of RAS features to register.
604  *
605  * Return:
606  *  * %0       - Success.
607  *  * %-EINVAL - Invalid parameters passed.
608  *  * %-ENOMEM - Dynamic memory allocation failed.
609  *
610  */
edac_dev_register(struct device * parent,char * name,void * private,int num_features,const struct edac_dev_feature * ras_features)611 int edac_dev_register(struct device *parent, char *name,
612 		      void *private, int num_features,
613 		      const struct edac_dev_feature *ras_features)
614 {
615 	const struct attribute_group **ras_attr_groups;
616 	struct edac_dev_data *dev_data;
617 	struct edac_dev_feat_ctx *ctx;
618 	int mem_repair_cnt = 0;
619 	int attr_gcnt = 0;
620 	int ret = -ENOMEM;
621 	int scrub_cnt = 0;
622 	int feat;
623 
624 	if (!parent || !name || !num_features || !ras_features)
625 		return -EINVAL;
626 
627 	/* Double parse to make space for attributes */
628 	for (feat = 0; feat < num_features; feat++) {
629 		switch (ras_features[feat].ft_type) {
630 		case RAS_FEAT_SCRUB:
631 			attr_gcnt++;
632 			scrub_cnt++;
633 			break;
634 		case RAS_FEAT_ECS:
635 			attr_gcnt += ras_features[feat].ecs_info.num_media_frus;
636 			break;
637 		case RAS_FEAT_MEM_REPAIR:
638 			attr_gcnt++;
639 			mem_repair_cnt++;
640 			break;
641 		default:
642 			return -EINVAL;
643 		}
644 	}
645 
646 	ctx = kzalloc_obj(*ctx);
647 	if (!ctx)
648 		return -ENOMEM;
649 
650 	ras_attr_groups = kzalloc_objs(*ras_attr_groups, attr_gcnt + 1);
651 	if (!ras_attr_groups)
652 		goto ctx_free;
653 
654 	if (scrub_cnt) {
655 		ctx->scrub = kzalloc_objs(*ctx->scrub, scrub_cnt);
656 		if (!ctx->scrub)
657 			goto groups_free;
658 	}
659 
660 	if (mem_repair_cnt) {
661 		ctx->mem_repair = kzalloc_objs(*ctx->mem_repair, mem_repair_cnt);
662 		if (!ctx->mem_repair)
663 			goto data_mem_free;
664 	}
665 
666 	attr_gcnt = 0;
667 	scrub_cnt = 0;
668 	mem_repair_cnt = 0;
669 	for (feat = 0; feat < num_features; feat++, ras_features++) {
670 		switch (ras_features->ft_type) {
671 		case RAS_FEAT_SCRUB:
672 			if (!ras_features->scrub_ops || scrub_cnt != ras_features->instance) {
673 				ret = -EINVAL;
674 				goto data_mem_free;
675 			}
676 
677 			dev_data = &ctx->scrub[scrub_cnt];
678 			dev_data->instance = scrub_cnt;
679 			dev_data->scrub_ops = ras_features->scrub_ops;
680 			dev_data->private = ras_features->ctx;
681 			ret = edac_scrub_get_desc(parent, &ras_attr_groups[attr_gcnt],
682 						  ras_features->instance);
683 			if (ret)
684 				goto data_mem_free;
685 
686 			scrub_cnt++;
687 			attr_gcnt++;
688 			break;
689 		case RAS_FEAT_ECS:
690 			if (!ras_features->ecs_ops) {
691 				ret = -EINVAL;
692 				goto data_mem_free;
693 			}
694 
695 			dev_data = &ctx->ecs;
696 			dev_data->ecs_ops = ras_features->ecs_ops;
697 			dev_data->private = ras_features->ctx;
698 			ret = edac_ecs_get_desc(parent, &ras_attr_groups[attr_gcnt],
699 						ras_features->ecs_info.num_media_frus);
700 			if (ret)
701 				goto data_mem_free;
702 
703 			attr_gcnt += ras_features->ecs_info.num_media_frus;
704 			break;
705 		case RAS_FEAT_MEM_REPAIR:
706 			if (!ras_features->mem_repair_ops ||
707 			    mem_repair_cnt != ras_features->instance) {
708 				ret = -EINVAL;
709 				goto data_mem_free;
710 			}
711 
712 			dev_data = &ctx->mem_repair[mem_repair_cnt];
713 			dev_data->instance = mem_repair_cnt;
714 			dev_data->mem_repair_ops = ras_features->mem_repair_ops;
715 			dev_data->private = ras_features->ctx;
716 			ret = edac_mem_repair_get_desc(parent, &ras_attr_groups[attr_gcnt],
717 						       ras_features->instance);
718 			if (ret)
719 				goto data_mem_free;
720 
721 			mem_repair_cnt++;
722 			attr_gcnt++;
723 			break;
724 		default:
725 			ret = -EINVAL;
726 			goto data_mem_free;
727 		}
728 	}
729 
730 	ctx->dev.parent = parent;
731 	ctx->dev.bus = edac_get_sysfs_subsys();
732 	ctx->dev.type = &edac_dev_type;
733 	ctx->dev.groups = ras_attr_groups;
734 	ctx->private = private;
735 	dev_set_drvdata(&ctx->dev, ctx);
736 
737 	ret = dev_set_name(&ctx->dev, "%s", name);
738 	if (ret)
739 		goto data_mem_free;
740 
741 	ret = device_register(&ctx->dev);
742 	if (ret) {
743 		put_device(&ctx->dev);
744 		return ret;
745 	}
746 
747 	return devm_add_action_or_reset(parent, edac_dev_unreg, &ctx->dev);
748 
749 data_mem_free:
750 	kfree(ctx->mem_repair);
751 	kfree(ctx->scrub);
752 groups_free:
753 	kfree(ras_attr_groups);
754 ctx_free:
755 	kfree(ctx);
756 	return ret;
757 }
758 EXPORT_SYMBOL_GPL(edac_dev_register);
759