/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006      Red Hat UK Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/kconfig.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

static struct backing_dev_info mtd_bdi = {
};

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);

#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

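/*
 * Device-node numbering, by way of example: with MTD_CHAR_MAJOR (90 on
 * Linux), mtd0 is MKDEV(90, 0) and its read-only companion node mtd0ro
 * is MKDEV(90, 1); mtd1 is MKDEV(90, 2), and so on.
 */
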
/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)mtd->size);
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
	NULL);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
		   mtd_bitflip_threshold_show,
		   mtd_bitflip_threshold_store);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);

static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
}
static DEVICE_ATTR(corrected_bits, S_IRUGO,
		   mtd_ecc_stats_corrected_show, NULL);

static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
}
static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);

static ssize_t mtd_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
}
static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);

static ssize_t mtd_bbtblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
}
static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);
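
/*
 * These attributes surface under /sys/class/mtd/mtdX/. Illustrative
 * session only; actual values depend on the flash chip:
 *
 *	$ cat /sys/class/mtd/mtd0/type
 *	nand
 *	$ cat /sys/class/mtd/mtd0/erasesize
 *	131072
 */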

static struct device_type mtd_devtype = {
	.name		= "mtd",
	.groups		= mtd_groups,
	.release	= mtd_release,
};

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently active MTD 'user' of its arrival. Returns
 *	zero on success or a negative error code on failure.
 */
int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_notifier *not;
	int i, error;

	mtd->backing_dev_info = &mtd_bdi;

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

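	/*
	 * Worked example (illustrative values): a NAND chip with 128KiB
	 * (0x20000) eraseblocks and 2KiB (0x800) pages gets
	 * erasesize_shift = 17 and writesize_shift = 11, hence
	 * erasesize_mask = 0x1ffff and writesize_mask = 0x7ff, so offset
	 * arithmetic can use shifts and masks instead of 64-bit division.
	 */
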
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	error = device_register(&mtd->dev);
	if (error)
		goto fail_added;

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_added:
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently active MTD 'user' of its departure.
 *	Returns zero on success or a negative error code on failure:
 *	-ENODEV if the device is not present in the table, or -EBUSY if it
 *	is still in use.
 */
int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

static int mtd_add_device_partitions(struct mtd_info *mtd,
				     struct mtd_partition *real_parts,
				     int nbparts)
{
	int ret;

	if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	if (nbparts > 0) {
		ret = add_mtd_partitions(mtd, real_parts, nbparts);
		if (ret && IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
			del_mtd_device(mtd);
		return ret;
	}

	return 0;
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in @parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * It first tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found, this function falls back to the information specified in
 *   @parts/@nr_parts.
 * * If any partitioning info was found, this function registers the found
 *   partitions. If the MTD_PARTITIONED_MASTER option is set, then the device
 *   as a whole is registered first.
 * * If no partitions were found, this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;
	struct mtd_partition *real_parts = NULL;

	ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
	if (ret <= 0 && nr_parts && parts) {
		real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
				     GFP_KERNEL);
		if (!real_parts)
			ret = -ENOMEM;
		else
			ret = nr_parts;
	}

	if (ret >= 0)
		ret = mtd_add_device_partitions(mtd, real_parts, ret);

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

	kfree(real_parts);
	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
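
/*
 * Typical use from a flash driver's probe() path (a sketch; the probe
 * list and fallback table shown are driver-specific examples):
 *
 *	static const char * const part_probes[] =
 *		{ "cmdlinepart", "ofpart", NULL };
 *
 *	ret = mtd_device_parse_register(mtd, part_probes, NULL,
 *					fallback_parts, nr_fallback_parts);
 *
 * Passing NULL for @types selects the default parser list, and passing
 * 0 for @nr_parts registers the whole device when no partitions are found.
 */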

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot)
		unregister_reboot_notifier(&master->reboot_notifier);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 *	register_mtd_user - register a 'user' of MTD devices.
 *	@new: pointer to notifier info structure
 *
 *	Registers a pair of callback functions to be called upon addition
 *	or removal of MTD devices. Causes the 'add' callback to be immediately
 *	invoked for each MTD device currently present in the system.
 */
void register_mtd_user(struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
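
/*
 * Sketch of a hypothetical MTD user: the add()/remove() callbacks run
 * under mtd_table_mutex, once per existing device at registration time
 * and then on every subsequent device arrival or departure.
 *
 *	static void example_add(struct mtd_info *mtd)
 *	{
 *		pr_info("saw mtd%d (%s)\n", mtd->index, mtd->name);
 *	}
 *
 *	static void example_remove(struct mtd_info *mtd) { }
 *
 *	static struct mtd_notifier example_notifier = {
 *		.add	= example_add,
 *		.remove	= example_remove,
 *	};
 *
 *	register_mtd_user(&example_notifier);
 */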

/**
 *	unregister_mtd_user - unregister a 'user' of MTD devices.
 *	@old: pointer to notifier info structure
 *
 *	Removes a callback function pair from the list of 'users' to be
 *	notified upon addition or removal of MTD devices. Causes the
 *	'remove' callback to be immediately invoked for each MTD device
 *	currently present in the system.
 */
int unregister_mtd_user(struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and NULL address, return the num'th entry in the device
 *	table, if any.	Given an address and num == -1, search the device table
 *	for a device with that address and return it if it's still present.
 *	Given both, return the num'th device only if its address matches.
 *	Return an error pointer if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);

int __get_mtd_device(struct mtd_info *mtd)
{
	int err;

	if (!try_module_get(mtd->owner))
		return -ENODEV;

	if (mtd->_get_device) {
		err = mtd->_get_device(mtd);

		if (err) {
			module_put(mtd->owner);
			return err;
		}
	}
	mtd->usecount++;
	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 *	This function returns the MTD device description structure in case
 *	of success and an error pointer in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
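
/*
 * Usage sketch: look a device up by name ("bootloader" is a hypothetical
 * partition name), use it, then drop the reference. Names are not
 * guaranteed unique; the first match wins.
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("bootloader");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);
 */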

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	--mtd->usecount;
	BUG_ON(mtd->usecount < 0);

	if (mtd->_put_device)
		mtd->_put_device(mtd);

	module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is an asynchronous operation.  Device drivers are supposed
 * to call instr->callback() whenever the operation completes, even
 * if it completes with a failure.
 * Callers are supposed to pass a callback function and wait for it
 * to be called before writing to the block.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	if (!instr->len) {
		instr->state = MTD_ERASE_DONE;
		mtd_erase_callback(instr);
		return 0;
	}
	return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
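
/*
 * Sketch of the usual synchronous erase pattern (mtdchar does essentially
 * this): fill in an erase_info, install a callback that wakes the caller,
 * and sleep until the driver reports completion.
 *
 *	static void erase_done(struct erase_info *instr)
 *	{
 *		wake_up((wait_queue_head_t *)instr->priv);
 *	}
 *
 *	...
 *	wait_queue_head_t waitq;
 *	struct erase_info ei = { 0 };
 *
 *	init_waitqueue_head(&waitq);
 *	ei.mtd = mtd;
 *	ei.addr = ofs;
 *	ei.len = mtd->erasesize;
 *	ei.callback = erase_done;
 *	ei.priv = (unsigned long)&waitq;
 *	if (!mtd_erase(mtd, &ei))
 *		wait_event(waitq, ei.state == MTD_ERASE_DONE ||
 *				  ei.state == MTD_ERASE_FAILED);
 */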

/*
 * Support for eXecute-In-Place (XIP): map a region of the device directly.
 * phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP at all if the driver can't unpoint */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	if (!mtd->_get_unmapped_area)
		return -EOPNOTSUPP;
	if (offset >= mtd->size || len > mtd->size - offset)
		return -EINVAL;
	return mtd->_get_unmapped_area(mtd, len, offset, flags);
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	int ret_code;

	*retlen = 0;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	/*
	 * In the absence of an error, drivers return a non-negative integer
	 * representing the maximum number of bitflips that were corrected on
	 * any one ecc region (if applicable; zero otherwise).
	 */
	ret_code = mtd->_read(mtd, from, len, retlen, buf);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);
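
/*
 * Caller-side handling, a sketch: -EUCLEAN is not fatal. It means the
 * data was recovered, but the block accumulated enough bitflips that the
 * upper layer should scrub or relocate it (UBI, for instance, reacts by
 * scrubbing such blocks). schedule_scrub() below is a hypothetical helper.
 *
 *	size_t retlen;
 *
 *	err = mtd_read(mtd, ofs, len, &retlen, buf);
 *	if (err == -EUCLEAN)
 *		schedule_scrub(ofs);
 *	else if (err)
 *		return err;
 */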

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	*retlen = 0;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);

/*
 * In blackbox flight-recorder-like scenarios we want writes to succeed even
 * from interrupt context. panic_write() is only intended to be called when
 * it's known the kernel is about to panic and we need the write to succeed.
 * Since the kernel is not going to be running for much longer, this function
 * can break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret_code;

	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_read_oob)
		return -EOPNOTSUPP;
	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	ret_code = mtd->_read_oob(mtd, from, ops);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
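
/*
 * Usage sketch: read one page's OOB area in raw placement mode. Fields
 * not mentioned must be zero, which is why callers typically build the
 * struct with a designated initialiser:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_RAW,
 *		.ooblen	= mtd->oobsize,
 *		.oobbuf	= oob_buf,
 *	};
 *
 *	err = mtd_read_oob(mtd, page_addr, &ops);
 */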

/*
 * Methods to access the protection register area, present in some flash
 * devices. The user data is one-time programmable but the factory data is
 * read only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	int ret;

	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, the OTP area is exhausted and
	 * we must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_lock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unlock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_is_locked(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isreserved)
		return 0;
	return mtd->_block_isreserved(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
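
/*
 * Sketch: scanning a whole device for bad blocks with the helpers above.
 * mtd_block_isbad() returns > 0 for bad, 0 for good, < 0 on error.
 *
 *	loff_t ofs;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		int ret = mtd_block_isbad(mtd, ofs);
 *
 *		if (ret > 0)
 *			pr_info("bad block at 0x%llx\n", ofs);
 *		else if (ret < 0)
 *			return ret;
 *	}
 */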

/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!mtd->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);
	return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
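
/*
 * Usage sketch: gather two buffers into one write with a kvec array,
 * much as JFFS2 does for node headers and payloads (hdr/data and their
 * lengths below are illustrative):
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len  },
 *		{ .iov_base = data, .iov_len = data_len },
 *	};
 *	size_t retlen;
 *
 *	err = mtd_writev(mtd, vecs, 2, ofs, &retlen);
 */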

/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
		       __GFP_NORETRY | __GFP_NO_KSWAPD;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
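
/*
 * Usage sketch: ask for a whole-eraseblock buffer but accept less under
 * memory pressure; the caller then works in *size-sized chunks:
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// 'size' now holds the actual allocation size
 */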

#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

static int mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtd_proc_show, NULL);
}

static const struct file_operations mtd_proc_ops = {
	.open		= mtd_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
	int ret;

	ret = bdi_init(bdi);
	if (!ret)
		ret = bdi_register(bdi, NULL, "%s", name);

	if (ret)
		bdi_destroy(bdi);

	return ret;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	ret = mtd_bdi_init(&mtd_bdi, "mtd");
	if (ret)
		goto err_bdi;

	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_destroy(&mtd_bdi);
	idr_destroy(&mtd_idr);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");