// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006      Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/root_dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);
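
/*
 * A minimal usage sketch: the mtd_for_each_device() iterator (defined in
 * mtdcore.h) is built on this helper, roughly:
 *
 *	for (mtd = __mtd_next_device(0); mtd;
 *	     mtd = __mtd_next_device(mtd->index + 1))
 *		...
 *
 * Callers must hold mtd_table_mutex while iterating, as every user in this
 * file does.
 */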

static LIST_HEAD(mtd_notifiers);

#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
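
/*
 * Each index owns two character-device minors: the even MTD_DEVT(index) for
 * /dev/mtdX and the odd MTD_DEVT(index) + 1 for the read-only /dev/mtdXro
 * node created in add_mtd_device() below.
 */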

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	idr_remove(&mtd_idr, mtd->index);
	of_node_put(mtd_get_of_node(mtd));

	if (mtd_is_partition(mtd))
		release_mtd_partition(mtd);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static void mtd_device_release(struct kref *kref)
{
	struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
	bool is_partition = mtd_is_partition(mtd);

	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	/* Try to remove the NVMEM provider */
	nvmem_unregister(mtd->nvmem);

	device_unregister(&mtd->dev);
	/*
	 * Clear dev so mtd can be safely re-registered later if desired.
	 * This should not be done for partitions, as those were already
	 * destroyed in device_unregister().
	 */
	if (!is_partition)
		memset(&mtd->dev, 0, sizeof(mtd->dev));

	module_put(THIS_MODULE);
}

#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)

#define MTD_DEVICE_ATTR_RW(name) \
static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
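
/*
 * For instance, MTD_DEVICE_ATTR_RO(type) expands to
 *	static DEVICE_ATTR(type, 0444, mtd_type_show, NULL)
 * which defines the struct device_attribute dev_attr_type referenced in the
 * mtd_attrs[] table below.
 */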

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);

static ssize_t mtd_oobavail_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);

static ssize_t mtd_corrected_bits_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */

static ssize_t mtd_ecc_failures_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */

static ssize_t mtd_bad_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);

static ssize_t mtd_bbt_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name		= "mtd",
	.groups		= mtd_groups,
	.release	= mtd_release,
};

static bool mtd_expert_analysis_mode;

#ifdef CONFIG_DEBUG_FS
bool mtd_check_expert_analysis_mode(void)
{
	const char *mtd_expert_analysis_warning =
		"Bad block checks have been entirely disabled.\n"
		"This is only reserved for post-mortem forensics and debug purposes.\n"
		"Never enable this mode if you do not know what you are doing!\n";

	return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
}
EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
#endif

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct device *dev = &mtd->dev;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve the pairing information associated with the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.group = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a non-negative number representing the wunit associated to the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return master->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct device_node *node = mtd_get_of_node(mtd);
	struct nvmem_config config = {};

	config.id = NVMEM_DEVID_NONE;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.ignore_wp = true;
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP)
			mtd->nvmem = NULL;
		else
			return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem),
					     "Failed to register NVMEM device\n");
	}

	return 0;
}

static void mtd_check_of_node(struct mtd_info *mtd)
{
	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
	const char *pname, *prefix = "partition-";
	int plen, mtd_name_len, offset, prefix_len;

	/* Check if MTD already has a device node */
	if (mtd_get_of_node(mtd))
		return;

	if (!mtd_is_partition(mtd))
		return;

	parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
	if (!parent_dn)
		return;

	if (mtd_is_partition(mtd->parent))
		partitions = of_node_get(parent_dn);
	else
		partitions = of_get_child_by_name(parent_dn, "partitions");
	if (!partitions)
		goto exit_parent;

	prefix_len = strlen(prefix);
	mtd_name_len = strlen(mtd->name);

	/* Search if a partition is defined with the same name */
	for_each_child_of_node(partitions, mtd_dn) {
		/* Skip partition with no/wrong prefix */
		if (!of_node_name_prefix(mtd_dn, prefix))
			continue;

		/* Labels have priority. Check them first */
		if (!of_property_read_string(mtd_dn, "label", &pname)) {
			offset = 0;
		} else {
			pname = mtd_dn->name;
			offset = prefix_len;
		}

		plen = strlen(pname) - offset;
		if (plen == mtd_name_len &&
		    !strncmp(mtd->name, pname + offset, plen)) {
			mtd_set_of_node(mtd, mtd_dn);
			break;
		}
	}

	of_node_put(partitions);
exit_parent:
	of_node_put(parent_dn);
}
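
/*
 * Illustrative (hypothetical) device-tree fragment matched by the loop
 * above:
 *
 *	partitions {
 *		compatible = "fixed-partitions";
 *
 *		partition-uboot {
 *			label = "u-boot";
 *			reg = <0x0 0x100000>;
 *		};
 *	};
 *
 * An MTD partition named "u-boot" is bound to the "partition-uboot" node via
 * its "label" property; without a label, the node name with the "partition-"
 * prefix stripped is compared instead.
 */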

/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently active MTD 'user' of its arrival. Returns
 *	zero on success or non-zero on failure.
 */

640 
641 int add_mtd_device(struct mtd_info *mtd)
642 {
643 	struct device_node *np = mtd_get_of_node(mtd);
644 	struct mtd_info *master = mtd_get_master(mtd);
645 	struct mtd_notifier *not;
646 	int i, error, ofidx;
647 
648 	/*
649 	 * May occur, for instance, on buggy drivers which call
650 	 * mtd_device_parse_register() multiple times on the same master MTD,
651 	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
652 	 */
653 	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
654 		return -EEXIST;
655 
656 	BUG_ON(mtd->writesize == 0);
657 
658 	/*
659 	 * MTD drivers should implement ->_{write,read}() or
660 	 * ->_{write,read}_oob(), but not both.
661 	 */
662 	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
663 		    (mtd->_read && mtd->_read_oob)))
664 		return -EINVAL;
665 
666 	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
667 		    !(mtd->flags & MTD_NO_ERASE)))
668 		return -EINVAL;
669 
670 	/*
671 	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
672 	 * master is an MLC NAND and has a proper pairing scheme defined.
673 	 * We also reject masters that implement ->_writev() for now, because
674 	 * NAND controller drivers don't implement this hook, and adding the
675 	 * SLC -> MLC address/length conversion to this path is useless if we
676 	 * don't have a user.
677 	 */
678 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
679 	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
680 	     !master->pairing || master->_writev))
681 		return -EINVAL;
682 
683 	mutex_lock(&mtd_table_mutex);
684 
685 	ofidx = -1;
686 	if (np)
687 		ofidx = of_alias_get_id(np, "mtd");
688 	if (ofidx >= 0)
689 		i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
690 	else
691 		i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
692 	if (i < 0) {
693 		error = i;
694 		goto fail_locked;
695 	}
696 
697 	mtd->index = i;
698 	kref_init(&mtd->refcnt);
699 
700 	/* default value if not set by driver */
701 	if (mtd->bitflip_threshold == 0)
702 		mtd->bitflip_threshold = mtd->ecc_strength;
703 
704 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
705 		int ngroups = mtd_pairing_groups(master);
706 
707 		mtd->erasesize /= ngroups;
708 		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
709 			    mtd->erasesize;
710 	}
711 
712 	if (is_power_of_2(mtd->erasesize))
713 		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
714 	else
715 		mtd->erasesize_shift = 0;
716 
717 	if (is_power_of_2(mtd->writesize))
718 		mtd->writesize_shift = ffs(mtd->writesize) - 1;
719 	else
720 		mtd->writesize_shift = 0;
721 
722 	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
723 	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
724 
725 	/* Some chips always power up locked. Unlock them now */
726 	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
727 		error = mtd_unlock(mtd, 0, mtd->size);
728 		if (error && error != -EOPNOTSUPP)
729 			printk(KERN_WARNING
730 			       "%s: unlock failed, writes may not work\n",
731 			       mtd->name);
732 		/* Ignore unlock failures? */
733 		error = 0;
734 	}
735 
736 	/* Caller should have set dev.parent to match the
737 	 * physical device, if appropriate.
738 	 */
739 	mtd->dev.type = &mtd_devtype;
740 	mtd->dev.class = &mtd_class;
741 	mtd->dev.devt = MTD_DEVT(i);
742 	dev_set_name(&mtd->dev, "mtd%d", i);
743 	dev_set_drvdata(&mtd->dev, mtd);
744 	mtd_check_of_node(mtd);
745 	of_node_get(mtd_get_of_node(mtd));
746 	error = device_register(&mtd->dev);
747 	if (error) {
748 		put_device(&mtd->dev);
749 		goto fail_added;
750 	}
751 
752 	/* Add the nvmem provider */
753 	error = mtd_nvmem_add(mtd);
754 	if (error)
755 		goto fail_nvmem_add;
756 
757 	mtd_debugfs_populate(mtd);
758 
759 	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
760 		      "mtd%dro", i);
761 
762 	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
763 	/* No need to get a refcount on the module containing
764 	   the notifier, since we hold the mtd_table_mutex */
765 	list_for_each_entry(not, &mtd_notifiers, list)
766 		not->add(mtd);
767 
768 	mutex_unlock(&mtd_table_mutex);
769 
770 	if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
771 		if (IS_BUILTIN(CONFIG_MTD)) {
772 			pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
773 			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
774 		} else {
775 			pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
776 				mtd->index, mtd->name);
777 		}
778 	}
779 
780 	/* We _know_ we aren't being removed, because
781 	   our caller is still holding us here. So none
782 	   of this try_ nonsense, and no bitching about it
783 	   either. :) */
784 	__module_get(THIS_MODULE);
785 	return 0;
786 
787 fail_nvmem_add:
788 	device_unregister(&mtd->dev);
789 fail_added:
790 	of_node_put(mtd_get_of_node(mtd));
791 	idr_remove(&mtd_idr, i);
792 fail_locked:
793 	mutex_unlock(&mtd_table_mutex);
794 	return error;
795 }
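
/*
 * Example (hypothetical) device-tree alias honoured by the idr allocation in
 * add_mtd_device(), pinning a flash to a fixed /dev/mtd2 number:
 *
 *	aliases {
 *		mtd2 = &spi_nor_flash;
 *	};
 *
 * The "spi_nor_flash" label is illustrative; without an alias the next free
 * index is used.
 */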

/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently active MTD 'user' of its departure.
 *	Returns zero on success or -ENODEV on failure, which currently will
 *	happen if the requested device does not appear to be present in the
 *	list.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	kref_put(&mtd->refcnt, mtd_device_release);
	ret = 0;

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/*
 * Set a few defaults based on the parent device, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}

static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
	struct otp_info *info;
	ssize_t size = 0;
	unsigned int i;
	size_t retlen;
	int ret;

	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (is_user)
		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
	else
		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
	if (ret)
		goto err;

	for (i = 0; i < retlen / sizeof(*info); i++)
		size += info[i].length;

	kfree(info);
	return size;

err:
	kfree(info);

	/* ENODATA means there is no OTP region. */
	return ret == -ENODATA ? 0 : ret;
}

static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
						   const char *compatible,
						   int size,
						   nvmem_reg_read_t reg_read)
{
	struct nvmem_device *nvmem = NULL;
	struct nvmem_config config = {};
	struct device_node *np;

	/* DT binding is optional */
	np = of_get_compatible_child(mtd->dev.of_node, compatible);

	/* OTP nvmem will be registered on the physical device */
	config.dev = mtd->dev.parent;
	config.name = compatible;
	config.id = NVMEM_DEVID_AUTO;
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = true;
	config.type = NVMEM_TYPE_OTP;
	config.root_only = true;
	config.ignore_wp = true;
	config.reg_read = reg_read;
	config.size = size;
	config.of_node = np;
	config.priv = mtd;

	nvmem = nvmem_register(&config);
	/* Just ignore if there is no NVMEM support in the kernel */
	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
		nvmem = NULL;

	of_node_put(np);

	return nvmem;
}

static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
	struct device *dev = mtd->dev.parent;
	struct nvmem_device *nvmem;
	ssize_t size;
	int err;

	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
		size = mtd_otp_size(mtd, true);
		if (size < 0)
			return size;

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
						       mtd_nvmem_user_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_user_nvmem = nvmem;
		}
	}

	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
		size = mtd_otp_size(mtd, false);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			/*
			 * The factory OTP contains things such as a unique
			 * serial number and is small, so let's read it out
			 * and put it into the entropy pool.
			 */
			void *otp;

			otp = kmalloc(size, GFP_KERNEL);
			if (!otp) {
				err = -ENOMEM;
				goto err;
			}
			err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size);
			if (err < 0) {
				kfree(otp);
				goto err;
			}
			add_device_randomness(otp, err);
			kfree(otp);

			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
						       mtd_nvmem_fact_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_factory_nvmem = nvmem;
		}
	}

	return 0;

err:
	nvmem_unregister(mtd->otp_user_nvmem);
	return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	ret = mtd_otp_nvmem_add(mtd);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			goto out;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	if (ret) {
		nvmem_unregister(mtd->otp_user_nvmem);
		nvmem_unregister(mtd->otp_factory_nvmem);
	}

	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
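
/*
 * Typical driver usage (sketch; the "myflash" names are illustrative):
 *
 *	static const char * const probes[] = { "cmdlinepart", NULL };
 *
 *	err = mtd_device_parse_register(mtd, probes, NULL, myflash_parts,
 *					ARRAY_SIZE(myflash_parts));
 *
 * Passing NULL for @types falls back to the default parser list, which is
 * what the mtd_device_register() convenience wrapper does.
 */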

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot) {
		unregister_reboot_notifier(&master->reboot_notifier);
		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
	}

	nvmem_unregister(master->otp_user_nvmem);
	nvmem_unregister(master->otp_factory_nvmem);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 *	register_mtd_user - register a 'user' of MTD devices.
 *	@new: pointer to notifier info structure
 *
 *	Registers a pair of callback functions to be called upon addition
 *	or removal of MTD devices. Causes the 'add' callback to be immediately
 *	invoked for each MTD device currently present in the system.
 */
void register_mtd_user(struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);

/**
 *	unregister_mtd_user - unregister a 'user' of MTD devices.
 *	@old: pointer to notifier info structure
 *
 *	Removes a callback function pair from the list of 'users' to be
 *	notified upon addition or removal of MTD devices. Causes the
 *	'remove' callback to be immediately invoked for each MTD device
 *	currently present in the system.
 */
int unregister_mtd_user(struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and NULL address, return the num'th entry in the device
 *	table, if any. Given an address and num == -1, search the device table
 *	for a device with that address and return it if it's still present.
 *	Given both, return the num'th driver only if its address matches.
 *	Return an error pointer if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);
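
/*
 * Example (sketch): grab MTD number 0 by index and release it when done:
 *
 *	struct mtd_info *mtd = get_mtd_device(NULL, 0);
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);
 */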

int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (master->_get_device) {
		err = master->_get_device(mtd);
		if (err)
			return err;
	}

	if (!try_module_get(master->owner)) {
		if (master->_put_device)
			master->_put_device(master);
		return -ENODEV;
	}

	while (mtd) {
		if (mtd != master)
			kref_get(&mtd->refcnt);
		mtd = mtd->parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_get(&master->refcnt);

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
 *
 * @np: device tree node
 */
struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
{
	struct mtd_info *mtd = NULL;
	struct mtd_info *tmp;
	int err;

	mutex_lock(&mtd_table_mutex);

	err = -EPROBE_DEFER;
	mtd_for_each_device(tmp) {
		if (mtd_get_of_node(tmp) == np) {
			mtd = tmp;
			err = __get_mtd_device(mtd);
			break;
		}
	}

	mutex_unlock(&mtd_table_mutex);

	return err ? ERR_PTR(err) : mtd;
}
EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);

/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 *	This function returns an MTD device description structure in case of
 *	success and an error pointer in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd) {
		/* kref_put() can release mtd, so keep a reference on mtd->parent */
		struct mtd_info *parent = mtd->parent;

		if (mtd != master)
			kref_put(&mtd->refcnt, mtd_device_release);
		mtd = parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_put(&master->refcnt, mtd_device_release);

	module_put(master->owner);

	/* must be last, as master can be freed in the _put_device() call */
	if (master->_put_device)
		master->_put_device(master);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and to update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
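
/*
 * Example (sketch): synchronously erase the first eraseblock of a device:
 *
 *	struct erase_info ei = {
 *		.addr = 0,
 *		.len = mtd->erasesize,
 *	};
 *
 *	err = mtd_erase(mtd, &ei);
 *	if (err && ei.fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 *		pr_err("erase failed at 0x%llx\n",
 *		       (unsigned long long)ei.fail_addr);
 */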

/*
 * This is for eXecute-In-Place (XIP). phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint handler isn't NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
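
/*
 * Example (sketch): write then read back one writesize unit. A bitflip count
 * at or above mtd->bitflip_threshold surfaces as -EUCLEAN, which callers may
 * treat as "data valid, but consider cleaning the block soon":
 *
 *	size_t retlen;
 *
 *	err = mtd_write(mtd, ofs, mtd->writesize, &retlen, wbuf);
 *	if (!err)
 *		err = mtd_read(mtd, ofs, mtd->writesize, &retlen, rbuf);
 *	if (err == -EUCLEAN)
 *		err = 0;	// data was corrected, still usable
 */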

/*
 * In black-box flight-recorder-like scenarios we want to make successful
 * writes in interrupt context. panic_write() is only intended to be called
 * when it's known the kernel is about to panic and we need the write to
 * succeed. Since the kernel is not going to be running for much longer, this
 * function can break locks and delay to ensure the write succeeds (but not
 * sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	from = mtd_get_master_ofs(mtd, from);
	if (master->_read_oob)
		ret = master->_read_oob(master, from, ops);
	else
		ret = master->_read(master, from, ops->len, &ops->retlen,
				    ops->datbuf);

	return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	to = mtd_get_master_ofs(mtd, to);
	if (master->_write_oob)
		ret = master->_write_oob(master, to, ops);
	else
		ret = master->_write(master, to, ops->len, &ops->retlen,
				     ops->datbuf);

	return ret;
}

static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail - adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;
		pageofs = 0;
		info.pair++;
	}

	return max_bitflips;
}

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (ops->stats)
		memset(ops->stats, 0, sizeof(*ops->stats));

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	if (ops->stats)
		ops->stats->max_bitflips = ret_code;
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

int mtd_write_oob(struct mtd_info *mtd, loff_t to,
				struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!master->_write_oob && (!master->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		return mtd_io_emulated_slc(mtd, to, false, ops);

	return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
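
/*
 * Example (sketch): read one page of data plus its free OOB bytes in a
 * single call, using the auto-placement mode:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode = MTD_OPS_AUTO_OOB,
 *		.len = mtd->writesize,
 *		.ooblen = mtd->oobavail,
 *		.datbuf = databuf,
 *		.oobbuf = oobbuf,
 *	};
 *
 *	err = mtd_read_oob(mtd, offs, &ops);
 */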

/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single contiguous section, or one section
 *	     per ECC chunk (and sometimes several sections for a single
 *	     ECC chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobecc, 0, sizeof(*oobecc));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->ecc)
		return -ENOTSUPP;

	return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);

/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * to get all the free bytes information, then you should call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobfree, 0, sizeof(*oobfree));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->free)
		return -ENOTSUPP;

	return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
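
/*
 * Example (sketch): walk every free OOB region of a device, stopping on
 * -ERANGE as described above:
 *
 *	struct mtd_oob_region region;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_free(mtd, section++, &region))
 *		pr_info("free OOB: offset %u, length %u\n",
 *			region.offset, region.length);
 */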

/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the ECC position
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, say you want to know where the 4th ECC byte is
 * stored, you'll use:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				int *sectionp, struct mtd_oob_region *oobregion,
				int (*iter)(struct mtd_info *,
					    int section,
					    struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/*
	 * Adjust region info to make it start at the beginning of the
	 * 'start' ECC byte.
	 */
1868 	oobregion->offset += byte - pos;
1869 	oobregion->length -= byte - pos;
1870 	*sectionp = section;
1871 
1872 	return 0;
1873 }
1874 
1875 /**
1876  * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1877  *				  ECC byte
1878  * @mtd: mtd info structure
1879  * @eccbyte: the byte we are searching for
1880  * @section: pointer where the section id will be stored
1881  * @oobregion: OOB region information
1882  *
1883  * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1884  * byte.
1885  *
1886  * Returns zero on success, a negative error code otherwise.
1887  */
1888 int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1889 				 int *section,
1890 				 struct mtd_oob_region *oobregion)
1891 {
1892 	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1893 					 mtd_ooblayout_ecc);
1894 }
1895 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
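/*
 * Illustrative sketch (not part of mtdcore): locating the 4th ECC byte,
 * mirroring the example in the mtd_ooblayout_find_region() kerneldoc.
 * The function name is hypothetical.
 */
static int __maybe_unused example_locate_fourth_eccbyte(struct mtd_info *mtd)
{
	struct mtd_oob_region oobregion;
	int section, ret;

	/* Byte indices are zero-based, so the 4th ECC byte is index 3. */
	ret = mtd_ooblayout_find_eccregion(mtd, 3, &section, &oobregion);
	if (ret)
		return ret;

	pr_info("4th ECC byte: section %d, OOB offset %u\n",
		section, oobregion.offset);
	return 0;
}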
1896 
1897 /**
1898  * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1899  * @mtd: mtd info structure
1900  * @buf: destination buffer to store OOB bytes
1901  * @oobbuf: OOB buffer
1902  * @start: first byte to retrieve
1903  * @nbytes: number of bytes to retrieve
1904  * @iter: section iterator
1905  *
1906  * Extract bytes attached to a specific category (ECC or free)
1907  * from the OOB buffer and copy them into buf.
1908  *
1909  * Returns zero on success, a negative error code otherwise.
1910  */
1911 static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1912 				const u8 *oobbuf, int start, int nbytes,
1913 				int (*iter)(struct mtd_info *,
1914 					    int section,
1915 					    struct mtd_oob_region *oobregion))
1916 {
1917 	struct mtd_oob_region oobregion;
1918 	int section, ret;
1919 
1920 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1921 					&oobregion, iter);
1922 
1923 	while (!ret) {
1924 		int cnt;
1925 
1926 		cnt = min_t(int, nbytes, oobregion.length);
1927 		memcpy(buf, oobbuf + oobregion.offset, cnt);
1928 		buf += cnt;
1929 		nbytes -= cnt;
1930 
1931 		if (!nbytes)
1932 			break;
1933 
1934 		ret = iter(mtd, ++section, &oobregion);
1935 	}
1936 
1937 	return ret;
1938 }
1939 
1940 /**
1941  * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1942  * @mtd: mtd info structure
1943  * @buf: source buffer to get OOB bytes from
1944  * @oobbuf: OOB buffer
1945  * @start: first OOB byte to set
1946  * @nbytes: number of OOB bytes to set
1947  * @iter: section iterator
1948  *
1949  * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1950  * is selected by passing the appropriate iterator.
1951  *
1952  * Returns zero on success, a negative error code otherwise.
1953  */
1954 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1955 				u8 *oobbuf, int start, int nbytes,
1956 				int (*iter)(struct mtd_info *,
1957 					    int section,
1958 					    struct mtd_oob_region *oobregion))
1959 {
1960 	struct mtd_oob_region oobregion;
1961 	int section, ret;
1962 
1963 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1964 					&oobregion, iter);
1965 
1966 	while (!ret) {
1967 		int cnt;
1968 
1969 		cnt = min_t(int, nbytes, oobregion.length);
1970 		memcpy(oobbuf + oobregion.offset, buf, cnt);
1971 		buf += cnt;
1972 		nbytes -= cnt;
1973 
1974 		if (!nbytes)
1975 			break;
1976 
1977 		ret = iter(mtd, ++section, &oobregion);
1978 	}
1979 
1980 	return ret;
1981 }
1982 
1983 /**
1984  * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
1985  * @mtd: mtd info structure
1986  * @iter: category iterator
1987  *
1988  * Count the number of bytes in a given category.
1989  *
1990  * Returns the number of bytes in the category on success, a negative error code otherwise.
1991  */
1992 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1993 				int (*iter)(struct mtd_info *,
1994 					    int section,
1995 					    struct mtd_oob_region *oobregion))
1996 {
1997 	struct mtd_oob_region oobregion;
1998 	int section = 0, ret, nbytes = 0;
1999 
2000 	while (1) {
2001 		ret = iter(mtd, section++, &oobregion);
2002 		if (ret) {
2003 			if (ret == -ERANGE)
2004 				ret = nbytes;
2005 			break;
2006 		}
2007 
2008 		nbytes += oobregion.length;
2009 	}
2010 
2011 	return ret;
2012 }
2013 
2014 /**
2015  * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
2016  * @mtd: mtd info structure
2017  * @eccbuf: destination buffer to store ECC bytes
2018  * @oobbuf: OOB buffer
2019  * @start: first ECC byte to retrieve
2020  * @nbytes: number of ECC bytes to retrieve
2021  *
2022  * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
2023  *
2024  * Returns zero on success, a negative error code otherwise.
2025  */
2026 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
2027 			       const u8 *oobbuf, int start, int nbytes)
2028 {
2029 	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2030 				       mtd_ooblayout_ecc);
2031 }
2032 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
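/*
 * Illustrative sketch (not part of mtdcore): pulling the ECC bytes out of
 * a raw OOB dump. 'oob_raw' is assumed to hold mtd->oobsize bytes obtained
 * with MTD_OPS_RAW; the helper name is hypothetical.
 */
static int __maybe_unused example_extract_ecc(struct mtd_info *mtd,
					      const u8 *oob_raw,
					      u8 *ecc, int ecc_len)
{
	/* Gather ecc_len ECC bytes, starting from the very first one. */
	return mtd_ooblayout_get_eccbytes(mtd, ecc, oob_raw, 0, ecc_len);
}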
2033 
2034 /**
2035  * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
2036  * @mtd: mtd info structure
2037  * @eccbuf: source buffer to get ECC bytes from
2038  * @oobbuf: OOB buffer
2039  * @start: first ECC byte to set
2040  * @nbytes: number of ECC bytes to set
2041  *
2042  * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
2043  *
2044  * Returns zero on success, a negative error code otherwise.
2045  */
2046 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
2047 			       u8 *oobbuf, int start, int nbytes)
2048 {
2049 	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2050 				       mtd_ooblayout_ecc);
2051 }
2052 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
2053 
2054 /**
2055  * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
2056  * @mtd: mtd info structure
2057  * @databuf: destination buffer to store data (free) bytes
2058  * @oobbuf: OOB buffer
2059  * @start: first free byte to retrieve
2060  * @nbytes: number of free bytes to retrieve
2061  *
2062  * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
2063  *
2064  * Returns zero on success, a negative error code otherwise.
2065  */
2066 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
2067 				const u8 *oobbuf, int start, int nbytes)
2068 {
2069 	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
2070 				       mtd_ooblayout_free);
2071 }
2072 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
2073 
2074 /**
2075  * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
2076  * @mtd: mtd info structure
2077  * @databuf: source buffer to get data bytes from
2078  * @oobbuf: OOB buffer
2079  * @start: first free byte to set
2080  * @nbytes: number of free bytes to set
2081  *
2082  * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
2083  *
2084  * Returns zero on success, a negative error code otherwise.
2085  */
2086 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
2087 				u8 *oobbuf, int start, int nbytes)
2088 {
2089 	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
2090 				       mtd_ooblayout_free);
2091 }
2092 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
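/*
 * Illustrative sketch (not part of mtdcore): placing caller metadata into
 * the free OOB bytes of a buffer that will later be written out with
 * mtd_write_oob(). The helper name and 'tag' layout are hypothetical.
 */
static int __maybe_unused example_pack_oob_tag(struct mtd_info *mtd,
					       u8 *oobbuf,
					       const u8 *tag, int tag_len)
{
	/* Scatter tag_len bytes into the free OOB area, from free byte 0. */
	return mtd_ooblayout_set_databytes(mtd, tag, oobbuf, 0, tag_len);
}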
2093 
2094 /**
2095  * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
2096  * @mtd: mtd info structure
2097  *
2098  * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
2099  *
2100  * Returns the number of free bytes on success, a negative error code otherwise.
2101  */
2102 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
2103 {
2104 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
2105 }
2106 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
2107 
2108 /**
2109  * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
2110  * @mtd: mtd info structure
2111  *
2112  * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
2113  *
2114  * Returns the number of ECC bytes on success, a negative error code otherwise.
2115  */
2116 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
2117 {
2118 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
2119 }
2120 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
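/*
 * Illustrative sketch (not part of mtdcore): using the counters above to
 * size the buffer handed to mtd_ooblayout_get_eccbytes(). The function
 * name is hypothetical.
 */
static int __maybe_unused example_alloc_ecc_buf(struct mtd_info *mtd,
						u8 **eccbuf)
{
	int eccbytes = mtd_ooblayout_count_eccbytes(mtd);

	if (eccbytes < 0)
		return eccbytes;

	*eccbuf = kmalloc(eccbytes, GFP_KERNEL);
	return *eccbuf ? eccbytes : -ENOMEM;
}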
2121 
2122 /*
2123  * Methods to access the protection register area, present in some flash
2124  * devices. The user data is one-time programmable, but the factory data is
2125  * read only.
2126  */
2127 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2128 			   struct otp_info *buf)
2129 {
2130 	struct mtd_info *master = mtd_get_master(mtd);
2131 
2132 	if (!master->_get_fact_prot_info)
2133 		return -EOPNOTSUPP;
2134 	if (!len)
2135 		return 0;
2136 	return master->_get_fact_prot_info(master, len, retlen, buf);
2137 }
2138 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2139 
2140 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2141 			   size_t *retlen, u_char *buf)
2142 {
2143 	struct mtd_info *master = mtd_get_master(mtd);
2144 
2145 	*retlen = 0;
2146 	if (!master->_read_fact_prot_reg)
2147 		return -EOPNOTSUPP;
2148 	if (!len)
2149 		return 0;
2150 	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2151 }
2152 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2153 
2154 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2155 			   struct otp_info *buf)
2156 {
2157 	struct mtd_info *master = mtd_get_master(mtd);
2158 
2159 	if (!master->_get_user_prot_info)
2160 		return -EOPNOTSUPP;
2161 	if (!len)
2162 		return 0;
2163 	return master->_get_user_prot_info(master, len, retlen, buf);
2164 }
2165 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2166 
2167 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2168 			   size_t *retlen, u_char *buf)
2169 {
2170 	struct mtd_info *master = mtd_get_master(mtd);
2171 
2172 	*retlen = 0;
2173 	if (!master->_read_user_prot_reg)
2174 		return -EOPNOTSUPP;
2175 	if (!len)
2176 		return 0;
2177 	return master->_read_user_prot_reg(master, from, len, retlen, buf);
2178 }
2179 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
2180 
2181 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2182 			    size_t *retlen, const u_char *buf)
2183 {
2184 	struct mtd_info *master = mtd_get_master(mtd);
2185 	int ret;
2186 
2187 	*retlen = 0;
2188 	if (!master->_write_user_prot_reg)
2189 		return -EOPNOTSUPP;
2190 	if (!len)
2191 		return 0;
2192 	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2193 	if (ret)
2194 		return ret;
2195 
2196 	/*
2197 	 * If no data could be written at all, we are out of OTP space
2198 	 * and must return -ENOSPC.
2199 	 */
2200 	return (*retlen) ? 0 : -ENOSPC;
2201 }
2202 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
2203 
2204 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2205 {
2206 	struct mtd_info *master = mtd_get_master(mtd);
2207 
2208 	if (!master->_lock_user_prot_reg)
2209 		return -EOPNOTSUPP;
2210 	if (!len)
2211 		return 0;
2212 	return master->_lock_user_prot_reg(master, from, len);
2213 }
2214 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2215 
2216 int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2217 {
2218 	struct mtd_info *master = mtd_get_master(mtd);
2219 
2220 	if (!master->_erase_user_prot_reg)
2221 		return -EOPNOTSUPP;
2222 	if (!len)
2223 		return 0;
2224 	return master->_erase_user_prot_reg(master, from, len);
2225 }
2226 EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
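/*
 * Illustrative sketch (not part of mtdcore): sizing and reading the first
 * user OTP region with the accessors above. The helper name is
 * hypothetical, and a real caller would usually retrieve all region
 * descriptors, not just the first.
 */
static int __maybe_unused example_read_user_otp(struct mtd_info *mtd,
						u8 *buf, size_t len)
{
	struct otp_info info;
	size_t retlen;
	int ret;

	/* Fetch the first OTP region descriptor (start/length/locked). */
	ret = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, &info);
	if (ret)
		return ret;
	if (retlen < sizeof(info))
		return -ENODATA;

	/* Read from the start of that region, at most 'len' bytes. */
	len = min_t(size_t, len, info.length);
	return mtd_read_user_prot_reg(mtd, info.start, len, &retlen, buf);
}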
2227 
2228 /* Chip-supported device locking */
2229 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2230 {
2231 	struct mtd_info *master = mtd_get_master(mtd);
2232 
2233 	if (!master->_lock)
2234 		return -EOPNOTSUPP;
2235 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2236 		return -EINVAL;
2237 	if (!len)
2238 		return 0;
2239 
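	/*
	 * Note: under SLC-on-MLC emulation the partition exposes a smaller
	 * erasesize than the master (typically half), so offsets and
	 * lengths are converted from emulated eraseblocks to master
	 * eraseblocks here. E.g., assuming a 256KiB master block exposed
	 * as a 128KiB SLC block, an emulated offset of 384KiB (3 blocks)
	 * maps to 768KiB on the master device.
	 */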
2240 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2241 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2242 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2243 	}
2244 
2245 	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2246 }
2247 EXPORT_SYMBOL_GPL(mtd_lock);
2248 
2249 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2250 {
2251 	struct mtd_info *master = mtd_get_master(mtd);
2252 
2253 	if (!master->_unlock)
2254 		return -EOPNOTSUPP;
2255 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2256 		return -EINVAL;
2257 	if (!len)
2258 		return 0;
2259 
2260 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2261 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2262 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2263 	}
2264 
2265 	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2266 }
2267 EXPORT_SYMBOL_GPL(mtd_unlock);
2268 
2269 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2270 {
2271 	struct mtd_info *master = mtd_get_master(mtd);
2272 
2273 	if (!master->_is_locked)
2274 		return -EOPNOTSUPP;
2275 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2276 		return -EINVAL;
2277 	if (!len)
2278 		return 0;
2279 
2280 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2281 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2282 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2283 	}
2284 
2285 	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2286 }
2287 EXPORT_SYMBOL_GPL(mtd_is_locked);
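/*
 * Illustrative sketch (not part of mtdcore): the usual
 * unlock-modify-relock sequence around a write to a protected range. The
 * helper name is hypothetical; chips without lock support make
 * mtd_unlock() return -EOPNOTSUPP, which is treated as "nothing to do".
 */
static int __maybe_unused example_unlocked_write(struct mtd_info *mtd,
						 loff_t to, size_t len,
						 const u_char *buf)
{
	size_t retlen;
	int ret;

	ret = mtd_unlock(mtd, to, len);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	ret = mtd_write(mtd, to, len, &retlen, buf);

	/* Best-effort relock; ignore chips without lock support. */
	mtd_lock(mtd, to, len);
	return ret;
}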
2288 
2289 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2290 {
2291 	struct mtd_info *master = mtd_get_master(mtd);
2292 
2293 	if (ofs < 0 || ofs >= mtd->size)
2294 		return -EINVAL;
2295 	if (!master->_block_isreserved)
2296 		return 0;
2297 
2298 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2299 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2300 
2301 	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2302 }
2303 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2304 
2305 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2306 {
2307 	struct mtd_info *master = mtd_get_master(mtd);
2308 
2309 	if (ofs < 0 || ofs >= mtd->size)
2310 		return -EINVAL;
2311 	if (!master->_block_isbad)
2312 		return 0;
2313 
2314 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2315 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2316 
2317 	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2318 }
2319 EXPORT_SYMBOL_GPL(mtd_block_isbad);
2320 
2321 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2322 {
2323 	struct mtd_info *master = mtd_get_master(mtd);
2324 	int ret;
2325 
2326 	if (!master->_block_markbad)
2327 		return -EOPNOTSUPP;
2328 	if (ofs < 0 || ofs >= mtd->size)
2329 		return -EINVAL;
2330 	if (!(mtd->flags & MTD_WRITEABLE))
2331 		return -EROFS;
2332 
2333 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2334 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2335 
2336 	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2337 	if (ret)
2338 		return ret;
2339 
2340 	while (mtd->parent) {
2341 		mtd->ecc_stats.badblocks++;
2342 		mtd = mtd->parent;
2343 	}
2344 
2345 	return 0;
2346 }
2347 EXPORT_SYMBOL_GPL(mtd_block_markbad);
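/*
 * Illustrative sketch (not part of mtdcore): the classic scan that skips
 * bad eraseblocks, built on mtd_block_isbad(). The function name is
 * hypothetical.
 */
static int __maybe_unused example_count_bad_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int ret, bad = 0;

	for (ofs = 0; ofs < (loff_t)mtd->size; ofs += mtd->erasesize) {
		ret = mtd_block_isbad(mtd, ofs);
		if (ret < 0)
			return ret;
		if (ret)
			bad++;
	}

	return bad;
}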
2348 
2349 /*
2350  * default_mtd_writev - the default writev method
2351  * @mtd: mtd device description object pointer
2352  * @vecs: the vectors to write
2353  * @count: count of vectors in @vecs
2354  * @to: the MTD device offset to write to
2355  * @retlen: on exit contains the count of bytes written to the MTD device.
2356  *
2357  * This function returns zero in case of success and a negative error code in
2358  * case of failure.
2359  */
2360 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2361 			      unsigned long count, loff_t to, size_t *retlen)
2362 {
2363 	unsigned long i;
2364 	size_t totlen = 0, thislen;
2365 	int ret = 0;
2366 
2367 	for (i = 0; i < count; i++) {
2368 		if (!vecs[i].iov_len)
2369 			continue;
2370 		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2371 				vecs[i].iov_base);
2372 		totlen += thislen;
2373 		if (ret || thislen != vecs[i].iov_len)
2374 			break;
2375 		to += vecs[i].iov_len;
2376 	}
2377 	*retlen = totlen;
2378 	return ret;
2379 }
2380 
2381 /*
2382  * mtd_writev - the vector-based MTD write method
2383  * @mtd: mtd device description object pointer
2384  * @vecs: the vectors to write
2385  * @count: count of vectors in @vecs
2386  * @to: the MTD device offset to write to
2387  * @retlen: on exit contains the count of bytes written to the MTD device.
2388  *
2389  * This function returns zero in case of success and a negative error code in
2390  * case of failure.
2391  */
2392 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2393 	       unsigned long count, loff_t to, size_t *retlen)
2394 {
2395 	struct mtd_info *master = mtd_get_master(mtd);
2396 
2397 	*retlen = 0;
2398 	if (!(mtd->flags & MTD_WRITEABLE))
2399 		return -EROFS;
2400 
2401 	if (!master->_writev)
2402 		return default_mtd_writev(mtd, vecs, count, to, retlen);
2403 
2404 	return master->_writev(master, vecs, count,
2405 			       mtd_get_master_ofs(mtd, to), retlen);
2406 }
2407 EXPORT_SYMBOL_GPL(mtd_writev);
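/*
 * Illustrative sketch (not part of mtdcore): gathering a header and a
 * payload into one vectored write. All names are hypothetical.
 */
static int __maybe_unused example_gather_write(struct mtd_info *mtd,
					       loff_t to,
					       void *hdr, size_t hdr_len,
					       void *data, size_t data_len)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,  .iov_len = hdr_len  },
		{ .iov_base = data, .iov_len = data_len },
	};
	size_t retlen;

	return mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
}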
2408 
2409 /**
2410  * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2411  * @mtd: mtd device description object pointer
2412  * @size: a pointer to the ideal or maximum size of the allocation, points
2413  *        to the actual allocation size on success.
2414  *
2415  * This routine attempts to allocate a contiguous kernel buffer up to
2416  * the specified size, backing off the size of the request exponentially
2417  * until the request succeeds or until the allocation size falls below
2418  * the system page size. To avoid adversely impacting system
2419  * performance, when allocating more than one page we ask the memory
2420  * allocator to avoid retrying, swapping, writing back or performing
2421  * I/O.
2422  *
2423  * Note, this function also makes sure that the allocated buffer is aligned to
2424  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2425  *
2426  * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2427  * to handle smaller (i.e. degraded) buffer allocations under low-memory
2428  * or fragmented-memory situations, where allocations smaller than the
2429  * requested ideal are acceptable.
2430  *
2431  * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2432  */
2433 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2434 {
2435 	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2436 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2437 	void *kbuf;
2438 
2439 	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2440 
2441 	while (*size > min_alloc) {
2442 		kbuf = kmalloc(*size, flags);
2443 		if (kbuf)
2444 			return kbuf;
2445 
2446 		*size >>= 1;
2447 		*size = ALIGN(*size, mtd->writesize);
2448 	}
2449 
2450 	/*
2451 	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2452 	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2453 	 */
2454 	return kmalloc(*size, GFP_KERNEL);
2455 }
2456 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
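/*
 * Illustrative sketch (not part of mtdcore): reading a region in chunks
 * whose size degrades gracefully under memory pressure, which is the
 * pattern mtd_kmalloc_up_to() exists for. The helper name is
 * hypothetical, and a real caller may want to treat -EUCLEAN (fixable
 * bitflips) as success.
 */
static int __maybe_unused example_chunked_read(struct mtd_info *mtd,
					       loff_t from, size_t total)
{
	size_t chunk = total;	/* Ideal size; may come back smaller. */
	size_t retlen;
	void *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &chunk);
	if (!buf)
		return -ENOMEM;

	while (total) {
		ret = mtd_read(mtd, from, min(chunk, total), &retlen, buf);
		if (ret || !retlen)
			break;

		/* ... consume retlen bytes of 'buf' here ... */
		from += retlen;
		total -= retlen;
	}

	kfree(buf);
	return ret;
}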
2457 
2458 #ifdef CONFIG_PROC_FS
2459 
2460 /*====================================================================*/
2461 /* Support for /proc/mtd */
2462 
2463 static int mtd_proc_show(struct seq_file *m, void *v)
2464 {
2465 	struct mtd_info *mtd;
2466 
2467 	seq_puts(m, "dev:    size   erasesize  name\n");
2468 	mutex_lock(&mtd_table_mutex);
2469 	mtd_for_each_device(mtd) {
2470 		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2471 			   mtd->index, (unsigned long long)mtd->size,
2472 			   mtd->erasesize, mtd->name);
2473 	}
2474 	mutex_unlock(&mtd_table_mutex);
2475 	return 0;
2476 }
2477 #endif /* CONFIG_PROC_FS */
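/*
 * Example /proc/mtd output produced by the seq_printf() format above
 * (values are illustrative only):
 *
 *   dev:    size   erasesize  name
 *   mtd0: 01000000 00020000 "u-boot"
 *   mtd1: 0f000000 00020000 "rootfs"
 */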
2478 
2479 /*====================================================================*/
2480 /* Init code */
2481 
2482 static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2483 {
2484 	struct backing_dev_info *bdi;
2485 	int ret;
2486 
2487 	bdi = bdi_alloc(NUMA_NO_NODE);
2488 	if (!bdi)
2489 		return ERR_PTR(-ENOMEM);
2490 	bdi->ra_pages = 0;
2491 	bdi->io_pages = 0;
2492 
2493 	/*
2494 	 * Append a '-0' suffix to the name to keep the historical name
2495 	 * format. Since this is called only once, the name is unique.
2496 	 */
2497 	ret = bdi_register(bdi, "%.28s-0", name);
2498 	if (ret)
2499 		bdi_put(bdi);
2500 
2501 	return ret ? ERR_PTR(ret) : bdi;
2502 }
2503 
2504 static struct proc_dir_entry *proc_mtd;
2505 
2506 static int __init init_mtd(void)
2507 {
2508 	int ret;
2509 
2510 	ret = class_register(&mtd_class);
2511 	if (ret)
2512 		goto err_reg;
2513 
2514 	mtd_bdi = mtd_bdi_init("mtd");
2515 	if (IS_ERR(mtd_bdi)) {
2516 		ret = PTR_ERR(mtd_bdi);
2517 		goto err_bdi;
2518 	}
2519 
2520 	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2521 
2522 	ret = init_mtdchar();
2523 	if (ret)
2524 		goto out_procfs;
2525 
2526 	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2527 	debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2528 			    &mtd_expert_analysis_mode);
2529 
2530 	return 0;
2531 
2532 out_procfs:
2533 	if (proc_mtd)
2534 		remove_proc_entry("mtd", NULL);
2535 	bdi_unregister(mtd_bdi);
2536 	bdi_put(mtd_bdi);
2537 err_bdi:
2538 	class_unregister(&mtd_class);
2539 err_reg:
2540 	pr_err("Error registering mtd class or bdi: %d\n", ret);
2541 	return ret;
2542 }
2543 
2544 static void __exit cleanup_mtd(void)
2545 {
2546 	debugfs_remove_recursive(dfs_dir_mtd);
2547 	cleanup_mtdchar();
2548 	if (proc_mtd)
2549 		remove_proc_entry("mtd", NULL);
2550 	class_unregister(&mtd_class);
2551 	bdi_unregister(mtd_bdi);
2552 	bdi_put(mtd_bdi);
2553 	idr_destroy(&mtd_idr);
2554 }
2555 
2556 module_init(init_mtd);
2557 module_exit(cleanup_mtd);
2558 
2559 MODULE_LICENSE("GPL");
2560 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2561 MODULE_DESCRIPTION("Core MTD registration and access routines");
2562