// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006      Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
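
/*
 * Worked example (illustrative): MTD_CHAR_MAJOR is 90, so index 2 maps to
 * the read-write char device (90, 4) backing /dev/mtd2, while the
 * companion read-only node /dev/mtd2ro created below uses minor 5, i.e.
 * MTD_DEVT(2) + 1.
 */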

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)

#define MTD_DEVICE_ATTR_RW(name) \
static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
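
/*
 * For reference, a sketch of what these helpers expand to (assuming the
 * standard DEVICE_ATTR() macro from <linux/device.h>):
 *
 *	MTD_DEVICE_ATTR_RO(type)
 * becomes
 *	static DEVICE_ATTR(type, 0444, mtd_type_show, NULL);
 * which defines the struct device_attribute instance dev_attr_type
 * referenced in the mtd_attrs[] table further down.
 */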

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);

static ssize_t mtd_oobavail_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);

static ssize_t mtd_corrected_bits_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */

static ssize_t mtd_ecc_failures_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */

static ssize_t mtd_bad_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);

static ssize_t mtd_bbt_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name		= "mtd",
	.groups		= mtd_groups,
	.release	= mtd_release,
};

static int mtd_partid_debug_show(struct seq_file *s, void *p)
{
	struct mtd_info *mtd = s->private;

	seq_printf(s, "%s\n", mtd->dbg.partid);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);

static int mtd_partname_debug_show(struct seq_file *s, void *p)
{
	struct mtd_info *mtd = s->private;

	seq_printf(s, "%s\n", mtd->dbg.partname);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct device *dev = &mtd->dev;
	struct dentry *root;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
	mtd->dbg.dfs_dir = root;

	if (master->dbg.partid)
		debugfs_create_file("partid", 0400, root, master,
				    &mtd_partid_debug_fops);

	if (master->dbg.partname)
		debugfs_create_file("partname", 0400, root, master,
				    &mtd_partname_debug_fops);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve the pairing information associated with @wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.pair = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a non-negative number representing the wunit associated with the
 * info struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return master->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);
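
/*
 * Illustrative example: on a typical MLC NAND each cell stores two bits,
 * so ->ngroups is 2 and every pair has a "lower" (group 0) and an "upper"
 * (group 1) page. On SLC, or when no pairing scheme is provided, this
 * function falls back to a single group.
 */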

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct device_node *node = mtd_get_of_node(mtd);
	struct nvmem_config config = {};

	config.id = -1;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.ignore_wp = true;
	config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
			mtd->nvmem = NULL;
		} else {
			dev_err(&mtd->dev, "Failed to register NVMEM device\n");
			return PTR_ERR(mtd->nvmem);
		}
	}

	return 0;
}
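
/*
 * Sketch of the device tree shape this provider expects (an assumption
 * based on the "nvmem-cells" compatible checked above; see the MTD
 * partition bindings for the authoritative form): a partition marked
 * compatible = "nvmem-cells" exposes its child nodes as NVMEM cells, e.g.
 *
 *	partition@0 {
 *		compatible = "nvmem-cells";
 *		label = "calibration";
 *		reg = <0x0 0x10000>;
 *
 *		macaddr: mac-address@8 {
 *			reg = <0x8 0x6>;
 *		};
 *	};
 *
 * Consumers can then reference &macaddr through the usual nvmem-cells
 * phandle properties.
 */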

/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently active MTD 'user' of its arrival. Returns
 *	zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
	 * master is an MLC NAND and has a proper pairing scheme defined.
	 * We also reject masters that implement ->_writev() for now, because
	 * NAND controller drivers don't implement this hook, and adding the
	 * SLC -> MLC address/length conversion to this path is useless if we
	 * don't have a user.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error)
		goto fail_added;

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently active MTD 'user' of its departure.
 *	Returns zero on success or a negative error code on failure, which
 *	currently will happen if the requested device is not present in the
 *	list (-ENODEV) or is still in use (-EBUSY).
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
		the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		debugfs_remove_recursive(mtd->dbg.dfs_dir);

		/* Try to remove the NVMEM provider */
		nvmem_unregister(mtd->nvmem);

		device_unregister(&mtd->dev);

		/* Clear dev so mtd can be safely re-registered later if desired */
		memset(&mtd->dev, 0, sizeof(mtd->dev));

		idr_remove(&mtd_idr, mtd->index);
		of_node_put(mtd_get_of_node(mtd));

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/*
 * Set a few defaults based on the parent device, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}

static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
	struct otp_info *info;
	ssize_t size = 0;
	unsigned int i;
	size_t retlen;
	int ret;

	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (is_user)
		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
	else
		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
	if (ret)
		goto err;

	for (i = 0; i < retlen / sizeof(*info); i++)
		size += info[i].length;

	kfree(info);
	return size;

err:
	kfree(info);

	/* ENODATA means there is no OTP region. */
	return ret == -ENODATA ? 0 : ret;
}

static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
						   const char *compatible,
						   int size,
						   nvmem_reg_read_t reg_read)
{
	struct nvmem_device *nvmem = NULL;
	struct nvmem_config config = {};
	struct device_node *np;

	/* DT binding is optional */
	np = of_get_compatible_child(mtd->dev.of_node, compatible);

	/* OTP nvmem will be registered on the physical device */
	config.dev = mtd->dev.parent;
	config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
	config.id = NVMEM_DEVID_NONE;
	config.owner = THIS_MODULE;
	config.type = NVMEM_TYPE_OTP;
	config.root_only = true;
	config.ignore_wp = true;
	config.reg_read = reg_read;
	config.size = size;
	config.of_node = np;
	config.priv = mtd;

	nvmem = nvmem_register(&config);
	/* Just ignore if there is no NVMEM support in the kernel */
	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
		nvmem = NULL;

	of_node_put(np);
	kfree(config.name);

	return nvmem;
}

static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
	struct nvmem_device *nvmem;
	ssize_t size;
	int err;

	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
		size = mtd_otp_size(mtd, true);
		if (size < 0)
			return size;

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
						       mtd_nvmem_user_otp_reg_read);
			if (IS_ERR(nvmem)) {
				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
				return PTR_ERR(nvmem);
			}
			mtd->otp_user_nvmem = nvmem;
		}
	}

	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
		size = mtd_otp_size(mtd, false);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
						       mtd_nvmem_fact_otp_reg_read);
			if (IS_ERR(nvmem)) {
				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_factory_nvmem = nvmem;
		}
	}

	return 0;

err:
	nvmem_unregister(mtd->otp_user_nvmem);
	return err;
}
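
/*
 * Optional DT shape for the OTP providers above (a sketch based on the
 * "user-otp"/"factory-otp" compatibles this file looks up; consult the
 * MTD binding documents for the authoritative form):
 *
 *	flash@0 {
 *		...
 *
 *		otp: user-otp {
 *			compatible = "user-otp";
 *			#address-cells = <1>;
 *			#size-cells = <1>;
 *
 *			serial: serial-number@0 {
 *				reg = <0x0 0x10>;
 *			};
 *		};
 *	};
 */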

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

	ret = mtd_otp_nvmem_add(mtd);

out:
	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
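
/*
 * Typical driver-side usage (a hedged sketch, not taken from a specific
 * driver; the "myflash_*" names are invented for illustration):
 *
 *	static const struct mtd_partition myflash_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_256K },
 *		{ .name = "data", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	err = mtd_device_parse_register(mtd, NULL, NULL, myflash_parts,
 *					ARRAY_SIZE(myflash_parts));
 *
 * Passing a NULL @types selects the default parsers (e.g. cmdlinepart,
 * ofpart), and the static table above is only used when no parser finds
 * anything.
 */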

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot) {
		unregister_reboot_notifier(&master->reboot_notifier);
		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
	}

	nvmem_unregister(master->otp_user_nvmem);
	nvmem_unregister(master->otp_factory_nvmem);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 *	register_mtd_user - register a 'user' of MTD devices.
 *	@new: pointer to notifier info structure
 *
 *	Registers a pair of callback functions to be called upon addition
 *	or removal of MTD devices. Causes the 'add' callback to be immediately
 *	invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);

/**
 *	unregister_mtd_user - unregister a 'user' of MTD devices.
 *	@old: pointer to notifier info structure
 *
 *	Removes a callback function pair from the list of 'users' to be
 *	notified upon addition or removal of MTD devices. Causes the
 *	'remove' callback to be immediately invoked for each MTD device
 *	currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and NULL address, return the num'th entry in the device
 *	table, if any.	Given an address and num == -1, search the device table
 *	for a device with that address and return if it's still present. Given
 *	both, return the num'th driver only if its address matches. Return
 *	error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);
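
/*
 * Caller-side sketch (illustrative): open device 0 by number, check the
 * handle with IS_ERR(), and drop the reference with put_mtd_device() when
 * done:
 *
 *	struct mtd_info *mtd = get_mtd_device(NULL, 0);
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);
 */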

int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (!try_module_get(master->owner))
		return -ENODEV;

	if (master->_get_device) {
		err = master->_get_device(mtd);

		if (err) {
			module_put(master->owner);
			return err;
		}
	}

	master->usecount++;

	while (mtd->parent) {
		mtd->usecount++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 * 	This function returns the MTD device description structure in case of
 * 	success and an error pointer in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd->parent) {
		--mtd->usecount;
		BUG_ON(mtd->usecount < 0);
		mtd = mtd->parent;
	}

	master->usecount--;

	if (master->_put_device)
		master->_put_device(master);

	module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
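
/*
 * Caller-side sketch (illustrative): erase one eraseblock at 'ofs' and
 * check instr.fail_addr on error:
 *
 *	struct erase_info ei = {
 *		.addr = ofs,
 *		.len  = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &ei);
 *
 *	if (err && ei.fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 *		pr_warn("erase failed at %llx\n",
 *			(unsigned long long)ei.fail_addr);
 */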

/*
 * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);
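
/*
 * Caller-side sketch (illustrative) of the -EUCLEAN convention: a read
 * returning -EUCLEAN delivered valid data, but enough bitflips were
 * corrected that the caller should consider scrubbing (erasing and
 * rewriting) the block:
 *
 *	size_t retlen;
 *	int err = mtd_read(mtd, from, len, &retlen, buf);
 *
 *	if (err == -EUCLEAN)
 *		schedule_scrub(from);	// hypothetical helper
 *	else if (err)
 *		return err;
 */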

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);

/*
 * In blackbox flight-recorder-like scenarios we want writes to succeed even
 * in interrupt context. panic_write() is only intended to be called when it
 * is known the kernel is about to panic and we need the write to succeed.
 * Since the kernel is not going to be running for much longer, this function
 * can break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}
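
/*
 * Worked example for the maxooblen check above (numbers invented for
 * illustration): on a device with 2048-byte pages and 64 available OOB
 * bytes per page, a 1MiB mtd has 512 write units. Starting at the offset
 * of page 500 with ooboffs == 16, at most (512 - 500) * 64 - 16 = 752 OOB
 * bytes can still be transferred, so any larger ->ooblen is rejected.
 */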

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	from = mtd_get_master_ofs(mtd, from);
	if (master->_read_oob)
		ret = master->_read_oob(master, from, ops);
	else
		ret = master->_read(master, from, ops->len, &ops->retlen,
				    ops->datbuf);

	return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	to = mtd_get_master_ofs(mtd, to);
	if (master->_write_oob)
		ret = master->_write_oob(master, to, ops);
	else
		ret = master->_write(master, to, ops->len, &ops->retlen,
				     ops->datbuf);

	return ret;
}

static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail - adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;
		pageofs = 0;
		info.pair++;
	}

	return max_bitflips;
}

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
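
/*
 * Caller-side sketch (illustrative): read one page of data plus its free
 * OOB bytes in a single call. MTD_OPS_AUTO_OOB asks the core to place the
 * OOB data according to the free regions of the ECC layout:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.len	= mtd->writesize,
 *		.datbuf	= databuf,
 *		.ooblen	= mtd->oobavail,
 *		.oobbuf	= oobbuf,
 *	};
 *	int err = mtd_read_oob(mtd, page_offset, &ops);
 */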

int mtd_write_oob(struct mtd_info *mtd, loff_t to,
				struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!master->_write_oob && (!master->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		return mtd_io_emulated_slc(mtd, to, false, ops);

	return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);

/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single contiguous section, or one section
 *	     per ECC chunk (and sometimes several sections for a single
 *	     ECC chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobecc, 0, sizeof(*oobecc));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->ecc)
		return -ENOTSUPP;

	return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
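
/*
 * Iteration sketch (illustrative) for walking every ECC region until the
 * layout reports -ERANGE, as described above:
 *
 *	struct mtd_oob_region region;
 *	int section = 0, err;
 *
 *	while (!(err = mtd_ooblayout_ecc(mtd, section++, &region)))
 *		pr_info("ecc region: offset %u, length %u\n",
 *			region.offset, region.length);
 *	if (err != -ERANGE)
 *		return err;
 */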

/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * to get all the free bytes information, then you should call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobfree, 0, sizeof(*oobfree));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->free)
		return -ENOTSUPP;

	return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);

/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the ECC position
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, say you want to know where the 4th ECC byte is
 * stored, you'll use:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				int *sectionp, struct mtd_oob_region *oobregion,
				int (*iter)(struct mtd_info *,
					    int section,
					    struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/*
	 * Adjust region info to make it start at the beginning of the
	 * 'start' ECC byte.
1733 	 */
1734 	oobregion->offset += byte - pos;
1735 	oobregion->length -= byte - pos;
1736 	*sectionp = section;
1737 
1738 	return 0;
1739 }
1740 
1741 /**
1742  * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1743  *				  ECC byte
1744  * @mtd: mtd info structure
1745  * @eccbyte: the byte we are searching for
1746  * @section: pointer where the section id will be stored
1747  * @oobregion: OOB region information
1748  *
1749  * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1750  * byte.
1751  *
1752  * Returns zero on success, a negative error code otherwise.
1753  */
1754 int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1755 				 int *section,
1756 				 struct mtd_oob_region *oobregion)
1757 {
1758 	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1759 					 mtd_ooblayout_ecc);
1760 }
1761 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1762 
1763 /**
1764  * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1765  * @mtd: mtd info structure
1766  * @buf: destination buffer to store OOB bytes
1767  * @oobbuf: OOB buffer
1768  * @start: first byte to retrieve
1769  * @nbytes: number of bytes to retrieve
1770  * @iter: section iterator
1771  *
1772  * Extract bytes attached to a specific category (ECC or free)
1773  * from the OOB buffer and copy them into buf.
1774  *
1775  * Returns zero on success, a negative error code otherwise.
1776  */
1777 static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1778 				const u8 *oobbuf, int start, int nbytes,
1779 				int (*iter)(struct mtd_info *,
1780 					    int section,
1781 					    struct mtd_oob_region *oobregion))
1782 {
1783 	struct mtd_oob_region oobregion;
1784 	int section, ret;
1785 
1786 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1787 					&oobregion, iter);
1788 
1789 	while (!ret) {
1790 		int cnt;
1791 
1792 		cnt = min_t(int, nbytes, oobregion.length);
1793 		memcpy(buf, oobbuf + oobregion.offset, cnt);
1794 		buf += cnt;
1795 		nbytes -= cnt;
1796 
1797 		if (!nbytes)
1798 			break;
1799 
1800 		ret = iter(mtd, ++section, &oobregion);
1801 	}
1802 
1803 	return ret;
1804 }
1805 
1806 /**
1807  * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1808  * @mtd: mtd info structure
1809  * @buf: source buffer to get OOB bytes from
1810  * @oobbuf: OOB buffer
1811  * @start: first OOB byte to set
1812  * @nbytes: number of OOB bytes to set
1813  * @iter: section iterator
1814  *
1815  * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1816  * is selected by passing the appropriate iterator.
1817  *
1818  * Returns zero on success, a negative error code otherwise.
1819  */
1820 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1821 				u8 *oobbuf, int start, int nbytes,
1822 				int (*iter)(struct mtd_info *,
1823 					    int section,
1824 					    struct mtd_oob_region *oobregion))
1825 {
1826 	struct mtd_oob_region oobregion;
1827 	int section, ret;
1828 
1829 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1830 					&oobregion, iter);
1831 
1832 	while (!ret) {
1833 		int cnt;
1834 
1835 		cnt = min_t(int, nbytes, oobregion.length);
1836 		memcpy(oobbuf + oobregion.offset, buf, cnt);
1837 		buf += cnt;
1838 		nbytes -= cnt;
1839 
1840 		if (!nbytes)
1841 			break;
1842 
1843 		ret = iter(mtd, ++section, &oobregion);
1844 	}
1845 
1846 	return ret;
1847 }
1848 
1849 /**
1850  * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
1851  * @mtd: mtd info structure
1852  * @iter: category iterator
1853  *
1854  * Count the number of bytes in a given category.
1855  *
1856  * Returns a positive value on success, a negative error code otherwise.
1857  */
1858 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1859 				int (*iter)(struct mtd_info *,
1860 					    int section,
1861 					    struct mtd_oob_region *oobregion))
1862 {
1863 	struct mtd_oob_region oobregion;
1864 	int section = 0, ret, nbytes = 0;
1865 
1866 	while (1) {
1867 		ret = iter(mtd, section++, &oobregion);
1868 		if (ret) {
1869 			if (ret == -ERANGE)
1870 				ret = nbytes;
1871 			break;
1872 		}
1873 
1874 		nbytes += oobregion.length;
1875 	}
1876 
1877 	return ret;
1878 }
1879 
1880 /**
1881  * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1882  * @mtd: mtd info structure
1883  * @eccbuf: destination buffer to store ECC bytes
1884  * @oobbuf: OOB buffer
1885  * @start: first ECC byte to retrieve
1886  * @nbytes: number of ECC bytes to retrieve
1887  *
1888  * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1889  *
1890  * Returns zero on success, a negative error code otherwise.
1891  */
1892 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1893 			       const u8 *oobbuf, int start, int nbytes)
1894 {
1895 	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1896 				       mtd_ooblayout_ecc);
1897 }
1898 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1899 
1900 /**
1901  * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1902  * @mtd: mtd info structure
1903  * @eccbuf: source buffer to get ECC bytes from
1904  * @oobbuf: OOB buffer
1905  * @start: first ECC byte to set
1906  * @nbytes: number of ECC bytes to set
1907  *
1908  * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
1909  *
1910  * Returns zero on success, a negative error code otherwise.
1911  */
1912 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1913 			       u8 *oobbuf, int start, int nbytes)
1914 {
1915 	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1916 				       mtd_ooblayout_ecc);
1917 }
1918 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1919 
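/*
 * Illustrative sketch (hypothetical helper, not part of this file): a caller
 * typically fills an OOB buffer with a raw read, then uses the accessor above
 * to pull out the ECC bytes. The buffer handling below is an assumption.
 *
 *	static int read_raw_ecc(struct mtd_info *mtd, loff_t page_addr,
 *				u8 *ecc, int num_ecc_bytes)
 *	{
 *		struct mtd_oob_ops ops = {
 *			.mode	= MTD_OPS_RAW,
 *			.ooblen	= mtd->oobsize,
 *		};
 *		int ret;
 *
 *		ops.oobbuf = kmalloc(mtd->oobsize, GFP_KERNEL);
 *		if (!ops.oobbuf)
 *			return -ENOMEM;
 *
 *		ret = mtd_read_oob(mtd, page_addr, &ops);
 *		if (!ret)
 *			ret = mtd_ooblayout_get_eccbytes(mtd, ecc, ops.oobbuf,
 *							 0, num_ecc_bytes);
 *		kfree(ops.oobbuf);
 *		return ret;
 *	}
 */
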
1920 /**
1921  * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
1922  * @mtd: mtd info structure
1923  * @databuf: destination buffer to store data bytes
1924  * @oobbuf: OOB buffer
1925  * @start: first data byte to retrieve
1926  * @nbytes: number of data bytes to retrieve
1927  *
1928  * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
1929  *
1930  * Returns zero on success, a negative error code otherwise.
1931  */
1932 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1933 				const u8 *oobbuf, int start, int nbytes)
1934 {
1935 	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1936 				       mtd_ooblayout_free);
1937 }
1938 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1939 
1940 /**
1941  * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
1942  * @mtd: mtd info structure
1943  * @databuf: source buffer to get data bytes from
1944  * @oobbuf: OOB buffer
1945  * @start: first data byte to set
1946  * @nbytes: number of data bytes to set
1947  *
1948  * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
1949  *
1950  * Returns zero on success, a negative error code otherwise.
1951  */
1952 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
1953 				u8 *oobbuf, int start, int nbytes)
1954 {
1955 	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
1956 				       mtd_ooblayout_free);
1957 }
1958 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1959 
1960 /**
1961  * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
1962  * @mtd: mtd info structure
1963  *
1964  * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
1965  *
1966  * Returns the number of free bytes on success, a negative error code otherwise.
1967  */
1968 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
1969 {
1970 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
1971 }
1972 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1973 
1974 /**
1975  * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
1976  * @mtd: mtd info structure
1977  *
1978  * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
1979  *
1980  * Returns the number of ECC bytes on success, a negative error code otherwise.
1981  */
1982 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1983 {
1984 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1985 }
1986 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
1987 
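/*
 * Illustrative sketch: the two counters above are typically used to size OOB
 * scratch buffers. A hypothetical caller allocating room for all free bytes
 * might do:
 *
 *	int free_bytes = mtd_ooblayout_count_freebytes(mtd);
 *	u8 *buf;
 *
 *	if (free_bytes < 0)
 *		return free_bytes;
 *	buf = kmalloc(free_bytes, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */
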
1988 /*
1989  * Method to access the protection register area, present in some flash
1990  * devices. The user data is one time programmable but the factory data is read
1991  * only.
1992  */
1993 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1994 			   struct otp_info *buf)
1995 {
1996 	struct mtd_info *master = mtd_get_master(mtd);
1997 
1998 	if (!master->_get_fact_prot_info)
1999 		return -EOPNOTSUPP;
2000 	if (!len)
2001 		return 0;
2002 	return master->_get_fact_prot_info(master, len, retlen, buf);
2003 }
2004 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2005 
2006 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2007 			   size_t *retlen, u_char *buf)
2008 {
2009 	struct mtd_info *master = mtd_get_master(mtd);
2010 
2011 	*retlen = 0;
2012 	if (!master->_read_fact_prot_reg)
2013 		return -EOPNOTSUPP;
2014 	if (!len)
2015 		return 0;
2016 	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2017 }
2018 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2019 
2020 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2021 			   struct otp_info *buf)
2022 {
2023 	struct mtd_info *master = mtd_get_master(mtd);
2024 
2025 	if (!master->_get_user_prot_info)
2026 		return -EOPNOTSUPP;
2027 	if (!len)
2028 		return 0;
2029 	return master->_get_user_prot_info(master, len, retlen, buf);
2030 }
2031 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2032 
2033 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2034 			   size_t *retlen, u_char *buf)
2035 {
2036 	struct mtd_info *master = mtd_get_master(mtd);
2037 
2038 	*retlen = 0;
2039 	if (!master->_read_user_prot_reg)
2040 		return -EOPNOTSUPP;
2041 	if (!len)
2042 		return 0;
2043 	return master->_read_user_prot_reg(master, from, len, retlen, buf);
2044 }
2045 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
2046 
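/*
 * Illustrative sketch (hypothetical, not part of this file): enumerating the
 * user OTP regions and reading the first one. struct otp_info describes each
 * region with start, length and locked; the array size here is an assumption.
 *
 *	static int read_first_user_otp(struct mtd_info *mtd, u_char *buf)
 *	{
 *		struct otp_info info[8];
 *		size_t retlen;
 *		int ret;
 *
 *		ret = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
 *		if (ret)
 *			return ret;
 *		if (retlen < sizeof(info[0]))
 *			return -ENODATA;
 *
 *		return mtd_read_user_prot_reg(mtd, info[0].start,
 *					      info[0].length, &retlen, buf);
 *	}
 */
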
2047 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2048 			    size_t *retlen, const u_char *buf)
2049 {
2050 	struct mtd_info *master = mtd_get_master(mtd);
2051 	int ret;
2052 
2053 	*retlen = 0;
2054 	if (!master->_write_user_prot_reg)
2055 		return -EOPNOTSUPP;
2056 	if (!len)
2057 		return 0;
2058 	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2059 	if (ret)
2060 		return ret;
2061 
2062 	/*
2063 	 * If no data could be written at all, the OTP area is full and we
2064 	 * must return -ENOSPC.
2065 	 */
2066 	return (*retlen) ? 0 : -ENOSPC;
2067 }
2068 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
2069 
2070 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2071 {
2072 	struct mtd_info *master = mtd_get_master(mtd);
2073 
2074 	if (!master->_lock_user_prot_reg)
2075 		return -EOPNOTSUPP;
2076 	if (!len)
2077 		return 0;
2078 	return master->_lock_user_prot_reg(master, from, len);
2079 }
2080 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2081 
2082 int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2083 {
2084 	struct mtd_info *master = mtd_get_master(mtd);
2085 
2086 	if (!master->_erase_user_prot_reg)
2087 		return -EOPNOTSUPP;
2088 	if (!len)
2089 		return 0;
2090 	return master->_erase_user_prot_reg(master, from, len);
2091 }
2092 EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
2093 
2094 /* Chip-supported device locking */
2095 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2096 {
2097 	struct mtd_info *master = mtd_get_master(mtd);
2098 
2099 	if (!master->_lock)
2100 		return -EOPNOTSUPP;
2101 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2102 		return -EINVAL;
2103 	if (!len)
2104 		return 0;
2105 
2106 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2107 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2108 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2109 	}
2110 
2111 	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2112 }
2113 EXPORT_SYMBOL_GPL(mtd_lock);
2114 
2115 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2116 {
2117 	struct mtd_info *master = mtd_get_master(mtd);
2118 
2119 	if (!master->_unlock)
2120 		return -EOPNOTSUPP;
2121 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2122 		return -EINVAL;
2123 	if (!len)
2124 		return 0;
2125 
2126 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2127 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2128 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2129 	}
2130 
2131 	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2132 }
2133 EXPORT_SYMBOL_GPL(mtd_unlock);
2134 
2135 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2136 {
2137 	struct mtd_info *master = mtd_get_master(mtd);
2138 
2139 	if (!master->_is_locked)
2140 		return -EOPNOTSUPP;
2141 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2142 		return -EINVAL;
2143 	if (!len)
2144 		return 0;
2145 
2146 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2147 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2148 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2149 	}
2150 
2151 	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2152 }
2153 EXPORT_SYMBOL_GPL(mtd_is_locked);
2154 
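/*
 * Illustrative sketch: flash chips often power up with blocks protected, so
 * callers commonly unlock a range before writing to it. A hypothetical
 * whole-device unlock that tolerates chips without a handler:
 *
 *	int ret = mtd_unlock(mtd, 0, mtd->size);
 *
 *	if (ret && ret != -EOPNOTSUPP)
 *		return ret;
 *
 * (-EOPNOTSUPP simply means the chip provides no unlock handler.)
 */
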
2155 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2156 {
2157 	struct mtd_info *master = mtd_get_master(mtd);
2158 
2159 	if (ofs < 0 || ofs >= mtd->size)
2160 		return -EINVAL;
2161 	if (!master->_block_isreserved)
2162 		return 0;
2163 
2164 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2165 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2166 
2167 	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2168 }
2169 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2170 
2171 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2172 {
2173 	struct mtd_info *master = mtd_get_master(mtd);
2174 
2175 	if (ofs < 0 || ofs >= mtd->size)
2176 		return -EINVAL;
2177 	if (!master->_block_isbad)
2178 		return 0;
2179 
2180 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2181 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2182 
2183 	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2184 }
2185 EXPORT_SYMBOL_GPL(mtd_block_isbad);
2186 
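/*
 * Illustrative sketch (hypothetical helper): scanning a device eraseblock by
 * eraseblock and skipping bad blocks, as flash users generally must.
 *
 *	static int count_bad_blocks(struct mtd_info *mtd)
 *	{
 *		loff_t ofs;
 *		int bad = 0, ret;
 *
 *		for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *			ret = mtd_block_isbad(mtd, ofs);
 *			if (ret < 0)
 *				return ret;
 *			if (ret)
 *				bad++;
 *		}
 *		return bad;
 *	}
 */
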
2187 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2188 {
2189 	struct mtd_info *master = mtd_get_master(mtd);
2190 	int ret;
2191 
2192 	if (!master->_block_markbad)
2193 		return -EOPNOTSUPP;
2194 	if (ofs < 0 || ofs >= mtd->size)
2195 		return -EINVAL;
2196 	if (!(mtd->flags & MTD_WRITEABLE))
2197 		return -EROFS;
2198 
2199 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2200 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2201 
2202 	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2203 	if (ret)
2204 		return ret;
2205 
2206 	while (mtd->parent) {
2207 		mtd->ecc_stats.badblocks++;
2208 		mtd = mtd->parent;
2209 	}
2210 
2211 	return 0;
2212 }
2213 EXPORT_SYMBOL_GPL(mtd_block_markbad);
2214 
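/*
 * Illustrative sketch: a typical caller marks an eraseblock bad once an
 * erase on it fails, roughly as the flash filesystems do. 'ofs' is assumed
 * to be eraseblock-aligned.
 *
 *	struct erase_info ei = {
 *		.addr = ofs,
 *		.len  = mtd->erasesize,
 *	};
 *	int ret = mtd_erase(mtd, &ei);
 *
 *	if (ret == -EIO)
 *		ret = mtd_block_markbad(mtd, ofs);
 */
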
2215 /*
2216  * default_mtd_writev - the default writev method
2217  * @mtd: mtd device description object pointer
2218  * @vecs: the vectors to write
2219  * @count: count of vectors in @vecs
2220  * @to: the MTD device offset to write to
2221  * @retlen: on exit contains the count of bytes written to the MTD device.
2222  *
2223  * This function returns zero in case of success and a negative error code in
2224  * case of failure.
2225  */
2226 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2227 			      unsigned long count, loff_t to, size_t *retlen)
2228 {
2229 	unsigned long i;
2230 	size_t totlen = 0, thislen;
2231 	int ret = 0;
2232 
2233 	for (i = 0; i < count; i++) {
2234 		if (!vecs[i].iov_len)
2235 			continue;
2236 		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2237 				vecs[i].iov_base);
2238 		totlen += thislen;
2239 		if (ret || thislen != vecs[i].iov_len)
2240 			break;
2241 		to += vecs[i].iov_len;
2242 	}
2243 	*retlen = totlen;
2244 	return ret;
2245 }
2246 
2247 /*
2248  * mtd_writev - the vector-based MTD write method
2249  * @mtd: mtd device description object pointer
2250  * @vecs: the vectors to write
2251  * @count: count of vectors in @vecs
2252  * @to: the MTD device offset to write to
2253  * @retlen: on exit contains the count of bytes written to the MTD device.
2254  *
2255  * This function returns zero in case of success and a negative error code in
2256  * case of failure.
2257  */
2258 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2259 	       unsigned long count, loff_t to, size_t *retlen)
2260 {
2261 	struct mtd_info *master = mtd_get_master(mtd);
2262 
2263 	*retlen = 0;
2264 	if (!(mtd->flags & MTD_WRITEABLE))
2265 		return -EROFS;
2266 
2267 	if (!master->_writev)
2268 		return default_mtd_writev(mtd, vecs, count, to, retlen);
2269 
2270 	return master->_writev(master, vecs, count,
2271 			       mtd_get_master_ofs(mtd, to), retlen);
2272 }
2273 EXPORT_SYMBOL_GPL(mtd_writev);
2274 
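/*
 * Illustrative sketch (hypothetical buffers): writing a header and payload
 * in a single call with a two-entry kvec.
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len },
 *		{ .iov_base = data, .iov_len = data_len },
 *	};
 *	size_t retlen;
 *	int ret = mtd_writev(mtd, vecs, 2, to, &retlen);
 *
 *	if (!ret && retlen != hdr_len + data_len)
 *		ret = -EIO;
 */
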
2275 /**
2276  * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2277  * @mtd: mtd device description object pointer
2278  * @size: a pointer to the ideal or maximum size of the allocation, points
2279  *        to the actual allocation size on success.
2280  *
2281  * This routine attempts to allocate a contiguous kernel buffer up to
2282  * the specified size, backing off the size of the request exponentially
2283  * until the request succeeds or until the allocation size falls below
2284  * the system page size. This attempts to make sure it does not adversely
2285  * impact system performance, so when allocating more than one page, we
2286  * ask the memory allocator to avoid re-trying, swapping, writing back
2287  * or performing I/O.
2288  *
2289  * Note, this function also makes sure that the allocated buffer is aligned to
2290  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2291  *
2292  * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2293  * to handle smaller (i.e. degraded) buffer allocations under low- or
2294  * fragmented-memory situations where such reduced allocations, from a
2295  * requested ideal, are allowed.
2296  *
2297  * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2298  */
2299 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2300 {
2301 	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2302 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2303 	void *kbuf;
2304 
2305 	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2306 
2307 	while (*size > min_alloc) {
2308 		kbuf = kmalloc(*size, flags);
2309 		if (kbuf)
2310 			return kbuf;
2311 
2312 		*size >>= 1;
2313 		*size = ALIGN(*size, mtd->writesize);
2314 	}
2315 
2316 	/*
2317 	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2318 	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2319 	 */
2320 	return kmalloc(*size, GFP_KERNEL);
2321 }
2322 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
2323 
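/*
 * Illustrative sketch: a caller requests its ideal transfer size and then
 * loops over the possibly smaller buffer actually granted. 'count' and the
 * transfer loop are assumptions.
 *
 *	size_t size = count;
 *	void *kbuf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	(... transfer at most 'size' bytes per iteration ...)
 *	kfree(kbuf);
 */
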
2324 #ifdef CONFIG_PROC_FS
2325 
2326 /*====================================================================*/
2327 /* Support for /proc/mtd */
2328 
2329 static int mtd_proc_show(struct seq_file *m, void *v)
2330 {
2331 	struct mtd_info *mtd;
2332 
2333 	seq_puts(m, "dev:    size   erasesize  name\n");
2334 	mutex_lock(&mtd_table_mutex);
2335 	mtd_for_each_device(mtd) {
2336 		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2337 			   mtd->index, (unsigned long long)mtd->size,
2338 			   mtd->erasesize, mtd->name);
2339 	}
2340 	mutex_unlock(&mtd_table_mutex);
2341 	return 0;
2342 }
2343 #endif /* CONFIG_PROC_FS */
2344 
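/*
 * The resulting /proc/mtd output looks like this (sizes and names are
 * illustrative):
 *
 *	dev:    size   erasesize  name
 *	mtd0: 00100000 00020000 "bootloader"
 *	mtd1: 0ff00000 00020000 "rootfs"
 */
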
2345 /*====================================================================*/
2346 /* Init code */
2347 
2348 static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2349 {
2350 	struct backing_dev_info *bdi;
2351 	int ret;
2352 
2353 	bdi = bdi_alloc(NUMA_NO_NODE);
2354 	if (!bdi)
2355 		return ERR_PTR(-ENOMEM);
2356 	bdi->ra_pages = 0;
2357 	bdi->io_pages = 0;
2358 
2359 	/*
2360 	 * We append a '-0' suffix to the name to keep the name format we
2361 	 * used to have. Since this is called only once, the name is unique.
2362 	 */
2363 	ret = bdi_register(bdi, "%.28s-0", name);
2364 	if (ret)
2365 		bdi_put(bdi);
2366 
2367 	return ret ? ERR_PTR(ret) : bdi;
2368 }
2369 
2370 char *mtd_expert_analysis_warning =
2371 	"Bad block checks have been entirely disabled.\n"
2372 	"This is only reserved for post-mortem forensics and debug purposes.\n"
2373 	"Never enable this mode if you do not know what you are doing!\n";
2374 EXPORT_SYMBOL_GPL(mtd_expert_analysis_warning);
2375 bool mtd_expert_analysis_mode;
2376 EXPORT_SYMBOL_GPL(mtd_expert_analysis_mode);
2377 
2378 static struct proc_dir_entry *proc_mtd;
2379 
2380 static int __init init_mtd(void)
2381 {
2382 	int ret;
2383 
2384 	ret = class_register(&mtd_class);
2385 	if (ret)
2386 		goto err_reg;
2387 
2388 	mtd_bdi = mtd_bdi_init("mtd");
2389 	if (IS_ERR(mtd_bdi)) {
2390 		ret = PTR_ERR(mtd_bdi);
2391 		goto err_bdi;
2392 	}
2393 
2394 	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2395 
2396 	ret = init_mtdchar();
2397 	if (ret)
2398 		goto out_procfs;
2399 
2400 	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2401 	debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2402 			    &mtd_expert_analysis_mode);
2403 
2404 	return 0;
2405 
2406 out_procfs:
2407 	if (proc_mtd)
2408 		remove_proc_entry("mtd", NULL);
2409 	bdi_put(mtd_bdi);
2410 err_bdi:
2411 	class_unregister(&mtd_class);
2412 err_reg:
2413 	pr_err("Error registering mtd class or bdi: %d\n", ret);
2414 	return ret;
2415 }
2416 
2417 static void __exit cleanup_mtd(void)
2418 {
2419 	debugfs_remove_recursive(dfs_dir_mtd);
2420 	cleanup_mtdchar();
2421 	if (proc_mtd)
2422 		remove_proc_entry("mtd", NULL);
2423 	class_unregister(&mtd_class);
2424 	bdi_unregister(mtd_bdi);
2425 	bdi_put(mtd_bdi);
2426 	idr_destroy(&mtd_idr);
2427 }
2428 
2429 module_init(init_mtd);
2430 module_exit(cleanup_mtd);
2431 
2432 MODULE_LICENSE("GPL");
2433 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2434 MODULE_DESCRIPTION("Core MTD registration and access routines");
2435