xref: /linux/drivers/mtd/mtdcore.c (revision 6015fb905d89063231ed33bc15be19ef0fc339b8)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Core registration and callback routines for MTD
4  * drivers and users.
5  *
6  * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
7  * Copyright © 2006      Red Hat UK Limited
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/ptrace.h>
13 #include <linux/seq_file.h>
14 #include <linux/string.h>
15 #include <linux/timer.h>
16 #include <linux/major.h>
17 #include <linux/fs.h>
18 #include <linux/err.h>
19 #include <linux/ioctl.h>
20 #include <linux/init.h>
21 #include <linux/of.h>
22 #include <linux/proc_fs.h>
23 #include <linux/idr.h>
24 #include <linux/backing-dev.h>
25 #include <linux/gfp.h>
26 #include <linux/slab.h>
27 #include <linux/reboot.h>
28 #include <linux/leds.h>
29 #include <linux/debugfs.h>
30 #include <linux/nvmem-provider.h>
31 
32 #include <linux/mtd/mtd.h>
33 #include <linux/mtd/partitions.h>
34 
35 #include "mtdcore.h"
36 
37 struct backing_dev_info *mtd_bdi;
38 
39 #ifdef CONFIG_PM_SLEEP
40 
41 static int mtd_cls_suspend(struct device *dev)
42 {
43 	struct mtd_info *mtd = dev_get_drvdata(dev);
44 
45 	return mtd ? mtd_suspend(mtd) : 0;
46 }
47 
48 static int mtd_cls_resume(struct device *dev)
49 {
50 	struct mtd_info *mtd = dev_get_drvdata(dev);
51 
52 	if (mtd)
53 		mtd_resume(mtd);
54 	return 0;
55 }
56 
57 static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
58 #define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
59 #else
60 #define MTD_CLS_PM_OPS NULL
61 #endif
62 
63 static struct class mtd_class = {
64 	.name = "mtd",
65 	.owner = THIS_MODULE,
66 	.pm = MTD_CLS_PM_OPS,
67 };
68 
69 static DEFINE_IDR(mtd_idr);
70 
71 /* These are exported solely for the purpose of mtd_blkdevs.c. You
72    should not use them for _anything_ else */
73 DEFINE_MUTEX(mtd_table_mutex);
74 EXPORT_SYMBOL_GPL(mtd_table_mutex);
75 
76 struct mtd_info *__mtd_next_device(int i)
77 {
78 	return idr_get_next(&mtd_idr, &i);
79 }
80 EXPORT_SYMBOL_GPL(__mtd_next_device);
81 
82 static LIST_HEAD(mtd_notifiers);
83 
84 
85 #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
86 
87 /* REVISIT once MTD uses the driver model better, whoever allocates
88  * the mtd_info will probably want to use the release() hook...
89  */
90 static void mtd_release(struct device *dev)
91 {
92 	struct mtd_info *mtd = dev_get_drvdata(dev);
93 	dev_t index = MTD_DEVT(mtd->index);
94 
95 	/* remove /dev/mtdXro node */
96 	device_destroy(&mtd_class, index + 1);
97 }
98 
99 #define MTD_DEVICE_ATTR_RO(name) \
100 static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
101 
102 #define MTD_DEVICE_ATTR_RW(name) \
103 static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
104 
105 static ssize_t mtd_type_show(struct device *dev,
106 		struct device_attribute *attr, char *buf)
107 {
108 	struct mtd_info *mtd = dev_get_drvdata(dev);
109 	char *type;
110 
111 	switch (mtd->type) {
112 	case MTD_ABSENT:
113 		type = "absent";
114 		break;
115 	case MTD_RAM:
116 		type = "ram";
117 		break;
118 	case MTD_ROM:
119 		type = "rom";
120 		break;
121 	case MTD_NORFLASH:
122 		type = "nor";
123 		break;
124 	case MTD_NANDFLASH:
125 		type = "nand";
126 		break;
127 	case MTD_DATAFLASH:
128 		type = "dataflash";
129 		break;
130 	case MTD_UBIVOLUME:
131 		type = "ubi";
132 		break;
133 	case MTD_MLCNANDFLASH:
134 		type = "mlc-nand";
135 		break;
136 	default:
137 		type = "unknown";
138 	}
139 
140 	return sysfs_emit(buf, "%s\n", type);
141 }
142 MTD_DEVICE_ATTR_RO(type);
143 
144 static ssize_t mtd_flags_show(struct device *dev,
145 		struct device_attribute *attr, char *buf)
146 {
147 	struct mtd_info *mtd = dev_get_drvdata(dev);
148 
149 	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
150 }
151 MTD_DEVICE_ATTR_RO(flags);
152 
153 static ssize_t mtd_size_show(struct device *dev,
154 		struct device_attribute *attr, char *buf)
155 {
156 	struct mtd_info *mtd = dev_get_drvdata(dev);
157 
158 	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
159 }
160 MTD_DEVICE_ATTR_RO(size);
161 
162 static ssize_t mtd_erasesize_show(struct device *dev,
163 		struct device_attribute *attr, char *buf)
164 {
165 	struct mtd_info *mtd = dev_get_drvdata(dev);
166 
167 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
168 }
169 MTD_DEVICE_ATTR_RO(erasesize);
170 
171 static ssize_t mtd_writesize_show(struct device *dev,
172 		struct device_attribute *attr, char *buf)
173 {
174 	struct mtd_info *mtd = dev_get_drvdata(dev);
175 
176 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
177 }
178 MTD_DEVICE_ATTR_RO(writesize);
179 
180 static ssize_t mtd_subpagesize_show(struct device *dev,
181 		struct device_attribute *attr, char *buf)
182 {
183 	struct mtd_info *mtd = dev_get_drvdata(dev);
184 	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
185 
186 	return sysfs_emit(buf, "%u\n", subpagesize);
187 }
188 MTD_DEVICE_ATTR_RO(subpagesize);
189 
190 static ssize_t mtd_oobsize_show(struct device *dev,
191 		struct device_attribute *attr, char *buf)
192 {
193 	struct mtd_info *mtd = dev_get_drvdata(dev);
194 
195 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
196 }
197 MTD_DEVICE_ATTR_RO(oobsize);
198 
199 static ssize_t mtd_oobavail_show(struct device *dev,
200 				 struct device_attribute *attr, char *buf)
201 {
202 	struct mtd_info *mtd = dev_get_drvdata(dev);
203 
204 	return sysfs_emit(buf, "%u\n", mtd->oobavail);
205 }
206 MTD_DEVICE_ATTR_RO(oobavail);
207 
208 static ssize_t mtd_numeraseregions_show(struct device *dev,
209 		struct device_attribute *attr, char *buf)
210 {
211 	struct mtd_info *mtd = dev_get_drvdata(dev);
212 
213 	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
214 }
215 MTD_DEVICE_ATTR_RO(numeraseregions);
216 
217 static ssize_t mtd_name_show(struct device *dev,
218 		struct device_attribute *attr, char *buf)
219 {
220 	struct mtd_info *mtd = dev_get_drvdata(dev);
221 
222 	return sysfs_emit(buf, "%s\n", mtd->name);
223 }
224 MTD_DEVICE_ATTR_RO(name);
225 
226 static ssize_t mtd_ecc_strength_show(struct device *dev,
227 				     struct device_attribute *attr, char *buf)
228 {
229 	struct mtd_info *mtd = dev_get_drvdata(dev);
230 
231 	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
232 }
233 MTD_DEVICE_ATTR_RO(ecc_strength);
234 
235 static ssize_t mtd_bitflip_threshold_show(struct device *dev,
236 					  struct device_attribute *attr,
237 					  char *buf)
238 {
239 	struct mtd_info *mtd = dev_get_drvdata(dev);
240 
241 	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
242 }
243 
244 static ssize_t mtd_bitflip_threshold_store(struct device *dev,
245 					   struct device_attribute *attr,
246 					   const char *buf, size_t count)
247 {
248 	struct mtd_info *mtd = dev_get_drvdata(dev);
249 	unsigned int bitflip_threshold;
250 	int retval;
251 
252 	retval = kstrtouint(buf, 0, &bitflip_threshold);
253 	if (retval)
254 		return retval;
255 
256 	mtd->bitflip_threshold = bitflip_threshold;
257 	return count;
258 }
259 MTD_DEVICE_ATTR_RW(bitflip_threshold);
260 
261 static ssize_t mtd_ecc_step_size_show(struct device *dev,
262 		struct device_attribute *attr, char *buf)
263 {
264 	struct mtd_info *mtd = dev_get_drvdata(dev);
265 
266 	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
267 
268 }
269 MTD_DEVICE_ATTR_RO(ecc_step_size);
270 
271 static ssize_t mtd_corrected_bits_show(struct device *dev,
272 		struct device_attribute *attr, char *buf)
273 {
274 	struct mtd_info *mtd = dev_get_drvdata(dev);
275 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
276 
277 	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
278 }
279 MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */
280 
281 static ssize_t mtd_ecc_failures_show(struct device *dev,
282 		struct device_attribute *attr, char *buf)
283 {
284 	struct mtd_info *mtd = dev_get_drvdata(dev);
285 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
286 
287 	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
288 }
289 MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */
290 
291 static ssize_t mtd_bad_blocks_show(struct device *dev,
292 		struct device_attribute *attr, char *buf)
293 {
294 	struct mtd_info *mtd = dev_get_drvdata(dev);
295 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
296 
297 	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
298 }
299 MTD_DEVICE_ATTR_RO(bad_blocks);
300 
301 static ssize_t mtd_bbt_blocks_show(struct device *dev,
302 		struct device_attribute *attr, char *buf)
303 {
304 	struct mtd_info *mtd = dev_get_drvdata(dev);
305 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
306 
307 	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
308 }
309 MTD_DEVICE_ATTR_RO(bbt_blocks);
310 
311 static struct attribute *mtd_attrs[] = {
312 	&dev_attr_type.attr,
313 	&dev_attr_flags.attr,
314 	&dev_attr_size.attr,
315 	&dev_attr_erasesize.attr,
316 	&dev_attr_writesize.attr,
317 	&dev_attr_subpagesize.attr,
318 	&dev_attr_oobsize.attr,
319 	&dev_attr_oobavail.attr,
320 	&dev_attr_numeraseregions.attr,
321 	&dev_attr_name.attr,
322 	&dev_attr_ecc_strength.attr,
323 	&dev_attr_ecc_step_size.attr,
324 	&dev_attr_corrected_bits.attr,
325 	&dev_attr_ecc_failures.attr,
326 	&dev_attr_bad_blocks.attr,
327 	&dev_attr_bbt_blocks.attr,
328 	&dev_attr_bitflip_threshold.attr,
329 	NULL,
330 };
331 ATTRIBUTE_GROUPS(mtd);
332 
333 static const struct device_type mtd_devtype = {
334 	.name		= "mtd",
335 	.groups		= mtd_groups,
336 	.release	= mtd_release,
337 };
338 
339 static int mtd_partid_debug_show(struct seq_file *s, void *p)
340 {
341 	struct mtd_info *mtd = s->private;
342 
343 	seq_printf(s, "%s\n", mtd->dbg.partid);
344 
345 	return 0;
346 }
347 
348 DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);
349 
350 static int mtd_partname_debug_show(struct seq_file *s, void *p)
351 {
352 	struct mtd_info *mtd = s->private;
353 
354 	seq_printf(s, "%s\n", mtd->dbg.partname);
355 
356 	return 0;
357 }
358 
359 DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);
360 
361 static struct dentry *dfs_dir_mtd;
362 
363 static void mtd_debugfs_populate(struct mtd_info *mtd)
364 {
365 	struct mtd_info *master = mtd_get_master(mtd);
366 	struct device *dev = &mtd->dev;
367 	struct dentry *root;
368 
369 	if (IS_ERR_OR_NULL(dfs_dir_mtd))
370 		return;
371 
372 	root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
373 	mtd->dbg.dfs_dir = root;
374 
375 	if (master->dbg.partid)
376 		debugfs_create_file("partid", 0400, root, master,
377 				    &mtd_partid_debug_fops);
378 
379 	if (master->dbg.partname)
380 		debugfs_create_file("partname", 0400, root, master,
381 				    &mtd_partname_debug_fops);
382 }
383 
384 #ifndef CONFIG_MMU
385 unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
386 {
387 	switch (mtd->type) {
388 	case MTD_RAM:
389 		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
390 			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
391 	case MTD_ROM:
392 		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
393 			NOMMU_MAP_READ;
394 	default:
395 		return NOMMU_MAP_COPY;
396 	}
397 }
398 EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
399 #endif
400 
401 static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
402 			       void *cmd)
403 {
404 	struct mtd_info *mtd;
405 
406 	mtd = container_of(n, struct mtd_info, reboot_notifier);
407 	mtd->_reboot(mtd);
408 
409 	return NOTIFY_DONE;
410 }
411 
412 /**
413  * mtd_wunit_to_pairing_info - get pairing information of a wunit
414  * @mtd: pointer to the MTD device info structure
415  * @wunit: write unit we are interested in
416  * @info: returned pairing information
417  *
418  * Retrieve pairing information associated with the wunit.
419  * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
420  * paired together, and where programming a page may influence the page it is
421  * paired with.
422  * The notion of page is replaced by the term wunit (write-unit) to stay
423  * consistent with the ->writesize field.
424  *
425  * The @wunit argument can be extracted from an absolute offset using
426  * mtd_offset_to_wunit(). @info is filled with the pairing information attached
427  * to @wunit.
428  *
429  * From the pairing info the MTD user can find all the wunits paired with
430  * @wunit using the following loop:
431  *
432  * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
433  *	info.group = i;
434  *	mtd_pairing_info_to_wunit(mtd, &info);
435  *	...
436  * }
437  */
438 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
439 			      struct mtd_pairing_info *info)
440 {
441 	struct mtd_info *master = mtd_get_master(mtd);
442 	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
443 
444 	if (wunit < 0 || wunit >= npairs)
445 		return -EINVAL;
446 
447 	if (master->pairing && master->pairing->get_info)
448 		return master->pairing->get_info(master, wunit, info);
449 
450 	info->group = 0;
451 	info->pair = wunit;
452 
453 	return 0;
454 }
455 EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
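
/*
 * Illustrative sketch (not upstream code; @mtd and @offs are assumed to be
 * provided by the caller): find the wunit containing @offs, then visit every
 * wunit paired with it by keeping info.pair fixed and walking the groups.
 *
 *	struct mtd_pairing_info info;
 *	int i, wunit, ret;
 *
 *	wunit = mtd_offset_to_wunit(mtd, offs);
 *	ret = mtd_wunit_to_pairing_info(mtd, wunit, &info);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *		info.group = i;
 *		wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *		pr_info("pair %d, group %d -> wunit %d\n", info.pair, i, wunit);
 *	}
 */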
456 
457 /**
458  * mtd_pairing_info_to_wunit - get wunit from pairing information
459  * @mtd: pointer to the MTD device info structure
460  * @info: pairing information struct
461  *
462  * Returns a non-negative number representing the wunit associated with the
463  * info struct, or a negative error code.
464  *
465  * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
466  * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
467  * doc).
468  *
469  * It can also be used to only program the first page of each pair (i.e.
470  * page attached to group 0), which allows one to use an MLC NAND in
471  * software-emulated SLC mode:
472  *
473  * info.group = 0;
474  * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
475  * for (info.pair = 0; info.pair < npairs; info.pair++) {
476  *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
477  *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
478  *		  mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
479  * }
480  */
481 int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
482 			      const struct mtd_pairing_info *info)
483 {
484 	struct mtd_info *master = mtd_get_master(mtd);
485 	int ngroups = mtd_pairing_groups(master);
486 	int npairs = mtd_wunit_per_eb(master) / ngroups;
487 
488 	if (!info || info->pair < 0 || info->pair >= npairs ||
489 	    info->group < 0 || info->group >= ngroups)
490 		return -EINVAL;
491 
492 	if (master->pairing && master->pairing->get_wunit)
493 		return master->pairing->get_wunit(master, info);
494 
495 	return info->pair;
496 }
497 EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
498 
499 /**
500  * mtd_pairing_groups - get the number of pairing groups
501  * @mtd: pointer to the MTD device info structure
502  *
503  * Returns the number of pairing groups.
504  *
505  * This number is usually equal to the number of bits exposed by a single
506  * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
507  * to iterate over all pages of a given pair.
508  */
509 int mtd_pairing_groups(struct mtd_info *mtd)
510 {
511 	struct mtd_info *master = mtd_get_master(mtd);
512 
513 	if (!master->pairing || !master->pairing->ngroups)
514 		return 1;
515 
516 	return master->pairing->ngroups;
517 }
518 EXPORT_SYMBOL_GPL(mtd_pairing_groups);
519 
520 static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
521 			      void *val, size_t bytes)
522 {
523 	struct mtd_info *mtd = priv;
524 	size_t retlen;
525 	int err;
526 
527 	err = mtd_read(mtd, offset, bytes, &retlen, val);
528 	if (err && err != -EUCLEAN)
529 		return err;
530 
531 	return retlen == bytes ? 0 : -EIO;
532 }
533 
534 static int mtd_nvmem_add(struct mtd_info *mtd)
535 {
536 	struct device_node *node = mtd_get_of_node(mtd);
537 	struct nvmem_config config = {};
538 
539 	config.id = -1;
540 	config.dev = &mtd->dev;
541 	config.name = dev_name(&mtd->dev);
542 	config.owner = THIS_MODULE;
543 	config.reg_read = mtd_nvmem_reg_read;
544 	config.size = mtd->size;
545 	config.word_size = 1;
546 	config.stride = 1;
547 	config.read_only = true;
548 	config.root_only = true;
549 	config.ignore_wp = true;
550 	config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
551 	config.priv = mtd;
552 
553 	mtd->nvmem = nvmem_register(&config);
554 	if (IS_ERR(mtd->nvmem)) {
555 		/* Just ignore if there is no NVMEM support in the kernel */
556 		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
557 			mtd->nvmem = NULL;
558 		} else {
559 			dev_err(&mtd->dev, "Failed to register NVMEM device\n");
560 			return PTR_ERR(mtd->nvmem);
561 		}
562 	}
563 
564 	return 0;
565 }
566 
567 /**
568  *	add_mtd_device - register an MTD device
569  *	@mtd: pointer to new MTD device info structure
570  *
571  *	Add a device to the list of MTD devices present in the system, and
572  *	notify each currently active MTD 'user' of its arrival. Returns
573  *	zero on success or non-zero on failure.
574  */
575 
576 int add_mtd_device(struct mtd_info *mtd)
577 {
578 	struct mtd_info *master = mtd_get_master(mtd);
579 	struct mtd_notifier *not;
580 	int i, error;
581 
582 	/*
583 	 * May occur, for instance, on buggy drivers which call
584 	 * mtd_device_parse_register() multiple times on the same master MTD,
585 	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
586 	 */
587 	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
588 		return -EEXIST;
589 
590 	BUG_ON(mtd->writesize == 0);
591 
592 	/*
593 	 * MTD drivers should implement ->_{write,read}() or
594 	 * ->_{write,read}_oob(), but not both.
595 	 */
596 	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
597 		    (mtd->_read && mtd->_read_oob)))
598 		return -EINVAL;
599 
600 	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
601 		    !(mtd->flags & MTD_NO_ERASE)))
602 		return -EINVAL;
603 
604 	/*
605 	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
606 	 * master is an MLC NAND and has a proper pairing scheme defined.
607 	 * We also reject masters that implement ->_writev() for now, because
608 	 * NAND controller drivers don't implement this hook, and adding the
609 	 * SLC -> MLC address/length conversion to this path is useless if we
610 	 * don't have a user.
611 	 */
612 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
613 	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
614 	     !master->pairing || master->_writev))
615 		return -EINVAL;
616 
617 	mutex_lock(&mtd_table_mutex);
618 
619 	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
620 	if (i < 0) {
621 		error = i;
622 		goto fail_locked;
623 	}
624 
625 	mtd->index = i;
626 	mtd->usecount = 0;
627 
628 	/* default value if not set by driver */
629 	if (mtd->bitflip_threshold == 0)
630 		mtd->bitflip_threshold = mtd->ecc_strength;
631 
632 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
633 		int ngroups = mtd_pairing_groups(master);
634 
635 		mtd->erasesize /= ngroups;
636 		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
637 			    mtd->erasesize;
638 	}
639 
640 	if (is_power_of_2(mtd->erasesize))
641 		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
642 	else
643 		mtd->erasesize_shift = 0;
644 
645 	if (is_power_of_2(mtd->writesize))
646 		mtd->writesize_shift = ffs(mtd->writesize) - 1;
647 	else
648 		mtd->writesize_shift = 0;
649 
650 	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
651 	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
652 
653 	/* Some chips always power up locked. Unlock them now */
654 	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
655 		error = mtd_unlock(mtd, 0, mtd->size);
656 		if (error && error != -EOPNOTSUPP)
657 			printk(KERN_WARNING
658 			       "%s: unlock failed, writes may not work\n",
659 			       mtd->name);
660 		/* Ignore unlock failures? */
661 		error = 0;
662 	}
663 
664 	/* Caller should have set dev.parent to match the
665 	 * physical device, if appropriate.
666 	 */
667 	mtd->dev.type = &mtd_devtype;
668 	mtd->dev.class = &mtd_class;
669 	mtd->dev.devt = MTD_DEVT(i);
670 	dev_set_name(&mtd->dev, "mtd%d", i);
671 	dev_set_drvdata(&mtd->dev, mtd);
672 	of_node_get(mtd_get_of_node(mtd));
673 	error = device_register(&mtd->dev);
674 	if (error)
675 		goto fail_added;
676 
677 	/* Add the nvmem provider */
678 	error = mtd_nvmem_add(mtd);
679 	if (error)
680 		goto fail_nvmem_add;
681 
682 	mtd_debugfs_populate(mtd);
683 
684 	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
685 		      "mtd%dro", i);
686 
687 	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
688 	/* No need to get a refcount on the module containing
689 	   the notifier, since we hold the mtd_table_mutex */
690 	list_for_each_entry(not, &mtd_notifiers, list)
691 		not->add(mtd);
692 
693 	mutex_unlock(&mtd_table_mutex);
694 	/* We _know_ we aren't being removed, because
695 	   our caller is still holding us here. So none
696 	   of this try_ nonsense, and no bitching about it
697 	   either. :) */
698 	__module_get(THIS_MODULE);
699 	return 0;
700 
701 fail_nvmem_add:
702 	device_unregister(&mtd->dev);
703 fail_added:
704 	of_node_put(mtd_get_of_node(mtd));
705 	idr_remove(&mtd_idr, i);
706 fail_locked:
707 	mutex_unlock(&mtd_table_mutex);
708 	return error;
709 }
710 
711 /**
712  *	del_mtd_device - unregister an MTD device
713  *	@mtd: pointer to MTD device info structure
714  *
715  *	Remove a device from the list of MTD devices present in the system,
716  *	and notify each currently active MTD 'user' of its departure.
717  *	Returns zero on success or a negative error code on failure, which
718  *	currently happens if the device is not present in the list or still in use.
719  */
720 
721 int del_mtd_device(struct mtd_info *mtd)
722 {
723 	int ret;
724 	struct mtd_notifier *not;
725 
726 	mutex_lock(&mtd_table_mutex);
727 
728 	if (idr_find(&mtd_idr, mtd->index) != mtd) {
729 		ret = -ENODEV;
730 		goto out_error;
731 	}
732 
733 	/* No need to get a refcount on the module containing
734 		the notifier, since we hold the mtd_table_mutex */
735 	list_for_each_entry(not, &mtd_notifiers, list)
736 		not->remove(mtd);
737 
738 	if (mtd->usecount) {
739 		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
740 		       mtd->index, mtd->name, mtd->usecount);
741 		ret = -EBUSY;
742 	} else {
743 		debugfs_remove_recursive(mtd->dbg.dfs_dir);
744 
745 		/* Try to remove the NVMEM provider */
746 		if (mtd->nvmem)
747 			nvmem_unregister(mtd->nvmem);
748 
749 		device_unregister(&mtd->dev);
750 
751 		/* Clear dev so mtd can be safely re-registered later if desired */
752 		memset(&mtd->dev, 0, sizeof(mtd->dev));
753 
754 		idr_remove(&mtd_idr, mtd->index);
755 		of_node_put(mtd_get_of_node(mtd));
756 
757 		module_put(THIS_MODULE);
758 		ret = 0;
759 	}
760 
761 out_error:
762 	mutex_unlock(&mtd_table_mutex);
763 	return ret;
764 }
765 
766 /*
767  * Set a few defaults based on the parent device, if not provided by the
768  * driver
769  */
770 static void mtd_set_dev_defaults(struct mtd_info *mtd)
771 {
772 	if (mtd->dev.parent) {
773 		if (!mtd->owner && mtd->dev.parent->driver)
774 			mtd->owner = mtd->dev.parent->driver->owner;
775 		if (!mtd->name)
776 			mtd->name = dev_name(mtd->dev.parent);
777 	} else {
778 		pr_debug("mtd device won't show a device symlink in sysfs\n");
779 	}
780 
781 	INIT_LIST_HEAD(&mtd->partitions);
782 	mutex_init(&mtd->master.partitions_lock);
783 	mutex_init(&mtd->master.chrdev_lock);
784 }
785 
786 static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
787 {
788 	struct otp_info *info;
789 	ssize_t size = 0;
790 	unsigned int i;
791 	size_t retlen;
792 	int ret;
793 
794 	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
795 	if (!info)
796 		return -ENOMEM;
797 
798 	if (is_user)
799 		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
800 	else
801 		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
802 	if (ret)
803 		goto err;
804 
805 	for (i = 0; i < retlen / sizeof(*info); i++)
806 		size += info[i].length;
807 
808 	kfree(info);
809 	return size;
810 
811 err:
812 	kfree(info);
813 
814 	/* ENODATA means there is no OTP region. */
815 	return ret == -ENODATA ? 0 : ret;
816 }
817 
818 static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
819 						   const char *compatible,
820 						   int size,
821 						   nvmem_reg_read_t reg_read)
822 {
823 	struct nvmem_device *nvmem = NULL;
824 	struct nvmem_config config = {};
825 	struct device_node *np;
826 
827 	/* DT binding is optional */
828 	np = of_get_compatible_child(mtd->dev.of_node, compatible);
829 
830 	/* OTP nvmem will be registered on the physical device */
831 	config.dev = mtd->dev.parent;
832 	config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
833 	config.id = NVMEM_DEVID_NONE;
834 	config.owner = THIS_MODULE;
835 	config.type = NVMEM_TYPE_OTP;
836 	config.root_only = true;
837 	config.ignore_wp = true;
838 	config.reg_read = reg_read;
839 	config.size = size;
840 	config.of_node = np;
841 	config.priv = mtd;
842 
843 	nvmem = nvmem_register(&config);
844 	/* Just ignore if there is no NVMEM support in the kernel */
845 	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
846 		nvmem = NULL;
847 
848 	of_node_put(np);
849 	kfree(config.name);
850 
851 	return nvmem;
852 }
853 
854 static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
855 				       void *val, size_t bytes)
856 {
857 	struct mtd_info *mtd = priv;
858 	size_t retlen;
859 	int ret;
860 
861 	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
862 	if (ret)
863 		return ret;
864 
865 	return retlen == bytes ? 0 : -EIO;
866 }
867 
868 static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
869 				       void *val, size_t bytes)
870 {
871 	struct mtd_info *mtd = priv;
872 	size_t retlen;
873 	int ret;
874 
875 	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
876 	if (ret)
877 		return ret;
878 
879 	return retlen == bytes ? 0 : -EIO;
880 }
881 
882 static int mtd_otp_nvmem_add(struct mtd_info *mtd)
883 {
884 	struct nvmem_device *nvmem;
885 	ssize_t size;
886 	int err;
887 
888 	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
889 		size = mtd_otp_size(mtd, true);
890 		if (size < 0)
891 			return size;
892 
893 		if (size > 0) {
894 			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
895 						       mtd_nvmem_user_otp_reg_read);
896 			if (IS_ERR(nvmem)) {
897 				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
898 				return PTR_ERR(nvmem);
899 			}
900 			mtd->otp_user_nvmem = nvmem;
901 		}
902 	}
903 
904 	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
905 		size = mtd_otp_size(mtd, false);
906 		if (size < 0) {
907 			err = size;
908 			goto err;
909 		}
910 
911 		if (size > 0) {
912 			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
913 						       mtd_nvmem_fact_otp_reg_read);
914 			if (IS_ERR(nvmem)) {
915 				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
916 				err = PTR_ERR(nvmem);
917 				goto err;
918 			}
919 			mtd->otp_factory_nvmem = nvmem;
920 		}
921 	}
922 
923 	return 0;
924 
925 err:
926 	if (mtd->otp_user_nvmem)
927 		nvmem_unregister(mtd->otp_user_nvmem);
928 	return err;
929 }
930 
931 /**
932  * mtd_device_parse_register - parse partitions and register an MTD device.
933  *
934  * @mtd: the MTD device to register
935  * @types: the list of MTD partition probes to try, see
936  *         'parse_mtd_partitions()' for more information
937  * @parser_data: MTD partition parser-specific data
938  * @parts: fallback partition information to register, if parsing fails;
939  *         only valid if %nr_parts > %0
940  * @nr_parts: the number of partitions in parts, if zero then the full
941  *            MTD device is registered if no partition info is found
942  *
943  * This function aggregates MTD partitions parsing (done by
944  * 'parse_mtd_partitions()') and MTD device and partitions registering. It
945  * basically follows the most common pattern found in many MTD drivers:
946  *
947  * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
948  *   registered first.
949  * * Then it tries to probe partitions on MTD device @mtd using parsers
950  *   specified in @types (if @types is %NULL, then the default list of parsers
951  *   is used, see 'parse_mtd_partitions()' for more information). If none are
952  *   found, this function falls back to the information specified in
953  *   @parts/@nr_parts.
954  * * If no partitions were found this function just registers the MTD device
955  *   @mtd and exits.
956  *
957  * Returns zero in case of success and a negative error code in case of failure.
958  */
959 int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
960 			      struct mtd_part_parser_data *parser_data,
961 			      const struct mtd_partition *parts,
962 			      int nr_parts)
963 {
964 	int ret;
965 
966 	mtd_set_dev_defaults(mtd);
967 
968 	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
969 		ret = add_mtd_device(mtd);
970 		if (ret)
971 			return ret;
972 	}
973 
974 	/* Prefer parsed partitions over driver-provided fallback */
975 	ret = parse_mtd_partitions(mtd, types, parser_data);
976 	if (ret == -EPROBE_DEFER)
977 		goto out;
978 
979 	if (ret > 0)
980 		ret = 0;
981 	else if (nr_parts)
982 		ret = add_mtd_partitions(mtd, parts, nr_parts);
983 	else if (!device_is_registered(&mtd->dev))
984 		ret = add_mtd_device(mtd);
985 	else
986 		ret = 0;
987 
988 	if (ret)
989 		goto out;
990 
991 	/*
992 	 * FIXME: some drivers unfortunately call this function more than once.
993 	 * So we have to check if we've already assigned the reboot notifier.
994 	 *
995 	 * Generally, we can make multiple calls work for most cases, but it
996 	 * does cause problems with parse_mtd_partitions() above (e.g.,
997 	 * cmdlineparts will register partitions more than once).
998 	 */
999 	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
1000 		  "MTD already registered\n");
1001 	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
1002 		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
1003 		register_reboot_notifier(&mtd->reboot_notifier);
1004 	}
1005 
1006 	ret = mtd_otp_nvmem_add(mtd);
1007 
1008 out:
1009 	if (ret && device_is_registered(&mtd->dev))
1010 		del_mtd_device(mtd);
1011 
1012 	return ret;
1013 }
1014 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
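
/*
 * Illustrative sketch of the usual driver pattern built on top of
 * mtd_device_parse_register()/mtd_device_unregister(). The foo_* names, the
 * foo_setup_mtd() helper and the fallback partition table are made up for the
 * example; passing NULL as @types selects the default list of parsers.
 *
 *	static const struct mtd_partition foo_fallback_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_1M },
 *		{ .name = "data", .offset = MTDPART_OFS_NXTBLK,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = foo_setup_mtd(pdev);
 *
 *		mtd->dev.parent = &pdev->dev;
 *		platform_set_drvdata(pdev, mtd);
 *		return mtd_device_parse_register(mtd, NULL, NULL,
 *						 foo_fallback_parts,
 *						 ARRAY_SIZE(foo_fallback_parts));
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		return mtd_device_unregister(platform_get_drvdata(pdev));
 *	}
 */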
1015 
1016 /**
1017  * mtd_device_unregister - unregister an existing MTD device.
1018  *
1019  * @master: the MTD device to unregister.  This will unregister both the master
1020  *          and any partitions if registered.
1021  */
1022 int mtd_device_unregister(struct mtd_info *master)
1023 {
1024 	int err;
1025 
1026 	if (master->_reboot) {
1027 		unregister_reboot_notifier(&master->reboot_notifier);
1028 		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
1029 	}
1030 
1031 	if (master->otp_user_nvmem)
1032 		nvmem_unregister(master->otp_user_nvmem);
1033 
1034 	if (master->otp_factory_nvmem)
1035 		nvmem_unregister(master->otp_factory_nvmem);
1036 
1037 	err = del_mtd_partitions(master);
1038 	if (err)
1039 		return err;
1040 
1041 	if (!device_is_registered(&master->dev))
1042 		return 0;
1043 
1044 	return del_mtd_device(master);
1045 }
1046 EXPORT_SYMBOL_GPL(mtd_device_unregister);
1047 
1048 /**
1049  *	register_mtd_user - register a 'user' of MTD devices.
1050  *	@new: pointer to notifier info structure
1051  *
1052  *	Registers a pair of callback functions to be called upon addition
1053  *	or removal of MTD devices. Causes the 'add' callback to be immediately
1054  *	invoked for each MTD device currently present in the system.
1055  */
1056 void register_mtd_user (struct mtd_notifier *new)
1057 {
1058 	struct mtd_info *mtd;
1059 
1060 	mutex_lock(&mtd_table_mutex);
1061 
1062 	list_add(&new->list, &mtd_notifiers);
1063 
1064 	__module_get(THIS_MODULE);
1065 
1066 	mtd_for_each_device(mtd)
1067 		new->add(mtd);
1068 
1069 	mutex_unlock(&mtd_table_mutex);
1070 }
1071 EXPORT_SYMBOL_GPL(register_mtd_user);
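
/*
 * Illustrative sketch of an MTD 'user' (the foo_* names are made up): the
 * callbacks run under mtd_table_mutex, once per already-registered device
 * when the notifier is added and then on every later addition or removal.
 *
 *	static void foo_add_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) added\n", mtd->index, mtd->name);
 *	}
 *
 *	static void foo_remove_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) removed\n", mtd->index, mtd->name);
 *	}
 *
 *	static struct mtd_notifier foo_notifier = {
 *		.add	= foo_add_mtd,
 *		.remove	= foo_remove_mtd,
 *	};
 *
 *	register_mtd_user(&foo_notifier);
 */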
1072 
1073 /**
1074  *	unregister_mtd_user - unregister a 'user' of MTD devices.
1075  *	@old: pointer to notifier info structure
1076  *
1077  *	Removes a callback function pair from the list of 'users' to be
1078  *	notified upon addition or removal of MTD devices. Causes the
1079  *	'remove' callback to be immediately invoked for each MTD device
1080  *	currently present in the system.
1081  */
1082 int unregister_mtd_user (struct mtd_notifier *old)
1083 {
1084 	struct mtd_info *mtd;
1085 
1086 	mutex_lock(&mtd_table_mutex);
1087 
1088 	module_put(THIS_MODULE);
1089 
1090 	mtd_for_each_device(mtd)
1091 		old->remove(mtd);
1092 
1093 	list_del(&old->list);
1094 	mutex_unlock(&mtd_table_mutex);
1095 	return 0;
1096 }
1097 EXPORT_SYMBOL_GPL(unregister_mtd_user);
1098 
1099 /**
1100  *	get_mtd_device - obtain a validated handle for an MTD device
1101  *	@mtd: last known address of the required MTD device
1102  *	@num: internal device number of the required MTD device
1103  *
1104  *	Given a number and NULL address, return the num'th entry in the device
1105  *	table, if any.	Given an address and num == -1, search the device table
1106  *	for a device with that address and return if it's still present. Given
1107  *	both, return the num'th driver only if its address matches. Return
1108  *	error code if not.
1109  */
1110 struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
1111 {
1112 	struct mtd_info *ret = NULL, *other;
1113 	int err = -ENODEV;
1114 
1115 	mutex_lock(&mtd_table_mutex);
1116 
1117 	if (num == -1) {
1118 		mtd_for_each_device(other) {
1119 			if (other == mtd) {
1120 				ret = mtd;
1121 				break;
1122 			}
1123 		}
1124 	} else if (num >= 0) {
1125 		ret = idr_find(&mtd_idr, num);
1126 		if (mtd && mtd != ret)
1127 			ret = NULL;
1128 	}
1129 
1130 	if (!ret) {
1131 		ret = ERR_PTR(err);
1132 		goto out;
1133 	}
1134 
1135 	err = __get_mtd_device(ret);
1136 	if (err)
1137 		ret = ERR_PTR(err);
1138 out:
1139 	mutex_unlock(&mtd_table_mutex);
1140 	return ret;
1141 }
1142 EXPORT_SYMBOL_GPL(get_mtd_device);
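
/*
 * Illustrative sketch (error handling shortened): grab a reference on device
 * number 0, use it, then release it with put_mtd_device().
 *
 *	struct mtd_info *mtd = get_mtd_device(NULL, 0);
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);
 */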
1143 
1144 
1145 int __get_mtd_device(struct mtd_info *mtd)
1146 {
1147 	struct mtd_info *master = mtd_get_master(mtd);
1148 	int err;
1149 
1150 	if (!try_module_get(master->owner))
1151 		return -ENODEV;
1152 
1153 	if (master->_get_device) {
1154 		err = master->_get_device(mtd);
1155 
1156 		if (err) {
1157 			module_put(master->owner);
1158 			return err;
1159 		}
1160 	}
1161 
1162 	master->usecount++;
1163 
1164 	while (mtd->parent) {
1165 		mtd->usecount++;
1166 		mtd = mtd->parent;
1167 	}
1168 
1169 	return 0;
1170 }
1171 EXPORT_SYMBOL_GPL(__get_mtd_device);
1172 
1173 /**
1174  *	get_mtd_device_nm - obtain a validated handle for an MTD device by
1175  *	device name
1176  *	@name: MTD device name to open
1177  *
1178  * 	This function returns the MTD device description structure in case of
1179  * 	success and an error pointer in case of failure.
1180  */
1181 struct mtd_info *get_mtd_device_nm(const char *name)
1182 {
1183 	int err = -ENODEV;
1184 	struct mtd_info *mtd = NULL, *other;
1185 
1186 	mutex_lock(&mtd_table_mutex);
1187 
1188 	mtd_for_each_device(other) {
1189 		if (!strcmp(name, other->name)) {
1190 			mtd = other;
1191 			break;
1192 		}
1193 	}
1194 
1195 	if (!mtd)
1196 		goto out_unlock;
1197 
1198 	err = __get_mtd_device(mtd);
1199 	if (err)
1200 		goto out_unlock;
1201 
1202 	mutex_unlock(&mtd_table_mutex);
1203 	return mtd;
1204 
1205 out_unlock:
1206 	mutex_unlock(&mtd_table_mutex);
1207 	return ERR_PTR(err);
1208 }
1209 EXPORT_SYMBOL_GPL(get_mtd_device_nm);
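
/*
 * Illustrative sketch ("spi-nor0" is a made-up device name): the by-name
 * variant follows the same get/put pattern as get_mtd_device().
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("spi-nor0");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);
 */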
1210 
1211 void put_mtd_device(struct mtd_info *mtd)
1212 {
1213 	mutex_lock(&mtd_table_mutex);
1214 	__put_mtd_device(mtd);
1215 	mutex_unlock(&mtd_table_mutex);
1216 
1217 }
1218 EXPORT_SYMBOL_GPL(put_mtd_device);
1219 
1220 void __put_mtd_device(struct mtd_info *mtd)
1221 {
1222 	struct mtd_info *master = mtd_get_master(mtd);
1223 
1224 	while (mtd->parent) {
1225 		--mtd->usecount;
1226 		BUG_ON(mtd->usecount < 0);
1227 		mtd = mtd->parent;
1228 	}
1229 
1230 	master->usecount--;
1231 
1232 	if (master->_put_device)
1233 		master->_put_device(master);
1234 
1235 	module_put(master->owner);
1236 }
1237 EXPORT_SYMBOL_GPL(__put_mtd_device);
1238 
1239 /*
1240  * Erase is a synchronous operation. Device drivers are expected to return a
1241  * negative error code if the operation failed and to update instr->fail_addr
1242  * to point to the portion that was not properly erased.
1243  */
1244 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1245 {
1246 	struct mtd_info *master = mtd_get_master(mtd);
1247 	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
1248 	struct erase_info adjinstr;
1249 	int ret;
1250 
1251 	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1252 	adjinstr = *instr;
1253 
1254 	if (!mtd->erasesize || !master->_erase)
1255 		return -ENOTSUPP;
1256 
1257 	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1258 		return -EINVAL;
1259 	if (!(mtd->flags & MTD_WRITEABLE))
1260 		return -EROFS;
1261 
1262 	if (!instr->len)
1263 		return 0;
1264 
1265 	ledtrig_mtd_activity();
1266 
1267 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1268 		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
1269 				master->erasesize;
1270 		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
1271 				master->erasesize) -
1272 			       adjinstr.addr;
1273 	}
1274 
1275 	adjinstr.addr += mst_ofs;
1276 
1277 	ret = master->_erase(master, &adjinstr);
1278 
1279 	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
1280 		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
1281 		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1282 			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
1283 							 master);
1284 			instr->fail_addr *= mtd->erasesize;
1285 		}
1286 	}
1287 
1288 	return ret;
1289 }
1290 EXPORT_SYMBOL_GPL(mtd_erase);
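
/*
 * Illustrative sketch (assumes @mtd is held and @block is a valid eraseblock
 * index): erase a single block and report where the operation failed, if the
 * driver recorded a failure address.
 *
 *	struct erase_info ei = {
 *		.addr = (uint64_t)block * mtd->erasesize,
 *		.len  = mtd->erasesize,
 *	};
 *	int ret = mtd_erase(mtd, &ei);
 *
 *	if (ret && ei.fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 *		pr_err("erase failed at 0x%llx\n",
 *		       (unsigned long long)ei.fail_addr);
 */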
1291 
1292 /*
1293  * This stuff is for eXecute-In-Place (XIP). phys is optional and may be set to NULL.
1294  */
1295 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1296 	      void **virt, resource_size_t *phys)
1297 {
1298 	struct mtd_info *master = mtd_get_master(mtd);
1299 
1300 	*retlen = 0;
1301 	*virt = NULL;
1302 	if (phys)
1303 		*phys = 0;
1304 	if (!master->_point)
1305 		return -EOPNOTSUPP;
1306 	if (from < 0 || from >= mtd->size || len > mtd->size - from)
1307 		return -EINVAL;
1308 	if (!len)
1309 		return 0;
1310 
1311 	from = mtd_get_master_ofs(mtd, from);
1312 	return master->_point(master, from, len, retlen, virt, phys);
1313 }
1314 EXPORT_SYMBOL_GPL(mtd_point);
1315 
1316 /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
1317 int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1318 {
1319 	struct mtd_info *master = mtd_get_master(mtd);
1320 
1321 	if (!master->_unpoint)
1322 		return -EOPNOTSUPP;
1323 	if (from < 0 || from >= mtd->size || len > mtd->size - from)
1324 		return -EINVAL;
1325 	if (!len)
1326 		return 0;
1327 	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1328 }
1329 EXPORT_SYMBOL_GPL(mtd_unpoint);
1330 
1331 /*
1332  * Allow NOMMU mmap() to directly map the device (if not NULL)
1333  * - return the address to which the offset maps
1334  * - return -ENOSYS to indicate refusal to do the mapping
1335  */
1336 unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1337 				    unsigned long offset, unsigned long flags)
1338 {
1339 	size_t retlen;
1340 	void *virt;
1341 	int ret;
1342 
1343 	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1344 	if (ret)
1345 		return ret;
1346 	if (retlen != len) {
1347 		mtd_unpoint(mtd, offset, retlen);
1348 		return -ENOSYS;
1349 	}
1350 	return (unsigned long)virt;
1351 }
1352 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1353 
1354 static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
1355 				 const struct mtd_ecc_stats *old_stats)
1356 {
1357 	struct mtd_ecc_stats diff;
1358 
1359 	if (master == mtd)
1360 		return;
1361 
1362 	diff = master->ecc_stats;
1363 	diff.failed -= old_stats->failed;
1364 	diff.corrected -= old_stats->corrected;
1365 
1366 	while (mtd->parent) {
1367 		mtd->ecc_stats.failed += diff.failed;
1368 		mtd->ecc_stats.corrected += diff.corrected;
1369 		mtd = mtd->parent;
1370 	}
1371 }
1372 
1373 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1374 	     u_char *buf)
1375 {
1376 	struct mtd_oob_ops ops = {
1377 		.len = len,
1378 		.datbuf = buf,
1379 	};
1380 	int ret;
1381 
1382 	ret = mtd_read_oob(mtd, from, &ops);
1383 	*retlen = ops.retlen;
1384 
1385 	return ret;
1386 }
1387 EXPORT_SYMBOL_GPL(mtd_read);
1388 
1389 int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1390 	      const u_char *buf)
1391 {
1392 	struct mtd_oob_ops ops = {
1393 		.len = len,
1394 		.datbuf = (u8 *)buf,
1395 	};
1396 	int ret;
1397 
1398 	ret = mtd_write_oob(mtd, to, &ops);
1399 	*retlen = ops.retlen;
1400 
1401 	return ret;
1402 }
1403 EXPORT_SYMBOL_GPL(mtd_write);
1404 
1405 /*
1406  * In blackbox flight recorder like scenarios we want to make successful writes
1407  * in interrupt context. panic_write() is only intended to be called when it is
1408  * known the kernel is about to panic and we need the write to succeed. Since
1409  * the kernel is not going to be running for much longer, this function can
1410  * break locks and delay to ensure the write succeeds (but not sleep).
1411  */
1412 int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1413 		    const u_char *buf)
1414 {
1415 	struct mtd_info *master = mtd_get_master(mtd);
1416 
1417 	*retlen = 0;
1418 	if (!master->_panic_write)
1419 		return -EOPNOTSUPP;
1420 	if (to < 0 || to >= mtd->size || len > mtd->size - to)
1421 		return -EINVAL;
1422 	if (!(mtd->flags & MTD_WRITEABLE))
1423 		return -EROFS;
1424 	if (!len)
1425 		return 0;
1426 	if (!master->oops_panic_write)
1427 		master->oops_panic_write = true;
1428 
1429 	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
1430 				    retlen, buf);
1431 }
1432 EXPORT_SYMBOL_GPL(mtd_panic_write);
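
/*
 * Illustrative sketch of how a blackbox-style logger might pick the write
 * path (oops_in_progress is the generic printk flag; mtd/ofs/len/buf are
 * assumed to be set up by the caller):
 *
 *	size_t retlen;
 *	int ret;
 *
 *	if (oops_in_progress)
 *		ret = mtd_panic_write(mtd, ofs, len, &retlen, buf);
 *	else
 *		ret = mtd_write(mtd, ofs, len, &retlen, buf);
 */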
1433 
1434 static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1435 			     struct mtd_oob_ops *ops)
1436 {
1437 	/*
1438 	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
1439 	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
1440 	 *  this case.
1441 	 */
1442 	if (!ops->datbuf)
1443 		ops->len = 0;
1444 
1445 	if (!ops->oobbuf)
1446 		ops->ooblen = 0;
1447 
1448 	if (offs < 0 || offs + ops->len > mtd->size)
1449 		return -EINVAL;
1450 
1451 	if (ops->ooblen) {
1452 		size_t maxooblen;
1453 
1454 		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1455 			return -EINVAL;
1456 
1457 		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1458 				      mtd_div_by_ws(offs, mtd)) *
1459 			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
1460 		if (ops->ooblen > maxooblen)
1461 			return -EINVAL;
1462 	}
1463 
1464 	return 0;
1465 }
1466 
1467 static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1468 			    struct mtd_oob_ops *ops)
1469 {
1470 	struct mtd_info *master = mtd_get_master(mtd);
1471 	int ret;
1472 
1473 	from = mtd_get_master_ofs(mtd, from);
1474 	if (master->_read_oob)
1475 		ret = master->_read_oob(master, from, ops);
1476 	else
1477 		ret = master->_read(master, from, ops->len, &ops->retlen,
1478 				    ops->datbuf);
1479 
1480 	return ret;
1481 }
1482 
1483 static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1484 			     struct mtd_oob_ops *ops)
1485 {
1486 	struct mtd_info *master = mtd_get_master(mtd);
1487 	int ret;
1488 
1489 	to = mtd_get_master_ofs(mtd, to);
1490 	if (master->_write_oob)
1491 		ret = master->_write_oob(master, to, ops);
1492 	else
1493 		ret = master->_write(master, to, ops->len, &ops->retlen,
1494 				     ops->datbuf);
1495 
1496 	return ret;
1497 }
1498 
1499 static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
1500 			       struct mtd_oob_ops *ops)
1501 {
1502 	struct mtd_info *master = mtd_get_master(mtd);
1503 	int ngroups = mtd_pairing_groups(master);
1504 	int npairs = mtd_wunit_per_eb(master) / ngroups;
1505 	struct mtd_oob_ops adjops = *ops;
1506 	unsigned int wunit, oobavail;
1507 	struct mtd_pairing_info info;
1508 	int max_bitflips = 0;
1509 	u32 ebofs, pageofs;
1510 	loff_t base, pos;
1511 
1512 	ebofs = mtd_mod_by_eb(start, mtd);
1513 	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
1514 	info.group = 0;
1515 	info.pair = mtd_div_by_ws(ebofs, mtd);
1516 	pageofs = mtd_mod_by_ws(ebofs, mtd);
1517 	oobavail = mtd_oobavail(mtd, ops);
1518 
1519 	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
1520 		int ret;
1521 
1522 		if (info.pair >= npairs) {
1523 			info.pair = 0;
1524 			base += master->erasesize;
1525 		}
1526 
1527 		wunit = mtd_pairing_info_to_wunit(master, &info);
1528 		pos = mtd_wunit_to_offset(mtd, base, wunit);
1529 
1530 		adjops.len = ops->len - ops->retlen;
1531 		if (adjops.len > mtd->writesize - pageofs)
1532 			adjops.len = mtd->writesize - pageofs;
1533 
1534 		adjops.ooblen = ops->ooblen - ops->oobretlen;
1535 		if (adjops.ooblen > oobavail - adjops.ooboffs)
1536 			adjops.ooblen = oobavail - adjops.ooboffs;
1537 
1538 		if (read) {
1539 			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
1540 			if (ret > 0)
1541 				max_bitflips = max(max_bitflips, ret);
1542 		} else {
1543 			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
1544 		}
1545 
1546 		if (ret < 0)
1547 			return ret;
1548 
1549 		max_bitflips = max(max_bitflips, ret);
1550 		ops->retlen += adjops.retlen;
1551 		ops->oobretlen += adjops.oobretlen;
1552 		adjops.datbuf += adjops.retlen;
1553 		adjops.oobbuf += adjops.oobretlen;
1554 		adjops.ooboffs = 0;
1555 		pageofs = 0;
1556 		info.pair++;
1557 	}
1558 
1559 	return max_bitflips;
1560 }
1561 
1562 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1563 {
1564 	struct mtd_info *master = mtd_get_master(mtd);
1565 	struct mtd_ecc_stats old_stats = master->ecc_stats;
1566 	int ret_code;
1567 
1568 	ops->retlen = ops->oobretlen = 0;
1569 
1570 	ret_code = mtd_check_oob_ops(mtd, from, ops);
1571 	if (ret_code)
1572 		return ret_code;
1573 
1574 	ledtrig_mtd_activity();
1575 
1576 	/* Check the validity of a potential fallback on mtd->_read */
1577 	if (!master->_read_oob && (!master->_read || ops->oobbuf))
1578 		return -EOPNOTSUPP;
1579 
1580 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1581 		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
1582 	else
1583 		ret_code = mtd_read_oob_std(mtd, from, ops);
1584 
1585 	mtd_update_ecc_stats(mtd, master, &old_stats);
1586 
1587 	/*
1588 	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
1589 	 * similar to mtd->_read(), returning a non-negative integer
1590 	 * representing max bitflips. In other cases, mtd->_read_oob() may
1591 	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
1592 	 */
1593 	if (unlikely(ret_code < 0))
1594 		return ret_code;
1595 	if (mtd->ecc_strength == 0)
1596 		return 0;	/* device lacks ecc */
1597 	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1598 }
1599 EXPORT_SYMBOL_GPL(mtd_read_oob);
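
/*
 * Illustrative sketch (databuf/oobbuf/page_addr come from the caller and
 * handle_scrub() is a made-up helper): read one page plus its available OOB
 * bytes, treating -EUCLEAN as "data is valid but the block should be
 * scrubbed".
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.len	= mtd->writesize,
 *		.datbuf	= databuf,
 *		.ooblen	= mtd->oobavail,
 *		.oobbuf	= oobbuf,
 *	};
 *	int ret = mtd_read_oob(mtd, page_addr, &ops);
 *
 *	if (ret == -EUCLEAN)
 *		handle_scrub(mtd, page_addr);
 *	else if (ret < 0)
 *		return ret;
 */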
1600 
1601 int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1602 				struct mtd_oob_ops *ops)
1603 {
1604 	struct mtd_info *master = mtd_get_master(mtd);
1605 	int ret;
1606 
1607 	ops->retlen = ops->oobretlen = 0;
1608 
1609 	if (!(mtd->flags & MTD_WRITEABLE))
1610 		return -EROFS;
1611 
1612 	ret = mtd_check_oob_ops(mtd, to, ops);
1613 	if (ret)
1614 		return ret;
1615 
1616 	ledtrig_mtd_activity();
1617 
1618 	/* Check the validity of a potential fallback on mtd->_write */
1619 	if (!master->_write_oob && (!master->_write || ops->oobbuf))
1620 		return -EOPNOTSUPP;
1621 
1622 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1623 		return mtd_io_emulated_slc(mtd, to, false, ops);
1624 
1625 	return mtd_write_oob_std(mtd, to, ops);
1626 }
1627 EXPORT_SYMBOL_GPL(mtd_write_oob);
1628 
1629 /**
1630  * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1631  * @mtd: MTD device structure
1632  * @section: ECC section. Depending on the layout you may have all the ECC
1633  *	     bytes stored in a single contiguous section, or one section
1634  *	     per ECC chunk (and sometimes several sections for a single
1635  *	     ECC chunk)
1636  * @oobecc: OOB region struct filled with the appropriate ECC position
1637  *	    information
1638  *
1639  * This function returns ECC section information in the OOB area. If you want
1640  * to get all the ECC bytes information, then you should call
1641  * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1642  *
1643  * Returns zero on success, a negative error code otherwise.
1644  */
1645 int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1646 		      struct mtd_oob_region *oobecc)
1647 {
1648 	struct mtd_info *master = mtd_get_master(mtd);
1649 
1650 	memset(oobecc, 0, sizeof(*oobecc));
1651 
1652 	if (!master || section < 0)
1653 		return -EINVAL;
1654 
1655 	if (!master->ooblayout || !master->ooblayout->ecc)
1656 		return -ENOTSUPP;
1657 
1658 	return master->ooblayout->ecc(master, section, oobecc);
1659 }
1660 EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
1661 
1662 /**
1663  * mtd_ooblayout_free - Get the OOB region definition of a specific free
1664  *			section
1665  * @mtd: MTD device structure
1666  * @section: Free section you are interested in. Depending on the layout
1667  *	     you may have all the free bytes stored in a single contiguous
1668  *	     section, or one section per ECC chunk plus an extra section
1669  *	     for the remaining bytes (or other funky layout).
1670  * @oobfree: OOB region struct filled with the appropriate free position
1671  *	     information
1672  *
1673  * This function returns free bytes position in the OOB area. If you want
1674  * to get all the free bytes information, then you should call
1675  * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1676  *
1677  * Returns zero on success, a negative error code otherwise.
1678  */
1679 int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1680 		       struct mtd_oob_region *oobfree)
1681 {
1682 	struct mtd_info *master = mtd_get_master(mtd);
1683 
1684 	memset(oobfree, 0, sizeof(*oobfree));
1685 
1686 	if (!master || section < 0)
1687 		return -EINVAL;
1688 
1689 	if (!master->ooblayout || !master->ooblayout->free)
1690 		return -ENOTSUPP;
1691 
1692 	return master->ooblayout->free(master, section, oobfree);
1693 }
1694 EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
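
/*
 * Illustrative sketch: dump the complete OOB layout by iterating both
 * callbacks until they return -ERANGE, as described above.
 *
 *	struct mtd_oob_region region;
 *	int section;
 *
 *	for (section = 0; !mtd_ooblayout_ecc(mtd, section, &region); section++)
 *		pr_info("ECC section %d: offset %u, length %u\n",
 *			section, region.offset, region.length);
 *
 *	for (section = 0; !mtd_ooblayout_free(mtd, section, &region); section++)
 *		pr_info("free section %d: offset %u, length %u\n",
 *			section, region.offset, region.length);
 */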
1695 
1696 /**
1697  * mtd_ooblayout_find_region - Find the region attached to a specific byte
1698  * @mtd: mtd info structure
1699  * @byte: the byte we are searching for
1700  * @sectionp: pointer where the section id will be stored
1701  * @oobregion: used to retrieve the ECC position
1702  * @iter: iterator function. Should be either mtd_ooblayout_free or
1703  *	  mtd_ooblayout_ecc depending on the region type you're searching for
1704  *
1705  * This function returns the section id and oobregion information of a
1706  * specific byte. For example, say you want to know where the 4th ECC byte is
1707  * stored, you'll use:
1708  *
1709  * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1710  *
1711  * Returns zero on success, a negative error code otherwise.
1712  */
1713 static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1714 				int *sectionp, struct mtd_oob_region *oobregion,
1715 				int (*iter)(struct mtd_info *,
1716 					    int section,
1717 					    struct mtd_oob_region *oobregion))
1718 {
1719 	int pos = 0, ret, section = 0;
1720 
1721 	memset(oobregion, 0, sizeof(*oobregion));
1722 
1723 	while (1) {
1724 		ret = iter(mtd, section, oobregion);
1725 		if (ret)
1726 			return ret;
1727 
1728 		if (pos + oobregion->length > byte)
1729 			break;
1730 
1731 		pos += oobregion->length;
1732 		section++;
1733 	}
1734 
1735 	/*
1736 	 * Adjust region info to make it start at the beginning of the
1737 	 * 'start' byte.
1738 	 */
1739 	oobregion->offset += byte - pos;
1740 	oobregion->length -= byte - pos;
1741 	*sectionp = section;
1742 
1743 	return 0;
1744 }
1745 
1746 /**
1747  * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1748  *				  ECC byte
1749  * @mtd: mtd info structure
1750  * @eccbyte: the byte we are searching for
1751  * @section: pointer where the section id will be stored
1752  * @oobregion: OOB region information
1753  *
1754  * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1755  * byte.
1756  *
1757  * Returns zero on success, a negative error code otherwise.
1758  */
1759 int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1760 				 int *section,
1761 				 struct mtd_oob_region *oobregion)
1762 {
1763 	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1764 					 mtd_ooblayout_ecc);
1765 }
1766 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1767 
1768 /**
1769  * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1770  * @mtd: mtd info structure
1771  * @buf: destination buffer to store OOB bytes
1772  * @oobbuf: OOB buffer
1773  * @start: first byte to retrieve
1774  * @nbytes: number of bytes to retrieve
1775  * @iter: section iterator
1776  *
1777  * Extract bytes attached to a specific category (ECC or free)
1778  * from the OOB buffer and copy them into buf.
1779  *
1780  * Returns zero on success, a negative error code otherwise.
1781  */
1782 static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1783 				const u8 *oobbuf, int start, int nbytes,
1784 				int (*iter)(struct mtd_info *,
1785 					    int section,
1786 					    struct mtd_oob_region *oobregion))
1787 {
1788 	struct mtd_oob_region oobregion;
1789 	int section, ret;
1790 
1791 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1792 					&oobregion, iter);
1793 
1794 	while (!ret) {
1795 		int cnt;
1796 
1797 		cnt = min_t(int, nbytes, oobregion.length);
1798 		memcpy(buf, oobbuf + oobregion.offset, cnt);
1799 		buf += cnt;
1800 		nbytes -= cnt;
1801 
1802 		if (!nbytes)
1803 			break;
1804 
1805 		ret = iter(mtd, ++section, &oobregion);
1806 	}
1807 
1808 	return ret;
1809 }
1810 
1811 /**
1812  * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1813  * @mtd: mtd info structure
1814  * @buf: source buffer to get OOB bytes from
1815  * @oobbuf: OOB buffer
1816  * @start: first OOB byte to set
1817  * @nbytes: number of OOB bytes to set
1818  * @iter: section iterator
1819  *
1820  * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1821  * is selected by passing the appropriate iterator.
1822  *
1823  * Returns zero on success, a negative error code otherwise.
1824  */
1825 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1826 				u8 *oobbuf, int start, int nbytes,
1827 				int (*iter)(struct mtd_info *,
1828 					    int section,
1829 					    struct mtd_oob_region *oobregion))
1830 {
1831 	struct mtd_oob_region oobregion;
1832 	int section, ret;
1833 
1834 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1835 					&oobregion, iter);
1836 
1837 	while (!ret) {
1838 		int cnt;
1839 
1840 		cnt = min_t(int, nbytes, oobregion.length);
1841 		memcpy(oobbuf + oobregion.offset, buf, cnt);
1842 		buf += cnt;
1843 		nbytes -= cnt;
1844 
1845 		if (!nbytes)
1846 			break;
1847 
1848 		ret = iter(mtd, ++section, &oobregion);
1849 	}
1850 
1851 	return ret;
1852 }
1853 
1854 /**
1855  * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
1856  * @mtd: mtd info structure
1857  * @iter: category iterator
1858  *
1859  * Count the number of bytes in a given category.
1860  *
1861  * Returns a positive value on success, a negative error code otherwise.
1862  */
1863 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1864 				int (*iter)(struct mtd_info *,
1865 					    int section,
1866 					    struct mtd_oob_region *oobregion))
1867 {
1868 	struct mtd_oob_region oobregion;
1869 	int section = 0, ret, nbytes = 0;
1870 
1871 	while (1) {
1872 		ret = iter(mtd, section++, &oobregion);
1873 		if (ret) {
1874 			if (ret == -ERANGE)
1875 				ret = nbytes;
1876 			break;
1877 		}
1878 
1879 		nbytes += oobregion.length;
1880 	}
1881 
1882 	return ret;
1883 }
1884 
1885 /**
1886  * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1887  * @mtd: mtd info structure
1888  * @eccbuf: destination buffer to store ECC bytes
1889  * @oobbuf: OOB buffer
1890  * @start: first ECC byte to retrieve
1891  * @nbytes: number of ECC bytes to retrieve
1892  *
1893  * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1894  *
1895  * Returns zero on success, a negative error code otherwise.
1896  */
1897 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1898 			       const u8 *oobbuf, int start, int nbytes)
1899 {
1900 	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1901 				       mtd_ooblayout_ecc);
1902 }
1903 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
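
/*
 * Illustrative sketch (editorial addition, not part of the driver): a typical
 * consumer reads the OOB area in raw mode and then uses
 * mtd_ooblayout_get_eccbytes() to gather the ECC bytes, which may be scattered
 * across several sections, into one contiguous buffer. The function name and
 * error handling below are assumptions made for the example.
 */
#if 0 /* example only, not built */
static int example_read_ecc_bytes(struct mtd_info *mtd, loff_t page_addr,
				  u8 *eccbuf, int ecclen)
{
	struct mtd_oob_ops ops = { };
	u8 *oobbuf;
	int ret;

	oobbuf = kmalloc(mtd->oobsize, GFP_KERNEL);
	if (!oobbuf)
		return -ENOMEM;

	ops.mode = MTD_OPS_RAW;
	ops.ooblen = mtd->oobsize;
	ops.oobbuf = oobbuf;

	ret = mtd_read_oob(mtd, page_addr, &ops);
	if (!ret)
		/* Copy ECC bytes [0, ecclen) out of the raw OOB buffer. */
		ret = mtd_ooblayout_get_eccbytes(mtd, eccbuf, oobbuf, 0, ecclen);

	kfree(oobbuf);
	return ret;
}
#endif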
1904 
1905 /**
1906  * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1907  * @mtd: mtd info structure
1908  * @eccbuf: source buffer to get ECC bytes from
1909  * @oobbuf: OOB buffer
1910  * @start: first ECC byte to set
1911  * @nbytes: number of ECC bytes to set
1912  *
1913  * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
1914  *
1915  * Returns zero on success, a negative error code otherwise.
1916  */
1917 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1918 			       u8 *oobbuf, int start, int nbytes)
1919 {
1920 	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1921 				       mtd_ooblayout_ecc);
1922 }
1923 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1924 
1925 /**
1926  * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
1927  * @mtd: mtd info structure
1928  * @databuf: destination buffer to store data bytes
1929  * @oobbuf: OOB buffer
1930  * @start: first data byte to retrieve
1931  * @nbytes: number of data bytes to retrieve
1932  *
1933  * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
1934  *
1935  * Returns zero on success, a negative error code otherwise.
1936  */
1937 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1938 				const u8 *oobbuf, int start, int nbytes)
1939 {
1940 	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1941 				       mtd_ooblayout_free);
1942 }
1943 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1944 
1945 /**
1946  * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
1947  * @mtd: mtd info structure
1948  * @databuf: source buffer to get data bytes from
1949  * @oobbuf: OOB buffer
1950  * @start: first data byte to set
1951  * @nbytes: number of data bytes to set
1952  *
1953  * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
1954  *
1955  * Returns zero on success, a negative error code otherwise.
1956  */
1957 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
1958 				u8 *oobbuf, int start, int nbytes)
1959 {
1960 	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
1961 				       mtd_ooblayout_free);
1962 }
1963 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
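
/*
 * Illustrative sketch (editorial addition): the mirror image of reading ECC
 * bytes: a caller preparing a raw OOB write can scatter its metadata into
 * the free positions with mtd_ooblayout_set_databytes() before handing the
 * buffer to mtd_write_oob(). The function name is hypothetical.
 */
#if 0 /* example only, not built */
static int example_pack_free_oob(struct mtd_info *mtd, const u8 *meta,
				 int metalen, u8 *oobbuf)
{
	/* Preset the whole OOB to 0xff so untouched bytes stay erased. */
	memset(oobbuf, 0xff, mtd->oobsize);

	return mtd_ooblayout_set_databytes(mtd, meta, oobbuf, 0, metalen);
}
#endif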
1964 
1965 /**
1966  * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
1967  * @mtd: mtd info structure
1968  *
1969  * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
1970  *
1971  * Returns the number of free bytes on success, a negative error code otherwise.
1972  */
1973 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
1974 {
1975 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
1976 }
1977 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1978 
1979 /**
1980  * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
1981  * @mtd: mtd info structure
1982  *
1983  * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
1984  *
1985  * Returns the number of ECC bytes on success, a negative error code otherwise.
1986  */
1987 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1988 {
1989 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1990 }
1991 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
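
/*
 * Illustrative sketch (editorial addition): the two count helpers above are
 * normally used to size the buffers later passed to the get/set helpers.
 * The function below is a hypothetical example of that pattern.
 */
#if 0 /* example only, not built */
static int example_alloc_ecc_buffer(struct mtd_info *mtd, u8 **eccbuf)
{
	int ecclen = mtd_ooblayout_count_eccbytes(mtd);

	if (ecclen < 0)
		return ecclen;	/* e.g. no OOB layout registered */

	*eccbuf = kzalloc(ecclen, GFP_KERNEL);
	if (!*eccbuf)
		return -ENOMEM;

	return ecclen;
}
#endif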
1992 
1993 /*
1994  * Methods to access the protection register area, present in some flash
1995  * devices. The user data is one-time programmable but the factory data is
1996  * read only.
1997  */
1998 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1999 			   struct otp_info *buf)
2000 {
2001 	struct mtd_info *master = mtd_get_master(mtd);
2002 
2003 	if (!master->_get_fact_prot_info)
2004 		return -EOPNOTSUPP;
2005 	if (!len)
2006 		return 0;
2007 	return master->_get_fact_prot_info(master, len, retlen, buf);
2008 }
2009 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2010 
2011 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2012 			   size_t *retlen, u_char *buf)
2013 {
2014 	struct mtd_info *master = mtd_get_master(mtd);
2015 
2016 	*retlen = 0;
2017 	if (!master->_read_fact_prot_reg)
2018 		return -EOPNOTSUPP;
2019 	if (!len)
2020 		return 0;
2021 	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2022 }
2023 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2024 
2025 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2026 			   struct otp_info *buf)
2027 {
2028 	struct mtd_info *master = mtd_get_master(mtd);
2029 
2030 	if (!master->_get_user_prot_info)
2031 		return -EOPNOTSUPP;
2032 	if (!len)
2033 		return 0;
2034 	return master->_get_user_prot_info(master, len, retlen, buf);
2035 }
2036 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2037 
2038 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2039 			   size_t *retlen, u_char *buf)
2040 {
2041 	struct mtd_info *master = mtd_get_master(mtd);
2042 
2043 	*retlen = 0;
2044 	if (!master->_read_user_prot_reg)
2045 		return -EOPNOTSUPP;
2046 	if (!len)
2047 		return 0;
2048 	return master->_read_user_prot_reg(master, from, len, retlen, buf);
2049 }
2050 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
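
/*
 * Illustrative sketch (editorial addition): user space normally reaches these
 * OTP helpers through the mtdchar OTP ioctls, but an in-kernel user could
 * enumerate the user OTP regions and read the first one roughly like this.
 * The function name and the fixed region-array size are assumptions.
 */
#if 0 /* example only, not built */
static int example_read_first_user_otp(struct mtd_info *mtd, u_char *buf,
				       size_t len)
{
	struct otp_info info[8];
	size_t retlen;
	int ret;

	ret = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
	if (ret)
		return ret;
	if (retlen < sizeof(info[0]))
		return -ENODEV;	/* no user OTP region reported */

	len = min_t(size_t, len, info[0].length);
	return mtd_read_user_prot_reg(mtd, info[0].start, len, &retlen, buf);
}
#endif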
2051 
2052 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2053 			    size_t *retlen, const u_char *buf)
2054 {
2055 	struct mtd_info *master = mtd_get_master(mtd);
2056 	int ret;
2057 
2058 	*retlen = 0;
2059 	if (!master->_write_user_prot_reg)
2060 		return -EOPNOTSUPP;
2061 	if (!len)
2062 		return 0;
2063 	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2064 	if (ret)
2065 		return ret;
2066 
2067 	/*
2068 	 * If no data could be written at all, the OTP area is out of space and
2069 	 * we must return -ENOSPC.
2070 	 */
2071 	return (*retlen) ? 0 : -ENOSPC;
2072 }
2073 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
2074 
2075 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2076 {
2077 	struct mtd_info *master = mtd_get_master(mtd);
2078 
2079 	if (!master->_lock_user_prot_reg)
2080 		return -EOPNOTSUPP;
2081 	if (!len)
2082 		return 0;
2083 	return master->_lock_user_prot_reg(master, from, len);
2084 }
2085 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2086 
2087 int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2088 {
2089 	struct mtd_info *master = mtd_get_master(mtd);
2090 
2091 	if (!master->_erase_user_prot_reg)
2092 		return -EOPNOTSUPP;
2093 	if (!len)
2094 		return 0;
2095 	return master->_erase_user_prot_reg(master, from, len);
2096 }
2097 EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
2098 
2099 /* Chip-supported device locking */
2100 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2101 {
2102 	struct mtd_info *master = mtd_get_master(mtd);
2103 
2104 	if (!master->_lock)
2105 		return -EOPNOTSUPP;
2106 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2107 		return -EINVAL;
2108 	if (!len)
2109 		return 0;
2110 
2111 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2112 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2113 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2114 	}
2115 
2116 	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2117 }
2118 EXPORT_SYMBOL_GPL(mtd_lock);
2119 
2120 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2121 {
2122 	struct mtd_info *master = mtd_get_master(mtd);
2123 
2124 	if (!master->_unlock)
2125 		return -EOPNOTSUPP;
2126 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2127 		return -EINVAL;
2128 	if (!len)
2129 		return 0;
2130 
2131 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2132 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2133 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2134 	}
2135 
2136 	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2137 }
2138 EXPORT_SYMBOL_GPL(mtd_unlock);
2139 
2140 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2141 {
2142 	struct mtd_info *master = mtd_get_master(mtd);
2143 
2144 	if (!master->_is_locked)
2145 		return -EOPNOTSUPP;
2146 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2147 		return -EINVAL;
2148 	if (!len)
2149 		return 0;
2150 
2151 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2152 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2153 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2154 	}
2155 
2156 	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2157 }
2158 EXPORT_SYMBOL_GPL(mtd_is_locked);
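
/*
 * Illustrative sketch (editorial addition): callers that must modify a
 * protected region typically bracket the write with mtd_unlock()/mtd_lock(),
 * treating -EOPNOTSUPP as "no locking implemented" and carrying on. The
 * wrapper below is hypothetical.
 */
#if 0 /* example only, not built */
static int example_write_unlocked(struct mtd_info *mtd, loff_t ofs, size_t len,
				  const u_char *buf)
{
	size_t retlen;
	int ret;

	ret = mtd_unlock(mtd, ofs, len);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	ret = mtd_write(mtd, ofs, len, &retlen, buf);

	/* Best effort: re-lock the range even if the write failed. */
	mtd_lock(mtd, ofs, len);

	return ret;
}
#endif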
2159 
2160 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2161 {
2162 	struct mtd_info *master = mtd_get_master(mtd);
2163 
2164 	if (ofs < 0 || ofs >= mtd->size)
2165 		return -EINVAL;
2166 	if (!master->_block_isreserved)
2167 		return 0;
2168 
2169 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2170 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2171 
2172 	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2173 }
2174 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2175 
2176 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2177 {
2178 	struct mtd_info *master = mtd_get_master(mtd);
2179 
2180 	if (ofs < 0 || ofs >= mtd->size)
2181 		return -EINVAL;
2182 	if (!master->_block_isbad)
2183 		return 0;
2184 
2185 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2186 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2187 
2188 	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2189 }
2190 EXPORT_SYMBOL_GPL(mtd_block_isbad);
2191 
2192 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2193 {
2194 	struct mtd_info *master = mtd_get_master(mtd);
2195 	int ret;
2196 
2197 	if (!master->_block_markbad)
2198 		return -EOPNOTSUPP;
2199 	if (ofs < 0 || ofs >= mtd->size)
2200 		return -EINVAL;
2201 	if (!(mtd->flags & MTD_WRITEABLE))
2202 		return -EROFS;
2203 
2204 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2205 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2206 
2207 	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2208 	if (ret)
2209 		return ret;
2210 
2211 	while (mtd->parent) {
2212 		mtd->ecc_stats.badblocks++;
2213 		mtd = mtd->parent;
2214 	}
2215 
2216 	return 0;
2217 }
2218 EXPORT_SYMBOL_GPL(mtd_block_markbad);
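
/*
 * Illustrative sketch (editorial addition): erase-block users such as flash
 * file systems walk the device with mtd_block_isbad() and skip (or, after a
 * failed erase/write, mark with mtd_block_markbad()) the bad ones. The scan
 * loop below is a hypothetical example of that pattern.
 */
#if 0 /* example only, not built */
static int example_count_good_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int good = 0, ret;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		ret = mtd_block_isbad(mtd, ofs);
		if (ret < 0)
			return ret;	/* I/O error while checking */
		if (!ret)
			good++;
	}

	return good;
}
#endif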
2219 
2220 /*
2221  * default_mtd_writev - the default writev method
2222  * @mtd: mtd device description object pointer
2223  * @vecs: the vectors to write
2224  * @count: count of vectors in @vecs
2225  * @to: the MTD device offset to write to
2226  * @retlen: on exit contains the count of bytes written to the MTD device.
2227  *
2228  * This function returns zero in case of success and a negative error code in
2229  * case of failure.
2230  */
2231 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2232 			      unsigned long count, loff_t to, size_t *retlen)
2233 {
2234 	unsigned long i;
2235 	size_t totlen = 0, thislen;
2236 	int ret = 0;
2237 
2238 	for (i = 0; i < count; i++) {
2239 		if (!vecs[i].iov_len)
2240 			continue;
2241 		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2242 				vecs[i].iov_base);
2243 		totlen += thislen;
2244 		if (ret || thislen != vecs[i].iov_len)
2245 			break;
2246 		to += vecs[i].iov_len;
2247 	}
2248 	*retlen = totlen;
2249 	return ret;
2250 }
2251 
2252 /*
2253  * mtd_writev - the vector-based MTD write method
2254  * @mtd: mtd device description object pointer
2255  * @vecs: the vectors to write
2256  * @count: count of vectors in @vecs
2257  * @to: the MTD device offset to write to
2258  * @retlen: on exit contains the count of bytes written to the MTD device.
2259  *
2260  * This function returns zero in case of success and a negative error code in
2261  * case of failure.
2262  */
2263 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2264 	       unsigned long count, loff_t to, size_t *retlen)
2265 {
2266 	struct mtd_info *master = mtd_get_master(mtd);
2267 
2268 	*retlen = 0;
2269 	if (!(mtd->flags & MTD_WRITEABLE))
2270 		return -EROFS;
2271 
2272 	if (!master->_writev)
2273 		return default_mtd_writev(mtd, vecs, count, to, retlen);
2274 
2275 	return master->_writev(master, vecs, count,
2276 			       mtd_get_master_ofs(mtd, to), retlen);
2277 }
2278 EXPORT_SYMBOL_GPL(mtd_writev);
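
/*
 * Illustrative sketch (editorial addition): callers such as JFFS2 gather a
 * node header and its payload into a small kvec array so that drivers with a
 * native _writev() can program both in one operation. A hypothetical
 * two-buffer write could look like this.
 */
#if 0 /* example only, not built */
static int example_write_two_buffers(struct mtd_info *mtd, loff_t to,
				     void *hdr, size_t hdrlen,
				     void *payload, size_t paylen)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,     .iov_len = hdrlen },
		{ .iov_base = payload, .iov_len = paylen },
	};
	size_t retlen;
	int ret;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (!ret && retlen != hdrlen + paylen)
		ret = -EIO;	/* short write */

	return ret;
}
#endif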
2279 
2280 /**
2281  * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2282  * @mtd: mtd device description object pointer
2283  * @size: a pointer to the ideal or maximum size of the allocation, points
2284  *        to the actual allocation size on success.
2285  *
2286  * This routine attempts to allocate a contiguous kernel buffer up to
2287  * the specified size, backing off the size of the request exponentially
2288  * until the request succeeds or until the allocation size falls below
2289  * the system page size. This attempts to make sure it does not adversely
2290  * impact system performance, so when allocating more than one page, we
2291  * ask the memory allocator to avoid re-trying, swapping, writing back
2292  * or performing I/O.
2293  *
2294  * Note, this function also makes sure that the allocated buffer is aligned to
2295  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2296  *
2297  * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2298  * to handle smaller (i.e. degraded) buffer allocations under low-memory or
2299  * fragmented-memory situations where such reduced allocations, below the
2300  * requested ideal size, are allowed.
2301  *
2302  * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2303  */
2304 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2305 {
2306 	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2307 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2308 	void *kbuf;
2309 
2310 	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2311 
2312 	while (*size > min_alloc) {
2313 		kbuf = kmalloc(*size, flags);
2314 		if (kbuf)
2315 			return kbuf;
2316 
2317 		*size >>= 1;
2318 		*size = ALIGN(*size, mtd->writesize);
2319 	}
2320 
2321 	/*
2322 	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2323 	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2324 	 */
2325 	return kmalloc(*size, GFP_KERNEL);
2326 }
2327 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
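
/*
 * Illustrative sketch (editorial addition): a caller wanting to process a
 * whole erase block, but able to live with a smaller bounce buffer under
 * memory pressure, would use mtd_kmalloc_up_to() and then loop over the
 * region in 'size'-byte chunks. The function below is hypothetical.
 */
#if 0 /* example only, not built */
static int example_read_block(struct mtd_info *mtd, loff_t ofs, u_char *dst)
{
	size_t size = mtd->erasesize;
	size_t done = 0, retlen;
	u_char *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &size);	/* may shrink 'size' */
	if (!buf)
		return -ENOMEM;

	while (done < mtd->erasesize) {
		size_t chunk = min_t(size_t, size, mtd->erasesize - done);

		ret = mtd_read(mtd, ofs + done, chunk, &retlen, buf);
		if (ret || !retlen)
			break;

		memcpy(dst + done, buf, retlen);
		done += retlen;
	}

	kfree(buf);
	return ret;
}
#endif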
2328 
2329 #ifdef CONFIG_PROC_FS
2330 
2331 /*====================================================================*/
2332 /* Support for /proc/mtd */
2333 
2334 static int mtd_proc_show(struct seq_file *m, void *v)
2335 {
2336 	struct mtd_info *mtd;
2337 
2338 	seq_puts(m, "dev:    size   erasesize  name\n");
2339 	mutex_lock(&mtd_table_mutex);
2340 	mtd_for_each_device(mtd) {
2341 		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2342 			   mtd->index, (unsigned long long)mtd->size,
2343 			   mtd->erasesize, mtd->name);
2344 	}
2345 	mutex_unlock(&mtd_table_mutex);
2346 	return 0;
2347 }
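
/*
 * Editorial note: the resulting /proc/mtd output looks like the following
 * (sizes and names are made-up values, shown only to illustrate the format):
 *
 *	dev:    size   erasesize  name
 *	mtd0: 00100000 00020000 "boot"
 *	mtd1: 0ff00000 00020000 "rootfs"
 */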
2348 #endif /* CONFIG_PROC_FS */
2349 
2350 /*====================================================================*/
2351 /* Init code */
2352 
2353 static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2354 {
2355 	struct backing_dev_info *bdi;
2356 	int ret;
2357 
2358 	bdi = bdi_alloc(NUMA_NO_NODE);
2359 	if (!bdi)
2360 		return ERR_PTR(-ENOMEM);
2361 	bdi->ra_pages = 0;
2362 	bdi->io_pages = 0;
2363 
2364 	/*
2365 	 * We append a '-0' suffix to the name to keep the name format we used
2366 	 * to have. Since this is called only once, the name is unique.
2367 	 */
2368 	ret = bdi_register(bdi, "%.28s-0", name);
2369 	if (ret)
2370 		bdi_put(bdi);
2371 
2372 	return ret ? ERR_PTR(ret) : bdi;
2373 }
2374 
2375 char *mtd_expert_analysis_warning =
2376 	"Bad block checks have been entirely disabled.\n"
2377 	"This is only reserved for post-mortem forensics and debug purposes.\n"
2378 	"Never enable this mode if you do not know what you are doing!\n";
2379 EXPORT_SYMBOL_GPL(mtd_expert_analysis_warning);
2380 bool mtd_expert_analysis_mode;
2381 EXPORT_SYMBOL_GPL(mtd_expert_analysis_mode);
2382 
2383 static struct proc_dir_entry *proc_mtd;
2384 
2385 static int __init init_mtd(void)
2386 {
2387 	int ret;
2388 
2389 	ret = class_register(&mtd_class);
2390 	if (ret)
2391 		goto err_reg;
2392 
2393 	mtd_bdi = mtd_bdi_init("mtd");
2394 	if (IS_ERR(mtd_bdi)) {
2395 		ret = PTR_ERR(mtd_bdi);
2396 		goto err_bdi;
2397 	}
2398 
2399 	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2400 
2401 	ret = init_mtdchar();
2402 	if (ret)
2403 		goto out_procfs;
2404 
2405 	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2406 	debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2407 			    &mtd_expert_analysis_mode);
2408 
2409 	return 0;
2410 
2411 out_procfs:
2412 	if (proc_mtd)
2413 		remove_proc_entry("mtd", NULL);
2414 	bdi_put(mtd_bdi);
2415 err_bdi:
2416 	class_unregister(&mtd_class);
2417 err_reg:
2418 	pr_err("Error registering mtd class or bdi: %d\n", ret);
2419 	return ret;
2420 }
2421 
2422 static void __exit cleanup_mtd(void)
2423 {
2424 	debugfs_remove_recursive(dfs_dir_mtd);
2425 	cleanup_mtdchar();
2426 	if (proc_mtd)
2427 		remove_proc_entry("mtd", NULL);
2428 	class_unregister(&mtd_class);
2429 	bdi_unregister(mtd_bdi);
2430 	bdi_put(mtd_bdi);
2431 	idr_destroy(&mtd_idr);
2432 }
2433 
2434 module_init(init_mtd);
2435 module_exit(cleanup_mtd);
2436 
2437 MODULE_LICENSE("GPL");
2438 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2439 MODULE_DESCRIPTION("Core MTD registration and access routines");
2440