xref: /linux/drivers/mtd/mtdcore.c (revision 6bdd45d795adf9e73b38ced5e7f750cd199499ff)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Core registration and callback routines for MTD
4  * drivers and users.
5  *
6  * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
7  * Copyright © 2006      Red Hat UK Limited
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/ptrace.h>
13 #include <linux/seq_file.h>
14 #include <linux/string.h>
15 #include <linux/timer.h>
16 #include <linux/major.h>
17 #include <linux/fs.h>
18 #include <linux/err.h>
19 #include <linux/ioctl.h>
20 #include <linux/init.h>
21 #include <linux/of.h>
22 #include <linux/proc_fs.h>
23 #include <linux/idr.h>
24 #include <linux/backing-dev.h>
25 #include <linux/gfp.h>
26 #include <linux/slab.h>
27 #include <linux/reboot.h>
28 #include <linux/leds.h>
29 #include <linux/debugfs.h>
30 #include <linux/nvmem-provider.h>
31 #include <linux/root_dev.h>
32 
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/partitions.h>
35 
36 #include "mtdcore.h"
37 
38 struct backing_dev_info *mtd_bdi;
39 
40 #ifdef CONFIG_PM_SLEEP
41 
42 static int mtd_cls_suspend(struct device *dev)
43 {
44 	struct mtd_info *mtd = dev_get_drvdata(dev);
45 
46 	return mtd ? mtd_suspend(mtd) : 0;
47 }
48 
49 static int mtd_cls_resume(struct device *dev)
50 {
51 	struct mtd_info *mtd = dev_get_drvdata(dev);
52 
53 	if (mtd)
54 		mtd_resume(mtd);
55 	return 0;
56 }
57 
58 static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
59 #define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
60 #else
61 #define MTD_CLS_PM_OPS NULL
62 #endif
63 
64 static struct class mtd_class = {
65 	.name = "mtd",
66 	.owner = THIS_MODULE,
67 	.pm = MTD_CLS_PM_OPS,
68 };
69 
70 static DEFINE_IDR(mtd_idr);
71 
72 /* These are exported solely for the purpose of mtd_blkdevs.c. You
73    should not use them for _anything_ else */
74 DEFINE_MUTEX(mtd_table_mutex);
75 EXPORT_SYMBOL_GPL(mtd_table_mutex);
76 
77 struct mtd_info *__mtd_next_device(int i)
78 {
79 	return idr_get_next(&mtd_idr, &i);
80 }
81 EXPORT_SYMBOL_GPL(__mtd_next_device);
82 
83 static LIST_HEAD(mtd_notifiers);
84 
85 
86 #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
87 
88 /* REVISIT once MTD uses the driver model better, whoever allocates
89  * the mtd_info will probably want to use the release() hook...
90  */
91 static void mtd_release(struct device *dev)
92 {
93 	struct mtd_info *mtd = dev_get_drvdata(dev);
94 	dev_t index = MTD_DEVT(mtd->index);
95 
96 	/* remove /dev/mtdXro node */
97 	device_destroy(&mtd_class, index + 1);
98 }
99 
100 #define MTD_DEVICE_ATTR_RO(name) \
101 static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
102 
103 #define MTD_DEVICE_ATTR_RW(name) \
104 static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
105 
106 static ssize_t mtd_type_show(struct device *dev,
107 		struct device_attribute *attr, char *buf)
108 {
109 	struct mtd_info *mtd = dev_get_drvdata(dev);
110 	char *type;
111 
112 	switch (mtd->type) {
113 	case MTD_ABSENT:
114 		type = "absent";
115 		break;
116 	case MTD_RAM:
117 		type = "ram";
118 		break;
119 	case MTD_ROM:
120 		type = "rom";
121 		break;
122 	case MTD_NORFLASH:
123 		type = "nor";
124 		break;
125 	case MTD_NANDFLASH:
126 		type = "nand";
127 		break;
128 	case MTD_DATAFLASH:
129 		type = "dataflash";
130 		break;
131 	case MTD_UBIVOLUME:
132 		type = "ubi";
133 		break;
134 	case MTD_MLCNANDFLASH:
135 		type = "mlc-nand";
136 		break;
137 	default:
138 		type = "unknown";
139 	}
140 
141 	return sysfs_emit(buf, "%s\n", type);
142 }
143 MTD_DEVICE_ATTR_RO(type);
144 
145 static ssize_t mtd_flags_show(struct device *dev,
146 		struct device_attribute *attr, char *buf)
147 {
148 	struct mtd_info *mtd = dev_get_drvdata(dev);
149 
150 	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
151 }
152 MTD_DEVICE_ATTR_RO(flags);
153 
154 static ssize_t mtd_size_show(struct device *dev,
155 		struct device_attribute *attr, char *buf)
156 {
157 	struct mtd_info *mtd = dev_get_drvdata(dev);
158 
159 	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
160 }
161 MTD_DEVICE_ATTR_RO(size);
162 
163 static ssize_t mtd_erasesize_show(struct device *dev,
164 		struct device_attribute *attr, char *buf)
165 {
166 	struct mtd_info *mtd = dev_get_drvdata(dev);
167 
168 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
169 }
170 MTD_DEVICE_ATTR_RO(erasesize);
171 
172 static ssize_t mtd_writesize_show(struct device *dev,
173 		struct device_attribute *attr, char *buf)
174 {
175 	struct mtd_info *mtd = dev_get_drvdata(dev);
176 
177 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
178 }
179 MTD_DEVICE_ATTR_RO(writesize);
180 
181 static ssize_t mtd_subpagesize_show(struct device *dev,
182 		struct device_attribute *attr, char *buf)
183 {
184 	struct mtd_info *mtd = dev_get_drvdata(dev);
185 	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
186 
187 	return sysfs_emit(buf, "%u\n", subpagesize);
188 }
189 MTD_DEVICE_ATTR_RO(subpagesize);
190 
191 static ssize_t mtd_oobsize_show(struct device *dev,
192 		struct device_attribute *attr, char *buf)
193 {
194 	struct mtd_info *mtd = dev_get_drvdata(dev);
195 
196 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
197 }
198 MTD_DEVICE_ATTR_RO(oobsize);
199 
200 static ssize_t mtd_oobavail_show(struct device *dev,
201 				 struct device_attribute *attr, char *buf)
202 {
203 	struct mtd_info *mtd = dev_get_drvdata(dev);
204 
205 	return sysfs_emit(buf, "%u\n", mtd->oobavail);
206 }
207 MTD_DEVICE_ATTR_RO(oobavail);
208 
209 static ssize_t mtd_numeraseregions_show(struct device *dev,
210 		struct device_attribute *attr, char *buf)
211 {
212 	struct mtd_info *mtd = dev_get_drvdata(dev);
213 
214 	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
215 }
216 MTD_DEVICE_ATTR_RO(numeraseregions);
217 
218 static ssize_t mtd_name_show(struct device *dev,
219 		struct device_attribute *attr, char *buf)
220 {
221 	struct mtd_info *mtd = dev_get_drvdata(dev);
222 
223 	return sysfs_emit(buf, "%s\n", mtd->name);
224 }
225 MTD_DEVICE_ATTR_RO(name);
226 
227 static ssize_t mtd_ecc_strength_show(struct device *dev,
228 				     struct device_attribute *attr, char *buf)
229 {
230 	struct mtd_info *mtd = dev_get_drvdata(dev);
231 
232 	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
233 }
234 MTD_DEVICE_ATTR_RO(ecc_strength);
235 
236 static ssize_t mtd_bitflip_threshold_show(struct device *dev,
237 					  struct device_attribute *attr,
238 					  char *buf)
239 {
240 	struct mtd_info *mtd = dev_get_drvdata(dev);
241 
242 	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
243 }
244 
245 static ssize_t mtd_bitflip_threshold_store(struct device *dev,
246 					   struct device_attribute *attr,
247 					   const char *buf, size_t count)
248 {
249 	struct mtd_info *mtd = dev_get_drvdata(dev);
250 	unsigned int bitflip_threshold;
251 	int retval;
252 
253 	retval = kstrtouint(buf, 0, &bitflip_threshold);
254 	if (retval)
255 		return retval;
256 
257 	mtd->bitflip_threshold = bitflip_threshold;
258 	return count;
259 }
260 MTD_DEVICE_ATTR_RW(bitflip_threshold);
261 
262 static ssize_t mtd_ecc_step_size_show(struct device *dev,
263 		struct device_attribute *attr, char *buf)
264 {
265 	struct mtd_info *mtd = dev_get_drvdata(dev);
266 
267 	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
268 
269 }
270 MTD_DEVICE_ATTR_RO(ecc_step_size);
271 
272 static ssize_t mtd_corrected_bits_show(struct device *dev,
273 		struct device_attribute *attr, char *buf)
274 {
275 	struct mtd_info *mtd = dev_get_drvdata(dev);
276 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
277 
278 	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
279 }
280 MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */
281 
282 static ssize_t mtd_ecc_failures_show(struct device *dev,
283 		struct device_attribute *attr, char *buf)
284 {
285 	struct mtd_info *mtd = dev_get_drvdata(dev);
286 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
287 
288 	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
289 }
290 MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */
291 
292 static ssize_t mtd_bad_blocks_show(struct device *dev,
293 		struct device_attribute *attr, char *buf)
294 {
295 	struct mtd_info *mtd = dev_get_drvdata(dev);
296 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
297 
298 	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
299 }
300 MTD_DEVICE_ATTR_RO(bad_blocks);
301 
302 static ssize_t mtd_bbt_blocks_show(struct device *dev,
303 		struct device_attribute *attr, char *buf)
304 {
305 	struct mtd_info *mtd = dev_get_drvdata(dev);
306 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
307 
308 	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
309 }
310 MTD_DEVICE_ATTR_RO(bbt_blocks);
311 
312 static struct attribute *mtd_attrs[] = {
313 	&dev_attr_type.attr,
314 	&dev_attr_flags.attr,
315 	&dev_attr_size.attr,
316 	&dev_attr_erasesize.attr,
317 	&dev_attr_writesize.attr,
318 	&dev_attr_subpagesize.attr,
319 	&dev_attr_oobsize.attr,
320 	&dev_attr_oobavail.attr,
321 	&dev_attr_numeraseregions.attr,
322 	&dev_attr_name.attr,
323 	&dev_attr_ecc_strength.attr,
324 	&dev_attr_ecc_step_size.attr,
325 	&dev_attr_corrected_bits.attr,
326 	&dev_attr_ecc_failures.attr,
327 	&dev_attr_bad_blocks.attr,
328 	&dev_attr_bbt_blocks.attr,
329 	&dev_attr_bitflip_threshold.attr,
330 	NULL,
331 };
332 ATTRIBUTE_GROUPS(mtd);
333 
334 static const struct device_type mtd_devtype = {
335 	.name		= "mtd",
336 	.groups		= mtd_groups,
337 	.release	= mtd_release,
338 };
339 
340 static bool mtd_expert_analysis_mode;
341 
342 #ifdef CONFIG_DEBUG_FS
343 bool mtd_check_expert_analysis_mode(void)
344 {
345 	const char *mtd_expert_analysis_warning =
346 		"Bad block checks have been entirely disabled.\n"
347 		"This is only reserved for post-mortem forensics and debug purposes.\n"
348 		"Never enable this mode if you do not know what you are doing!\n";
349 
350 	return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
351 }
352 EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
353 #endif
354 
355 static struct dentry *dfs_dir_mtd;
356 
357 static void mtd_debugfs_populate(struct mtd_info *mtd)
358 {
359 	struct device *dev = &mtd->dev;
360 
361 	if (IS_ERR_OR_NULL(dfs_dir_mtd))
362 		return;
363 
364 	mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
365 }
366 
367 #ifndef CONFIG_MMU
368 unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
369 {
370 	switch (mtd->type) {
371 	case MTD_RAM:
372 		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
373 			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
374 	case MTD_ROM:
375 		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
376 			NOMMU_MAP_READ;
377 	default:
378 		return NOMMU_MAP_COPY;
379 	}
380 }
381 EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
382 #endif
383 
384 static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
385 			       void *cmd)
386 {
387 	struct mtd_info *mtd;
388 
389 	mtd = container_of(n, struct mtd_info, reboot_notifier);
390 	mtd->_reboot(mtd);
391 
392 	return NOTIFY_DONE;
393 }
394 
395 /**
396  * mtd_wunit_to_pairing_info - get pairing information of a wunit
397  * @mtd: pointer to new MTD device info structure
398  * @wunit: write unit we are interested in
399  * @info: returned pairing information
400  *
401  * Retrieve pairing information associated with the wunit.
402  * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
403  * paired together, and where programming a page may influence the page it is
404  * paired with.
405  * The notion of page is replaced by the term wunit (write-unit) to stay
406  * consistent with the ->writesize field.
407  *
408  * The @wunit argument can be extracted from an absolute offset using
409  * mtd_offset_to_wunit(). @info is filled with the pairing information attached
410  * to @wunit.
411  *
412  * From the pairing info the MTD user can find all the wunits paired with
413  * @wunit using the following loop:
414  *
415  * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
416  *	info.group = i;
417  *	mtd_pairing_info_to_wunit(mtd, &info);
418  *	...
419  * }
420  */
421 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
422 			      struct mtd_pairing_info *info)
423 {
424 	struct mtd_info *master = mtd_get_master(mtd);
425 	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
426 
427 	if (wunit < 0 || wunit >= npairs)
428 		return -EINVAL;
429 
430 	if (master->pairing && master->pairing->get_info)
431 		return master->pairing->get_info(master, wunit, info);
432 
433 	info->group = 0;
434 	info->pair = wunit;
435 
436 	return 0;
437 }
438 EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
439 
440 /**
441  * mtd_pairing_info_to_wunit - get wunit from pairing information
442  * @mtd: pointer to new MTD device info structure
443  * @info: pairing information struct
444  *
445  * Returns a positive number representing the wunit associated with the info
446  * struct, or a negative error code.
447  *
448  * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
449  * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
450  * doc).
451  *
452  * It can also be used to only program the first page of each pair (i.e.
453  * page attached to group 0), which allows one to use an MLC NAND in
454  * software-emulated SLC mode:
455  *
456  * info.group = 0;
457  * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
458  * for (info.pair = 0; info.pair < npairs; info.pair++) {
459  *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
460  *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
461  *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
462  *		  mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
463  */
464 int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
465 			      const struct mtd_pairing_info *info)
466 {
467 	struct mtd_info *master = mtd_get_master(mtd);
468 	int ngroups = mtd_pairing_groups(master);
469 	int npairs = mtd_wunit_per_eb(master) / ngroups;
470 
471 	if (!info || info->pair < 0 || info->pair >= npairs ||
472 	    info->group < 0 || info->group >= ngroups)
473 		return -EINVAL;
474 
475 	if (master->pairing && master->pairing->get_wunit)
476 		return mtd->pairing->get_wunit(master, info);
477 
478 	return info->pair;
479 }
480 EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
481 
482 /**
483  * mtd_pairing_groups - get the number of pairing groups
484  * @mtd: pointer to new MTD device info structure
485  *
486  * Returns the number of pairing groups.
487  *
488  * This number is usually equal to the number of bits exposed by a single
489  * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
490  * to iterate over all pages of a given pair.
491  */
492 int mtd_pairing_groups(struct mtd_info *mtd)
493 {
494 	struct mtd_info *master = mtd_get_master(mtd);
495 
496 	if (!master->pairing || !master->pairing->ngroups)
497 		return 1;
498 
499 	return master->pairing->ngroups;
500 }
501 EXPORT_SYMBOL_GPL(mtd_pairing_groups);
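/*
 * Illustrative sketch (not part of the original file): finding every write
 * unit paired with the one holding a hypothetical offset "offs" inside an
 * erase block, by combining the three pairing helpers above.
 *
 *	struct mtd_pairing_info info;
 *	int i, ret, wunit, paired;
 *
 *	wunit = mtd_offset_to_wunit(mtd, offs);
 *	ret = mtd_wunit_to_pairing_info(mtd, wunit, &info);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *		info.group = i;
 *		paired = mtd_pairing_info_to_wunit(mtd, &info);
 *		(paired now designates a wunit belonging to the same pair)
 *	}
 */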
502 
503 static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
504 			      void *val, size_t bytes)
505 {
506 	struct mtd_info *mtd = priv;
507 	size_t retlen;
508 	int err;
509 
510 	err = mtd_read(mtd, offset, bytes, &retlen, val);
511 	if (err && err != -EUCLEAN)
512 		return err;
513 
514 	return retlen == bytes ? 0 : -EIO;
515 }
516 
517 static int mtd_nvmem_add(struct mtd_info *mtd)
518 {
519 	struct device_node *node = mtd_get_of_node(mtd);
520 	struct nvmem_config config = {};
521 
522 	config.id = -1;
523 	config.dev = &mtd->dev;
524 	config.name = dev_name(&mtd->dev);
525 	config.owner = THIS_MODULE;
526 	config.reg_read = mtd_nvmem_reg_read;
527 	config.size = mtd->size;
528 	config.word_size = 1;
529 	config.stride = 1;
530 	config.read_only = true;
531 	config.root_only = true;
532 	config.ignore_wp = true;
533 	config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
534 	config.priv = mtd;
535 
536 	mtd->nvmem = nvmem_register(&config);
537 	if (IS_ERR(mtd->nvmem)) {
538 		/* Just ignore if there is no NVMEM support in the kernel */
539 		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
540 			mtd->nvmem = NULL;
541 		} else {
542 			dev_err(&mtd->dev, "Failed to register NVMEM device\n");
543 			return PTR_ERR(mtd->nvmem);
544 		}
545 	}
546 
547 	return 0;
548 }
549 
550 static void mtd_check_of_node(struct mtd_info *mtd)
551 {
552 	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
553 	const char *pname, *prefix = "partition-";
554 	int plen, mtd_name_len, offset, prefix_len;
555 
556 	/* Check if MTD already has a device node */
557 	if (mtd_get_of_node(mtd))
558 		return;
559 
560 	if (!mtd_is_partition(mtd))
561 		return;
562 
563 	parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
564 	if (!parent_dn)
565 		return;
566 
567 	if (mtd_is_partition(mtd->parent))
568 		partitions = of_node_get(parent_dn);
569 	else
570 		partitions = of_get_child_by_name(parent_dn, "partitions");
571 	if (!partitions)
572 		goto exit_parent;
573 
574 	prefix_len = strlen(prefix);
575 	mtd_name_len = strlen(mtd->name);
576 
577 	/* Search if a partition is defined with the same name */
578 	for_each_child_of_node(partitions, mtd_dn) {
579 		/* Skip partition with no/wrong prefix */
580 		if (!of_node_name_prefix(mtd_dn, prefix))
581 			continue;
582 
583 		/* Labels have priority. Check them first */
584 		if (!of_property_read_string(mtd_dn, "label", &pname)) {
585 			offset = 0;
586 		} else {
587 			pname = mtd_dn->name;
588 			offset = prefix_len;
589 		}
590 
591 		plen = strlen(pname) - offset;
592 		if (plen == mtd_name_len &&
593 		    !strncmp(mtd->name, pname + offset, plen)) {
594 			mtd_set_of_node(mtd, mtd_dn);
595 			break;
596 		}
597 	}
598 
599 	of_node_put(partitions);
600 exit_parent:
601 	of_node_put(parent_dn);
602 }
603 
604 /**
605  *	add_mtd_device - register an MTD device
606  *	@mtd: pointer to new MTD device info structure
607  *
608  *	Add a device to the list of MTD devices present in the system, and
609  *	notify each currently active MTD 'user' of its arrival. Returns
610  *	zero on success or non-zero on failure.
611  */
612 
613 int add_mtd_device(struct mtd_info *mtd)
614 {
615 	struct device_node *np = mtd_get_of_node(mtd);
616 	struct mtd_info *master = mtd_get_master(mtd);
617 	struct mtd_notifier *not;
618 	int i, error, ofidx;
619 
620 	/*
621 	 * May occur, for instance, on buggy drivers which call
622 	 * mtd_device_parse_register() multiple times on the same master MTD,
623 	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
624 	 */
625 	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
626 		return -EEXIST;
627 
628 	BUG_ON(mtd->writesize == 0);
629 
630 	/*
631 	 * MTD drivers should implement ->_{write,read}() or
632 	 * ->_{write,read}_oob(), but not both.
633 	 */
634 	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
635 		    (mtd->_read && mtd->_read_oob)))
636 		return -EINVAL;
637 
638 	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
639 		    !(mtd->flags & MTD_NO_ERASE)))
640 		return -EINVAL;
641 
642 	/*
643 	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
644 	 * master is an MLC NAND and has a proper pairing scheme defined.
645 	 * We also reject masters that implement ->_writev() for now, because
646 	 * NAND controller drivers don't implement this hook, and adding the
647 	 * SLC -> MLC address/length conversion to this path is useless if we
648 	 * don't have a user.
649 	 */
650 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
651 	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
652 	     !master->pairing || master->_writev))
653 		return -EINVAL;
654 
655 	mutex_lock(&mtd_table_mutex);
656 
657 	ofidx = -1;
658 	if (np)
659 		ofidx = of_alias_get_id(np, "mtd");
660 	if (ofidx >= 0)
661 		i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
662 	else
663 		i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
664 	if (i < 0) {
665 		error = i;
666 		goto fail_locked;
667 	}
668 
669 	mtd->index = i;
670 	mtd->usecount = 0;
671 
672 	/* default value if not set by driver */
673 	if (mtd->bitflip_threshold == 0)
674 		mtd->bitflip_threshold = mtd->ecc_strength;
675 
676 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
677 		int ngroups = mtd_pairing_groups(master);
678 
679 		mtd->erasesize /= ngroups;
680 		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
681 			    mtd->erasesize;
682 	}
683 
684 	if (is_power_of_2(mtd->erasesize))
685 		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
686 	else
687 		mtd->erasesize_shift = 0;
688 
689 	if (is_power_of_2(mtd->writesize))
690 		mtd->writesize_shift = ffs(mtd->writesize) - 1;
691 	else
692 		mtd->writesize_shift = 0;
693 
694 	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
695 	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
696 
697 	/* Some chips always power up locked. Unlock them now */
698 	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
699 		error = mtd_unlock(mtd, 0, mtd->size);
700 		if (error && error != -EOPNOTSUPP)
701 			printk(KERN_WARNING
702 			       "%s: unlock failed, writes may not work\n",
703 			       mtd->name);
704 		/* Ignore unlock failures? */
705 		error = 0;
706 	}
707 
708 	/* Caller should have set dev.parent to match the
709 	 * physical device, if appropriate.
710 	 */
711 	mtd->dev.type = &mtd_devtype;
712 	mtd->dev.class = &mtd_class;
713 	mtd->dev.devt = MTD_DEVT(i);
714 	dev_set_name(&mtd->dev, "mtd%d", i);
715 	dev_set_drvdata(&mtd->dev, mtd);
716 	mtd_check_of_node(mtd);
717 	of_node_get(mtd_get_of_node(mtd));
718 	error = device_register(&mtd->dev);
719 	if (error) {
720 		put_device(&mtd->dev);
721 		goto fail_added;
722 	}
723 
724 	/* Add the nvmem provider */
725 	error = mtd_nvmem_add(mtd);
726 	if (error)
727 		goto fail_nvmem_add;
728 
729 	mtd_debugfs_populate(mtd);
730 
731 	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
732 		      "mtd%dro", i);
733 
734 	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
735 	/* No need to get a refcount on the module containing
736 	   the notifier, since we hold the mtd_table_mutex */
737 	list_for_each_entry(not, &mtd_notifiers, list)
738 		not->add(mtd);
739 
740 	mutex_unlock(&mtd_table_mutex);
741 
742 	if (of_find_property(mtd_get_of_node(mtd), "linux,rootfs", NULL)) {
743 		if (IS_BUILTIN(CONFIG_MTD)) {
744 			pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
745 			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
746 		} else {
747 			pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
748 				mtd->index, mtd->name);
749 		}
750 	}
751 
752 	/* We _know_ we aren't being removed, because
753 	   our caller is still holding us here. So none
754 	   of this try_ nonsense, and no bitching about it
755 	   either. :) */
756 	__module_get(THIS_MODULE);
757 	return 0;
758 
759 fail_nvmem_add:
760 	device_unregister(&mtd->dev);
761 fail_added:
762 	of_node_put(mtd_get_of_node(mtd));
763 	idr_remove(&mtd_idr, i);
764 fail_locked:
765 	mutex_unlock(&mtd_table_mutex);
766 	return error;
767 }
768 
769 /**
770  *	del_mtd_device - unregister an MTD device
771  *	@mtd: pointer to MTD device info structure
772  *
773  *	Remove a device from the list of MTD devices present in the system,
774  *	and notify each currently active MTD 'user' of its departure.
775  *	Returns zero on success or a negative error code on failure: -ENODEV if the
776  *	requested device is not present in the list, or -EBUSY if it is still in use.
777  */
778 
779 int del_mtd_device(struct mtd_info *mtd)
780 {
781 	int ret;
782 	struct mtd_notifier *not;
783 
784 	mutex_lock(&mtd_table_mutex);
785 
786 	if (idr_find(&mtd_idr, mtd->index) != mtd) {
787 		ret = -ENODEV;
788 		goto out_error;
789 	}
790 
791 	/* No need to get a refcount on the module containing
792 		the notifier, since we hold the mtd_table_mutex */
793 	list_for_each_entry(not, &mtd_notifiers, list)
794 		not->remove(mtd);
795 
796 	if (mtd->usecount) {
797 		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
798 		       mtd->index, mtd->name, mtd->usecount);
799 		ret = -EBUSY;
800 	} else {
801 		debugfs_remove_recursive(mtd->dbg.dfs_dir);
802 
803 		/* Try to remove the NVMEM provider */
804 		nvmem_unregister(mtd->nvmem);
805 
806 		device_unregister(&mtd->dev);
807 
808 		/* Clear dev so mtd can be safely re-registered later if desired */
809 		memset(&mtd->dev, 0, sizeof(mtd->dev));
810 
811 		idr_remove(&mtd_idr, mtd->index);
812 		of_node_put(mtd_get_of_node(mtd));
813 
814 		module_put(THIS_MODULE);
815 		ret = 0;
816 	}
817 
818 out_error:
819 	mutex_unlock(&mtd_table_mutex);
820 	return ret;
821 }
822 
823 /*
824  * Set a few defaults based on the parent device, if not provided by the
825  * driver
826  */
827 static void mtd_set_dev_defaults(struct mtd_info *mtd)
828 {
829 	if (mtd->dev.parent) {
830 		if (!mtd->owner && mtd->dev.parent->driver)
831 			mtd->owner = mtd->dev.parent->driver->owner;
832 		if (!mtd->name)
833 			mtd->name = dev_name(mtd->dev.parent);
834 	} else {
835 		pr_debug("mtd device won't show a device symlink in sysfs\n");
836 	}
837 
838 	INIT_LIST_HEAD(&mtd->partitions);
839 	mutex_init(&mtd->master.partitions_lock);
840 	mutex_init(&mtd->master.chrdev_lock);
841 }
842 
843 static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
844 {
845 	struct otp_info *info;
846 	ssize_t size = 0;
847 	unsigned int i;
848 	size_t retlen;
849 	int ret;
850 
851 	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
852 	if (!info)
853 		return -ENOMEM;
854 
855 	if (is_user)
856 		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
857 	else
858 		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
859 	if (ret)
860 		goto err;
861 
862 	for (i = 0; i < retlen / sizeof(*info); i++)
863 		size += info[i].length;
864 
865 	kfree(info);
866 	return size;
867 
868 err:
869 	kfree(info);
870 
871 	/* ENODATA means there is no OTP region. */
872 	return ret == -ENODATA ? 0 : ret;
873 }
874 
875 static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
876 						   const char *compatible,
877 						   int size,
878 						   nvmem_reg_read_t reg_read)
879 {
880 	struct nvmem_device *nvmem = NULL;
881 	struct nvmem_config config = {};
882 	struct device_node *np;
883 
884 	/* DT binding is optional */
885 	np = of_get_compatible_child(mtd->dev.of_node, compatible);
886 
887 	/* OTP nvmem will be registered on the physical device */
888 	config.dev = mtd->dev.parent;
889 	config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
890 	config.id = NVMEM_DEVID_NONE;
891 	config.owner = THIS_MODULE;
892 	config.type = NVMEM_TYPE_OTP;
893 	config.root_only = true;
894 	config.ignore_wp = true;
895 	config.reg_read = reg_read;
896 	config.size = size;
897 	config.of_node = np;
898 	config.priv = mtd;
899 
900 	nvmem = nvmem_register(&config);
901 	/* Just ignore if there is no NVMEM support in the kernel */
902 	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
903 		nvmem = NULL;
904 
905 	of_node_put(np);
906 	kfree(config.name);
907 
908 	return nvmem;
909 }
910 
911 static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
912 				       void *val, size_t bytes)
913 {
914 	struct mtd_info *mtd = priv;
915 	size_t retlen;
916 	int ret;
917 
918 	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
919 	if (ret)
920 		return ret;
921 
922 	return retlen == bytes ? 0 : -EIO;
923 }
924 
925 static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
926 				       void *val, size_t bytes)
927 {
928 	struct mtd_info *mtd = priv;
929 	size_t retlen;
930 	int ret;
931 
932 	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
933 	if (ret)
934 		return ret;
935 
936 	return retlen == bytes ? 0 : -EIO;
937 }
938 
939 static int mtd_otp_nvmem_add(struct mtd_info *mtd)
940 {
941 	struct nvmem_device *nvmem;
942 	ssize_t size;
943 	int err;
944 
945 	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
946 		size = mtd_otp_size(mtd, true);
947 		if (size < 0)
948 			return size;
949 
950 		if (size > 0) {
951 			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
952 						       mtd_nvmem_user_otp_reg_read);
953 			if (IS_ERR(nvmem)) {
954 				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
955 				return PTR_ERR(nvmem);
956 			}
957 			mtd->otp_user_nvmem = nvmem;
958 		}
959 	}
960 
961 	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
962 		size = mtd_otp_size(mtd, false);
963 		if (size < 0) {
964 			err = size;
965 			goto err;
966 		}
967 
968 		if (size > 0) {
969 			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
970 						       mtd_nvmem_fact_otp_reg_read);
971 			if (IS_ERR(nvmem)) {
972 				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
973 				err = PTR_ERR(nvmem);
974 				goto err;
975 			}
976 			mtd->otp_factory_nvmem = nvmem;
977 		}
978 	}
979 
980 	return 0;
981 
982 err:
983 	nvmem_unregister(mtd->otp_user_nvmem);
984 	return err;
985 }
986 
987 /**
988  * mtd_device_parse_register - parse partitions and register an MTD device.
989  *
990  * @mtd: the MTD device to register
991  * @types: the list of MTD partition probes to try, see
992  *         'parse_mtd_partitions()' for more information
993  * @parser_data: MTD partition parser-specific data
994  * @parts: fallback partition information to register, if parsing fails;
995  *         only valid if %nr_parts > %0
996  * @nr_parts: the number of partitions in parts, if zero then the full
997  *            MTD device is registered if no partition info is found
998  *
999  * This function aggregates MTD partitions parsing (done by
1000  * 'parse_mtd_partitions()') and MTD device and partitions registering. It
1001  * basically follows the most common pattern found in many MTD drivers:
1002  *
1003  * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
1004  *   registered first.
1005  * * Then it tries to probe partitions on MTD device @mtd using parsers
1006  *   specified in @types (if @types is %NULL, then the default list of parsers
1007  *   is used, see 'parse_mtd_partitions()' for more information). If none are
1008  *   found, this function falls back to the information specified in
1009  *   @parts/@nr_parts.
1010  * * If no partitions were found this function just registers the MTD device
1011  *   @mtd and exits.
1012  *
1013  * Returns zero in case of success and a negative error code in case of failure.
1014  */
1015 int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
1016 			      struct mtd_part_parser_data *parser_data,
1017 			      const struct mtd_partition *parts,
1018 			      int nr_parts)
1019 {
1020 	int ret;
1021 
1022 	mtd_set_dev_defaults(mtd);
1023 
1024 	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
1025 		ret = add_mtd_device(mtd);
1026 		if (ret)
1027 			return ret;
1028 	}
1029 
1030 	/* Prefer parsed partitions over driver-provided fallback */
1031 	ret = parse_mtd_partitions(mtd, types, parser_data);
1032 	if (ret == -EPROBE_DEFER)
1033 		goto out;
1034 
1035 	if (ret > 0)
1036 		ret = 0;
1037 	else if (nr_parts)
1038 		ret = add_mtd_partitions(mtd, parts, nr_parts);
1039 	else if (!device_is_registered(&mtd->dev))
1040 		ret = add_mtd_device(mtd);
1041 	else
1042 		ret = 0;
1043 
1044 	if (ret)
1045 		goto out;
1046 
1047 	/*
1048 	 * FIXME: some drivers unfortunately call this function more than once.
1049 	 * So we have to check if we've already assigned the reboot notifier.
1050 	 *
1051 	 * Generally, we can make multiple calls work for most cases, but it
1052 	 * does cause problems with parse_mtd_partitions() above (e.g.,
1053 	 * cmdlineparts will register partitions more than once).
1054 	 */
1055 	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
1056 		  "MTD already registered\n");
1057 	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
1058 		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
1059 		register_reboot_notifier(&mtd->reboot_notifier);
1060 	}
1061 
1062 	ret = mtd_otp_nvmem_add(mtd);
1063 
1064 out:
1065 	if (ret && device_is_registered(&mtd->dev))
1066 		del_mtd_device(mtd);
1067 
1068 	return ret;
1069 }
1070 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
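/*
 * Illustrative sketch (not from a real driver): the usual probe-time call,
 * with a hypothetical fallback partition table used only when no parser
 * finds anything. SZ_1M assumes <linux/sizes.h>.
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_1M },
 *		{ .name = "data", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	ret = mtd_device_parse_register(mtd, NULL, NULL, example_parts,
 *					ARRAY_SIZE(example_parts));
 *	if (ret)
 *		return ret;
 *
 * The matching teardown on driver removal is mtd_device_unregister(mtd).
 */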
1071 
1072 /**
1073  * mtd_device_unregister - unregister an existing MTD device.
1074  *
1075  * @master: the MTD device to unregister.  This will unregister both the master
1076  *          and any partitions if registered.
1077  */
1078 int mtd_device_unregister(struct mtd_info *master)
1079 {
1080 	int err;
1081 
1082 	if (master->_reboot) {
1083 		unregister_reboot_notifier(&master->reboot_notifier);
1084 		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
1085 	}
1086 
1087 	nvmem_unregister(master->otp_user_nvmem);
1088 	nvmem_unregister(master->otp_factory_nvmem);
1089 
1090 	err = del_mtd_partitions(master);
1091 	if (err)
1092 		return err;
1093 
1094 	if (!device_is_registered(&master->dev))
1095 		return 0;
1096 
1097 	return del_mtd_device(master);
1098 }
1099 EXPORT_SYMBOL_GPL(mtd_device_unregister);
1100 
1101 /**
1102  *	register_mtd_user - register a 'user' of MTD devices.
1103  *	@new: pointer to notifier info structure
1104  *
1105  *	Registers a pair of callback functions to be called upon addition
1106  *	or removal of MTD devices. Causes the 'add' callback to be immediately
1107  *	invoked for each MTD device currently present in the system.
1108  */
1109 void register_mtd_user (struct mtd_notifier *new)
1110 {
1111 	struct mtd_info *mtd;
1112 
1113 	mutex_lock(&mtd_table_mutex);
1114 
1115 	list_add(&new->list, &mtd_notifiers);
1116 
1117 	__module_get(THIS_MODULE);
1118 
1119 	mtd_for_each_device(mtd)
1120 		new->add(mtd);
1121 
1122 	mutex_unlock(&mtd_table_mutex);
1123 }
1124 EXPORT_SYMBOL_GPL(register_mtd_user);
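/*
 * Illustrative sketch (hypothetical names): a minimal MTD 'user'. The add()
 * callback runs once for every device already registered and then for each
 * new arrival; remove() runs when a device goes away.
 *
 *	static void example_add_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *	}
 *
 *	static void example_remove_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) is going away\n", mtd->index, mtd->name);
 *	}
 *
 *	static struct mtd_notifier example_notifier = {
 *		.add	= example_add_mtd,
 *		.remove	= example_remove_mtd,
 *	};
 *
 *	register_mtd_user(&example_notifier);
 */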
1125 
1126 /**
1127  *	unregister_mtd_user - unregister a 'user' of MTD devices.
1128  *	@old: pointer to notifier info structure
1129  *
1130  *	Removes a callback function pair from the list of 'users' to be
1131  *	notified upon addition or removal of MTD devices. Causes the
1132  *	'remove' callback to be immediately invoked for each MTD device
1133  *	currently present in the system.
1134  */
1135 int unregister_mtd_user (struct mtd_notifier *old)
1136 {
1137 	struct mtd_info *mtd;
1138 
1139 	mutex_lock(&mtd_table_mutex);
1140 
1141 	module_put(THIS_MODULE);
1142 
1143 	mtd_for_each_device(mtd)
1144 		old->remove(mtd);
1145 
1146 	list_del(&old->list);
1147 	mutex_unlock(&mtd_table_mutex);
1148 	return 0;
1149 }
1150 EXPORT_SYMBOL_GPL(unregister_mtd_user);
1151 
1152 /**
1153  *	get_mtd_device - obtain a validated handle for an MTD device
1154  *	@mtd: last known address of the required MTD device
1155  *	@num: internal device number of the required MTD device
1156  *
1157  *	Given a number and NULL address, return the num'th entry in the device
1158  *	table, if any.	Given an address and num == -1, search the device table
1159  *	for a device with that address and return if it's still present. Given
1160  *	both, return the num'th driver only if its address matches. Return
1161  *	error code if not.
1162  */
1163 struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
1164 {
1165 	struct mtd_info *ret = NULL, *other;
1166 	int err = -ENODEV;
1167 
1168 	mutex_lock(&mtd_table_mutex);
1169 
1170 	if (num == -1) {
1171 		mtd_for_each_device(other) {
1172 			if (other == mtd) {
1173 				ret = mtd;
1174 				break;
1175 			}
1176 		}
1177 	} else if (num >= 0) {
1178 		ret = idr_find(&mtd_idr, num);
1179 		if (mtd && mtd != ret)
1180 			ret = NULL;
1181 	}
1182 
1183 	if (!ret) {
1184 		ret = ERR_PTR(err);
1185 		goto out;
1186 	}
1187 
1188 	err = __get_mtd_device(ret);
1189 	if (err)
1190 		ret = ERR_PTR(err);
1191 out:
1192 	mutex_unlock(&mtd_table_mutex);
1193 	return ret;
1194 }
1195 EXPORT_SYMBOL_GPL(get_mtd_device);
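/*
 * Illustrative sketch: grabbing device number 0 by index and releasing it
 * again. Passing a non-NULL mtd instead revalidates a previously known
 * pointer, as described above.
 *
 *	struct mtd_info *mtd = get_mtd_device(NULL, 0);
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);
 */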
1196 
1197 
1198 int __get_mtd_device(struct mtd_info *mtd)
1199 {
1200 	struct mtd_info *master = mtd_get_master(mtd);
1201 	int err;
1202 
1203 	if (!try_module_get(master->owner))
1204 		return -ENODEV;
1205 
1206 	if (master->_get_device) {
1207 		err = master->_get_device(mtd);
1208 
1209 		if (err) {
1210 			module_put(master->owner);
1211 			return err;
1212 		}
1213 	}
1214 
1215 	master->usecount++;
1216 
1217 	while (mtd->parent) {
1218 		mtd->usecount++;
1219 		mtd = mtd->parent;
1220 	}
1221 
1222 	return 0;
1223 }
1224 EXPORT_SYMBOL_GPL(__get_mtd_device);
1225 
1226 /**
1227  * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
1228  *
1229  * @np: device tree node
1230  */
1231 struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
1232 {
1233 	struct mtd_info *mtd = NULL;
1234 	struct mtd_info *tmp;
1235 	int err;
1236 
1237 	mutex_lock(&mtd_table_mutex);
1238 
1239 	err = -EPROBE_DEFER;
1240 	mtd_for_each_device(tmp) {
1241 		if (mtd_get_of_node(tmp) == np) {
1242 			mtd = tmp;
1243 			err = __get_mtd_device(mtd);
1244 			break;
1245 		}
1246 	}
1247 
1248 	mutex_unlock(&mtd_table_mutex);
1249 
1250 	return err ? ERR_PTR(err) : mtd;
1251 }
1252 EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);
1253 
1254 /**
1255  *	get_mtd_device_nm - obtain a validated handle for an MTD device by
1256  *	device name
1257  *	@name: MTD device name to open
1258  *
1259  * 	This function returns an MTD device description structure on success
1260  * 	and an error pointer in case of failure.
1261  */
1262 struct mtd_info *get_mtd_device_nm(const char *name)
1263 {
1264 	int err = -ENODEV;
1265 	struct mtd_info *mtd = NULL, *other;
1266 
1267 	mutex_lock(&mtd_table_mutex);
1268 
1269 	mtd_for_each_device(other) {
1270 		if (!strcmp(name, other->name)) {
1271 			mtd = other;
1272 			break;
1273 		}
1274 	}
1275 
1276 	if (!mtd)
1277 		goto out_unlock;
1278 
1279 	err = __get_mtd_device(mtd);
1280 	if (err)
1281 		goto out_unlock;
1282 
1283 	mutex_unlock(&mtd_table_mutex);
1284 	return mtd;
1285 
1286 out_unlock:
1287 	mutex_unlock(&mtd_table_mutex);
1288 	return ERR_PTR(err);
1289 }
1290 EXPORT_SYMBOL_GPL(get_mtd_device_nm);
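/*
 * Illustrative sketch: looking a device up by name instead of by index.
 * "example-flash" is a hypothetical partition or device name.
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("example-flash");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);
 */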
1291 
1292 void put_mtd_device(struct mtd_info *mtd)
1293 {
1294 	mutex_lock(&mtd_table_mutex);
1295 	__put_mtd_device(mtd);
1296 	mutex_unlock(&mtd_table_mutex);
1297 
1298 }
1299 EXPORT_SYMBOL_GPL(put_mtd_device);
1300 
1301 void __put_mtd_device(struct mtd_info *mtd)
1302 {
1303 	struct mtd_info *master = mtd_get_master(mtd);
1304 
1305 	while (mtd->parent) {
1306 		--mtd->usecount;
1307 		BUG_ON(mtd->usecount < 0);
1308 		mtd = mtd->parent;
1309 	}
1310 
1311 	master->usecount--;
1312 
1313 	if (master->_put_device)
1314 		master->_put_device(master);
1315 
1316 	module_put(master->owner);
1317 }
1318 EXPORT_SYMBOL_GPL(__put_mtd_device);
1319 
1320 /*
1321  * Erase is a synchronous operation. Device drivers are expected to return a
1322  * negative error code if the operation failed and update instr->fail_addr
1323  * to point to the portion that was not properly erased.
1324  */
1325 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1326 {
1327 	struct mtd_info *master = mtd_get_master(mtd);
1328 	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
1329 	struct erase_info adjinstr;
1330 	int ret;
1331 
1332 	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1333 	adjinstr = *instr;
1334 
1335 	if (!mtd->erasesize || !master->_erase)
1336 		return -ENOTSUPP;
1337 
1338 	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1339 		return -EINVAL;
1340 	if (!(mtd->flags & MTD_WRITEABLE))
1341 		return -EROFS;
1342 
1343 	if (!instr->len)
1344 		return 0;
1345 
1346 	ledtrig_mtd_activity();
1347 
1348 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1349 		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
1350 				master->erasesize;
1351 		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
1352 				master->erasesize) -
1353 			       adjinstr.addr;
1354 	}
1355 
1356 	adjinstr.addr += mst_ofs;
1357 
1358 	ret = master->_erase(master, &adjinstr);
1359 
1360 	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
1361 		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
1362 		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1363 			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
1364 							 master);
1365 			instr->fail_addr *= mtd->erasesize;
1366 		}
1367 	}
1368 
1369 	return ret;
1370 }
1371 EXPORT_SYMBOL_GPL(mtd_erase);
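/*
 * Illustrative sketch: erasing a single erase block at a hypothetical,
 * erase-block-aligned offset "offs" and reporting where a failure occurred.
 *
 *	struct erase_info instr = {
 *		.addr = offs,
 *		.len  = mtd->erasesize,
 *	};
 *
 *	ret = mtd_erase(mtd, &instr);
 *	if (ret)
 *		pr_err("erase failed at 0x%llx\n",
 *		       (unsigned long long)instr.fail_addr);
 */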
1372 
1373 /*
1374  * This stuff is for eXecute-In-Place (XIP). phys is optional and may be set to NULL.
1375  */
1376 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1377 	      void **virt, resource_size_t *phys)
1378 {
1379 	struct mtd_info *master = mtd_get_master(mtd);
1380 
1381 	*retlen = 0;
1382 	*virt = NULL;
1383 	if (phys)
1384 		*phys = 0;
1385 	if (!master->_point)
1386 		return -EOPNOTSUPP;
1387 	if (from < 0 || from >= mtd->size || len > mtd->size - from)
1388 		return -EINVAL;
1389 	if (!len)
1390 		return 0;
1391 
1392 	from = mtd_get_master_ofs(mtd, from);
1393 	return master->_point(master, from, len, retlen, virt, phys);
1394 }
1395 EXPORT_SYMBOL_GPL(mtd_point);
1396 
1397 /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
1398 int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1399 {
1400 	struct mtd_info *master = mtd_get_master(mtd);
1401 
1402 	if (!master->_unpoint)
1403 		return -EOPNOTSUPP;
1404 	if (from < 0 || from >= mtd->size || len > mtd->size - from)
1405 		return -EINVAL;
1406 	if (!len)
1407 		return 0;
1408 	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1409 }
1410 EXPORT_SYMBOL_GPL(mtd_unpoint);
1411 
1412 /*
1413  * Allow NOMMU mmap() to directly map the device (if not NULL)
1414  * - return the address to which the offset maps
1415  * - return -ENOSYS to indicate refusal to do the mapping
1416  */
1417 unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1418 				    unsigned long offset, unsigned long flags)
1419 {
1420 	size_t retlen;
1421 	void *virt;
1422 	int ret;
1423 
1424 	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1425 	if (ret)
1426 		return ret;
1427 	if (retlen != len) {
1428 		mtd_unpoint(mtd, offset, retlen);
1429 		return -ENOSYS;
1430 	}
1431 	return (unsigned long)virt;
1432 }
1433 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1434 
1435 static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
1436 				 const struct mtd_ecc_stats *old_stats)
1437 {
1438 	struct mtd_ecc_stats diff;
1439 
1440 	if (master == mtd)
1441 		return;
1442 
1443 	diff = master->ecc_stats;
1444 	diff.failed -= old_stats->failed;
1445 	diff.corrected -= old_stats->corrected;
1446 
1447 	while (mtd->parent) {
1448 		mtd->ecc_stats.failed += diff.failed;
1449 		mtd->ecc_stats.corrected += diff.corrected;
1450 		mtd = mtd->parent;
1451 	}
1452 }
1453 
1454 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1455 	     u_char *buf)
1456 {
1457 	struct mtd_oob_ops ops = {
1458 		.len = len,
1459 		.datbuf = buf,
1460 	};
1461 	int ret;
1462 
1463 	ret = mtd_read_oob(mtd, from, &ops);
1464 	*retlen = ops.retlen;
1465 
1466 	return ret;
1467 }
1468 EXPORT_SYMBOL_GPL(mtd_read);
1469 
1470 int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1471 	      const u_char *buf)
1472 {
1473 	struct mtd_oob_ops ops = {
1474 		.len = len,
1475 		.datbuf = (u8 *)buf,
1476 	};
1477 	int ret;
1478 
1479 	ret = mtd_write_oob(mtd, to, &ops);
1480 	*retlen = ops.retlen;
1481 
1482 	return ret;
1483 }
1484 EXPORT_SYMBOL_GPL(mtd_write);
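/*
 * Illustrative sketch: a plain data read with the usual -EUCLEAN and
 * short-read handling. "offs", "len" and "buf" are hypothetical
 * caller-provided values.
 *
 *	size_t retlen;
 *
 *	ret = mtd_read(mtd, offs, len, &retlen, buf);
 *	if (ret == -EUCLEAN)
 *		pr_warn("bitflips above threshold, data corrected but block should be scrubbed\n");
 *	else if (ret)
 *		return ret;
 *	if (retlen != len)
 *		return -EIO;
 */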
1485 
1486 /*
1487  * In blackbox-flight-recorder-like scenarios we want to make successful writes
1488  * in interrupt context. panic_write() is only intended to be called when it's
1489  * known the kernel is about to panic and we need the write to succeed. Since
1490  * the kernel is not going to be running for much longer, this function can
1491  * break locks and delay to ensure the write succeeds (but not sleep).
1492  */
1493 int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1494 		    const u_char *buf)
1495 {
1496 	struct mtd_info *master = mtd_get_master(mtd);
1497 
1498 	*retlen = 0;
1499 	if (!master->_panic_write)
1500 		return -EOPNOTSUPP;
1501 	if (to < 0 || to >= mtd->size || len > mtd->size - to)
1502 		return -EINVAL;
1503 	if (!(mtd->flags & MTD_WRITEABLE))
1504 		return -EROFS;
1505 	if (!len)
1506 		return 0;
1507 	if (!master->oops_panic_write)
1508 		master->oops_panic_write = true;
1509 
1510 	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
1511 				    retlen, buf);
1512 }
1513 EXPORT_SYMBOL_GPL(mtd_panic_write);
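/*
 * Illustrative sketch: how a flight-recorder-style user (mtdoops-like code)
 * might emit its last record from a panic path. "offs", "len" and "buf" are
 * hypothetical.
 *
 *	size_t retlen;
 *
 *	ret = mtd_panic_write(mtd, offs, len, &retlen, buf);
 *	if (ret == -EOPNOTSUPP)
 *		return;		(the device provides no _panic_write hook)
 */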
1514 
1515 static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1516 			     struct mtd_oob_ops *ops)
1517 {
1518 	/*
1519 	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
1520 	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
1521 	 *  this case.
1522 	 */
1523 	if (!ops->datbuf)
1524 		ops->len = 0;
1525 
1526 	if (!ops->oobbuf)
1527 		ops->ooblen = 0;
1528 
1529 	if (offs < 0 || offs + ops->len > mtd->size)
1530 		return -EINVAL;
1531 
1532 	if (ops->ooblen) {
1533 		size_t maxooblen;
1534 
1535 		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1536 			return -EINVAL;
1537 
1538 		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1539 				      mtd_div_by_ws(offs, mtd)) *
1540 			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
1541 		if (ops->ooblen > maxooblen)
1542 			return -EINVAL;
1543 	}
1544 
1545 	return 0;
1546 }
1547 
1548 static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1549 			    struct mtd_oob_ops *ops)
1550 {
1551 	struct mtd_info *master = mtd_get_master(mtd);
1552 	int ret;
1553 
1554 	from = mtd_get_master_ofs(mtd, from);
1555 	if (master->_read_oob)
1556 		ret = master->_read_oob(master, from, ops);
1557 	else
1558 		ret = master->_read(master, from, ops->len, &ops->retlen,
1559 				    ops->datbuf);
1560 
1561 	return ret;
1562 }
1563 
1564 static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1565 			     struct mtd_oob_ops *ops)
1566 {
1567 	struct mtd_info *master = mtd_get_master(mtd);
1568 	int ret;
1569 
1570 	to = mtd_get_master_ofs(mtd, to);
1571 	if (master->_write_oob)
1572 		ret = master->_write_oob(master, to, ops);
1573 	else
1574 		ret = master->_write(master, to, ops->len, &ops->retlen,
1575 				     ops->datbuf);
1576 
1577 	return ret;
1578 }
1579 
1580 static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
1581 			       struct mtd_oob_ops *ops)
1582 {
1583 	struct mtd_info *master = mtd_get_master(mtd);
1584 	int ngroups = mtd_pairing_groups(master);
1585 	int npairs = mtd_wunit_per_eb(master) / ngroups;
1586 	struct mtd_oob_ops adjops = *ops;
1587 	unsigned int wunit, oobavail;
1588 	struct mtd_pairing_info info;
1589 	int max_bitflips = 0;
1590 	u32 ebofs, pageofs;
1591 	loff_t base, pos;
1592 
1593 	ebofs = mtd_mod_by_eb(start, mtd);
1594 	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
1595 	info.group = 0;
1596 	info.pair = mtd_div_by_ws(ebofs, mtd);
1597 	pageofs = mtd_mod_by_ws(ebofs, mtd);
1598 	oobavail = mtd_oobavail(mtd, ops);
1599 
1600 	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
1601 		int ret;
1602 
1603 		if (info.pair >= npairs) {
1604 			info.pair = 0;
1605 			base += master->erasesize;
1606 		}
1607 
1608 		wunit = mtd_pairing_info_to_wunit(master, &info);
1609 		pos = mtd_wunit_to_offset(mtd, base, wunit);
1610 
1611 		adjops.len = ops->len - ops->retlen;
1612 		if (adjops.len > mtd->writesize - pageofs)
1613 			adjops.len = mtd->writesize - pageofs;
1614 
1615 		adjops.ooblen = ops->ooblen - ops->oobretlen;
1616 		if (adjops.ooblen > oobavail - adjops.ooboffs)
1617 			adjops.ooblen = oobavail - adjops.ooboffs;
1618 
1619 		if (read) {
1620 			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
1621 			if (ret > 0)
1622 				max_bitflips = max(max_bitflips, ret);
1623 		} else {
1624 			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
1625 		}
1626 
1627 		if (ret < 0)
1628 			return ret;
1629 
1630 		max_bitflips = max(max_bitflips, ret);
1631 		ops->retlen += adjops.retlen;
1632 		ops->oobretlen += adjops.oobretlen;
1633 		adjops.datbuf += adjops.retlen;
1634 		adjops.oobbuf += adjops.oobretlen;
1635 		adjops.ooboffs = 0;
1636 		pageofs = 0;
1637 		info.pair++;
1638 	}
1639 
1640 	return max_bitflips;
1641 }
1642 
1643 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1644 {
1645 	struct mtd_info *master = mtd_get_master(mtd);
1646 	struct mtd_ecc_stats old_stats = master->ecc_stats;
1647 	int ret_code;
1648 
1649 	ops->retlen = ops->oobretlen = 0;
1650 
1651 	ret_code = mtd_check_oob_ops(mtd, from, ops);
1652 	if (ret_code)
1653 		return ret_code;
1654 
1655 	ledtrig_mtd_activity();
1656 
1657 	/* Check the validity of a potential fallback on mtd->_read */
1658 	if (!master->_read_oob && (!master->_read || ops->oobbuf))
1659 		return -EOPNOTSUPP;
1660 
1661 	if (ops->stats)
1662 		memset(ops->stats, 0, sizeof(*ops->stats));
1663 
1664 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1665 		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
1666 	else
1667 		ret_code = mtd_read_oob_std(mtd, from, ops);
1668 
1669 	mtd_update_ecc_stats(mtd, master, &old_stats);
1670 
1671 	/*
1672 	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
1673 	 * similar to mtd->_read(), returning a non-negative integer
1674 	 * representing max bitflips. In other cases, mtd->_read_oob() may
1675 	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
1676 	 */
1677 	if (unlikely(ret_code < 0))
1678 		return ret_code;
1679 	if (mtd->ecc_strength == 0)
1680 		return 0;	/* device lacks ecc */
1681 	if (ops->stats)
1682 		ops->stats->max_bitflips = ret_code;
1683 	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1684 }
1685 EXPORT_SYMBOL_GPL(mtd_read_oob);
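/*
 * Illustrative sketch: reading one page of data together with its
 * automatically placed OOB bytes. "page_offs", "databuf" and "oobbuf" are
 * hypothetical; the buffers must hold at least mtd->writesize and
 * mtd->oobavail bytes respectively.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.len	= mtd->writesize,
 *		.ooblen	= mtd->oobavail,
 *		.datbuf	= databuf,
 *		.oobbuf	= oobbuf,
 *	};
 *
 *	ret = mtd_read_oob(mtd, page_offs, &ops);
 *	if (ret < 0 && ret != -EUCLEAN)
 *		return ret;
 */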
1686 
1687 int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1688 				struct mtd_oob_ops *ops)
1689 {
1690 	struct mtd_info *master = mtd_get_master(mtd);
1691 	int ret;
1692 
1693 	ops->retlen = ops->oobretlen = 0;
1694 
1695 	if (!(mtd->flags & MTD_WRITEABLE))
1696 		return -EROFS;
1697 
1698 	ret = mtd_check_oob_ops(mtd, to, ops);
1699 	if (ret)
1700 		return ret;
1701 
1702 	ledtrig_mtd_activity();
1703 
1704 	/* Check the validity of a potential fallback on mtd->_write */
1705 	if (!master->_write_oob && (!master->_write || ops->oobbuf))
1706 		return -EOPNOTSUPP;
1707 
1708 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1709 		return mtd_io_emulated_slc(mtd, to, false, ops);
1710 
1711 	return mtd_write_oob_std(mtd, to, ops);
1712 }
1713 EXPORT_SYMBOL_GPL(mtd_write_oob);
1714 
1715 /**
1716  * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1717  * @mtd: MTD device structure
1718  * @section: ECC section. Depending on the layout you may have all the ECC
1719  *	     bytes stored in a single contiguous section, or one section
1720  *	     per ECC chunk (and sometimes several sections for a single
1721  *	     ECC chunk)
1722  * @oobecc: OOB region struct filled with the appropriate ECC position
1723  *	    information
1724  *
1725  * This function returns ECC section information in the OOB area. If you want
1726  * to get all the ECC bytes information, then you should call
1727  * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1728  *
1729  * Returns zero on success, a negative error code otherwise.
1730  */
1731 int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1732 		      struct mtd_oob_region *oobecc)
1733 {
1734 	struct mtd_info *master = mtd_get_master(mtd);
1735 
1736 	memset(oobecc, 0, sizeof(*oobecc));
1737 
1738 	if (!master || section < 0)
1739 		return -EINVAL;
1740 
1741 	if (!master->ooblayout || !master->ooblayout->ecc)
1742 		return -ENOTSUPP;
1743 
1744 	return master->ooblayout->ecc(master, section, oobecc);
1745 }
1746 EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
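/*
 * Illustrative sketch: walking every ECC section of the layout until
 * -ERANGE is returned, accumulating the total number of ECC bytes.
 *
 *	struct mtd_oob_region region;
 *	int section = 0, total = 0;
 *
 *	while (!mtd_ooblayout_ecc(mtd, section++, &region))
 *		total += region.length;
 */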
1747 
1748 /**
1749  * mtd_ooblayout_free - Get the OOB region definition of a specific free
1750  *			section
1751  * @mtd: MTD device structure
1752  * @section: Free section you are interested in. Depending on the layout
1753  *	     you may have all the free bytes stored in a single contiguous
1754  *	     section, or one section per ECC chunk plus an extra section
1755  *	     for the remaining bytes (or other funky layout).
1756  * @oobfree: OOB region struct filled with the appropriate free position
1757  *	     information
1758  *
1759  * This function returns free bytes position in the OOB area. If you want
1760  * to get all the free bytes information, then you should call
1761  * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1762  *
1763  * Returns zero on success, a negative error code otherwise.
1764  */
1765 int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1766 		       struct mtd_oob_region *oobfree)
1767 {
1768 	struct mtd_info *master = mtd_get_master(mtd);
1769 
1770 	memset(oobfree, 0, sizeof(*oobfree));
1771 
1772 	if (!master || section < 0)
1773 		return -EINVAL;
1774 
1775 	if (!master->ooblayout || !master->ooblayout->free)
1776 		return -ENOTSUPP;
1777 
1778 	return master->ooblayout->free(master, section, oobfree);
1779 }
1780 EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1781 
1782 /**
1783  * mtd_ooblayout_find_region - Find the region attached to a specific byte
1784  * @mtd: mtd info structure
1785  * @byte: the byte we are searching for
1786  * @sectionp: pointer where the section id will be stored
1787  * @oobregion: used to retrieve the ECC position
1788  * @iter: iterator function. Should be either mtd_ooblayout_free or
1789  *	  mtd_ooblayout_ecc depending on the region type you're searching for
1790  *
1791  * This function returns the section id and oobregion information of a
1792  * specific byte. For example, say you want to know where the 4th ECC byte is
1793  * stored, you'll use:
1794  *
1795  * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1796  *
1797  * Returns zero on success, a negative error code otherwise.
1798  */
1799 static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1800 				int *sectionp, struct mtd_oob_region *oobregion,
1801 				int (*iter)(struct mtd_info *,
1802 					    int section,
1803 					    struct mtd_oob_region *oobregion))
1804 {
1805 	int pos = 0, ret, section = 0;
1806 
1807 	memset(oobregion, 0, sizeof(*oobregion));
1808 
1809 	while (1) {
1810 		ret = iter(mtd, section, oobregion);
1811 		if (ret)
1812 			return ret;
1813 
1814 		if (pos + oobregion->length > byte)
1815 			break;
1816 
1817 		pos += oobregion->length;
1818 		section++;
1819 	}
1820 
1821 	/*
1822 	 * Adjust region info to make it start at the beginning of the
1823 	 * 'start' ECC byte.
1824 	 */
1825 	oobregion->offset += byte - pos;
1826 	oobregion->length -= byte - pos;
1827 	*sectionp = section;
1828 
1829 	return 0;
1830 }
1831 
1832 /**
1833  * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1834  *				  ECC byte
1835  * @mtd: mtd info structure
1836  * @eccbyte: the byte we are searching for
1837  * @section: pointer where the section id will be stored
1838  * @oobregion: OOB region information
1839  *
1840  * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1841  * byte.
1842  *
1843  * Returns zero on success, a negative error code otherwise.
1844  */
1845 int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1846 				 int *section,
1847 				 struct mtd_oob_region *oobregion)
1848 {
1849 	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1850 					 mtd_ooblayout_ecc);
1851 }
1852 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1853 
1854 /**
1855  * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1856  * @mtd: mtd info structure
1857  * @buf: destination buffer to store OOB bytes
1858  * @oobbuf: OOB buffer
1859  * @start: first byte to retrieve
1860  * @nbytes: number of bytes to retrieve
1861  * @iter: section iterator
1862  *
1863  * Extract bytes attached to a specific category (ECC or free)
1864  * from the OOB buffer and copy them into buf.
1865  *
1866  * Returns zero on success, a negative error code otherwise.
1867  */
1868 static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1869 				const u8 *oobbuf, int start, int nbytes,
1870 				int (*iter)(struct mtd_info *,
1871 					    int section,
1872 					    struct mtd_oob_region *oobregion))
1873 {
1874 	struct mtd_oob_region oobregion;
1875 	int section, ret;
1876 
1877 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1878 					&oobregion, iter);
1879 
1880 	while (!ret) {
1881 		int cnt;
1882 
1883 		cnt = min_t(int, nbytes, oobregion.length);
1884 		memcpy(buf, oobbuf + oobregion.offset, cnt);
1885 		buf += cnt;
1886 		nbytes -= cnt;
1887 
1888 		if (!nbytes)
1889 			break;
1890 
1891 		ret = iter(mtd, ++section, &oobregion);
1892 	}
1893 
1894 	return ret;
1895 }
1896 
1897 /**
1898  * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1899  * @mtd: mtd info structure
1900  * @buf: source buffer to get OOB bytes from
1901  * @oobbuf: OOB buffer
1902  * @start: first OOB byte to set
1903  * @nbytes: number of OOB bytes to set
1904  * @iter: section iterator
1905  *
1906  * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1907  * is selected by passing the appropriate iterator.
1908  *
1909  * Returns zero on success, a negative error code otherwise.
1910  */
1911 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1912 				u8 *oobbuf, int start, int nbytes,
1913 				int (*iter)(struct mtd_info *,
1914 					    int section,
1915 					    struct mtd_oob_region *oobregion))
1916 {
1917 	struct mtd_oob_region oobregion;
1918 	int section, ret;
1919 
1920 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1921 					&oobregion, iter);
1922 
1923 	while (!ret) {
1924 		int cnt;
1925 
1926 		cnt = min_t(int, nbytes, oobregion.length);
1927 		memcpy(oobbuf + oobregion.offset, buf, cnt);
1928 		buf += cnt;
1929 		nbytes -= cnt;
1930 
1931 		if (!nbytes)
1932 			break;
1933 
1934 		ret = iter(mtd, ++section, &oobregion);
1935 	}
1936 
1937 	return ret;
1938 }
1939 
1940 /**
1941  * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
1942  * @mtd: mtd info structure
1943  * @iter: category iterator
1944  *
1945  * Count the number of bytes in a given category.
1946  *
1947  * Returns the number of bytes in the category on success, a negative error
1947  * code otherwise.
1948  */
1949 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1950 				int (*iter)(struct mtd_info *,
1951 					    int section,
1952 					    struct mtd_oob_region *oobregion))
1953 {
1954 	struct mtd_oob_region oobregion;
1955 	int section = 0, ret, nbytes = 0;
1956 
1957 	while (1) {
1958 		ret = iter(mtd, section++, &oobregion);
1959 		if (ret) {
1960 			if (ret == -ERANGE)
1961 				ret = nbytes;
1962 			break;
1963 		}
1964 
1965 		nbytes += oobregion.length;
1966 	}
1967 
1968 	return ret;
1969 }
1970 
1971 /**
1972  * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1973  * @mtd: mtd info structure
1974  * @eccbuf: destination buffer to store ECC bytes
1975  * @oobbuf: OOB buffer
1976  * @start: first ECC byte to retrieve
1977  * @nbytes: number of ECC bytes to retrieve
1978  *
1979  * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1980  *
1981  * Returns zero on success, a negative error code otherwise.
1982  */
1983 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1984 			       const u8 *oobbuf, int start, int nbytes)
1985 {
1986 	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1987 				       mtd_ooblayout_ecc);
1988 }
1989 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1990 
1991 /**
1992  * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1993  * @mtd: mtd info structure
1994  * @eccbuf: source buffer to get ECC bytes from
1995  * @oobbuf: OOB buffer
1996  * @start: first ECC byte to set
1997  * @nbytes: number of ECC bytes to set
1998  *
1999  * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
2000  *
2001  * Returns zero on success, a negative error code otherwise.
2002  */
2003 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
2004 			       u8 *oobbuf, int start, int nbytes)
2005 {
2006 	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2007 				       mtd_ooblayout_ecc);
2008 }
2009 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
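
/*
 * Illustrative sketch (hypothetical helper): gathering all ECC bytes out of a
 * raw OOB buffer and scattering them back. 'oobbuf' is assumed to hold
 * mtd->oobsize bytes read in raw mode; 'eccbuf' must be at least
 * mtd_ooblayout_count_eccbytes(mtd) bytes long.
 */
static int __maybe_unused example_copy_ecc(struct mtd_info *mtd, u8 *eccbuf,
					   u8 *oobbuf)
{
	int nbytes = mtd_ooblayout_count_eccbytes(mtd);
	int ret;

	if (nbytes < 0)
		return nbytes;

	/* Gather every ECC byte, starting from ECC byte 0. */
	ret = mtd_ooblayout_get_eccbytes(mtd, eccbuf, oobbuf, 0, nbytes);
	if (ret)
		return ret;

	/* ... possibly transform them ... then put them back in place. */
	return mtd_ooblayout_set_eccbytes(mtd, eccbuf, oobbuf, 0, nbytes);
}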
2010 
2011 /**
2012  * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
2013  * @mtd: mtd info structure
2014  * @databuf: destination buffer to store data bytes
2015  * @oobbuf: OOB buffer
2016  * @start: first free byte to retrieve
2017  * @nbytes: number of free bytes to retrieve
2018  *
2019  * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
2020  *
2021  * Returns zero on success, a negative error code otherwise.
2022  */
2023 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
2024 				const u8 *oobbuf, int start, int nbytes)
2025 {
2026 	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
2027 				       mtd_ooblayout_free);
2028 }
2029 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
2030 
2031 /**
2032  * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
2033  * @mtd: mtd info structure
2034  * @databuf: source buffer to get data bytes from
2035  * @oobbuf: OOB buffer
2036  * @start: first free byte to set
2037  * @nbytes: number of free bytes to set
2038  *
2039  * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
2040  *
2041  * Returns zero on success, a negative error code otherwise.
2042  */
2043 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
2044 				u8 *oobbuf, int start, int nbytes)
2045 {
2046 	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
2047 				       mtd_ooblayout_free);
2048 }
2049 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
2050 
2051 /**
2052  * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
2053  * @mtd: mtd info structure
2054  *
2055  * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
2056  *
2057  * Returns the number of free bytes on success, a negative error code otherwise.
2058  */
2059 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
2060 {
2061 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
2062 }
2063 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
2064 
2065 /**
2066  * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
2067  * @mtd: mtd info structure
2068  *
2069  * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
2070  *
2071  * Returns the number of ECC bytes on success, a negative error code otherwise.
2072  */
2073 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
2074 {
2075 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
2076 }
2077 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
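
/*
 * Illustrative sketch (hypothetical helper): sizing scratch buffers from the
 * OOB layout. The allocation strategy and GFP flags are assumptions of this
 * sketch, not requirements of the API.
 */
static int __maybe_unused example_alloc_oob_scratch(struct mtd_info *mtd,
						    u8 **eccbuf, u8 **freebuf)
{
	int eccbytes = mtd_ooblayout_count_eccbytes(mtd);
	int freebytes = mtd_ooblayout_count_freebytes(mtd);

	if (eccbytes < 0)
		return eccbytes;
	if (freebytes < 0)
		return freebytes;

	*eccbuf = kmalloc(eccbytes, GFP_KERNEL);
	*freebuf = kmalloc(freebytes, GFP_KERNEL);
	if (!*eccbuf || !*freebuf) {
		kfree(*eccbuf);
		kfree(*freebuf);
		return -ENOMEM;
	}

	return 0;
}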
2078 
2079 /*
2080  * Methods to access the protection register area, present in some flash
2081  * devices. The user data is one-time programmable but the factory data is
2082  * read only.
2083  */
2084 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2085 			   struct otp_info *buf)
2086 {
2087 	struct mtd_info *master = mtd_get_master(mtd);
2088 
2089 	if (!master->_get_fact_prot_info)
2090 		return -EOPNOTSUPP;
2091 	if (!len)
2092 		return 0;
2093 	return master->_get_fact_prot_info(master, len, retlen, buf);
2094 }
2095 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2096 
2097 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2098 			   size_t *retlen, u_char *buf)
2099 {
2100 	struct mtd_info *master = mtd_get_master(mtd);
2101 
2102 	*retlen = 0;
2103 	if (!master->_read_fact_prot_reg)
2104 		return -EOPNOTSUPP;
2105 	if (!len)
2106 		return 0;
2107 	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2108 }
2109 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2110 
2111 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2112 			   struct otp_info *buf)
2113 {
2114 	struct mtd_info *master = mtd_get_master(mtd);
2115 
2116 	if (!master->_get_user_prot_info)
2117 		return -EOPNOTSUPP;
2118 	if (!len)
2119 		return 0;
2120 	return master->_get_user_prot_info(master, len, retlen, buf);
2121 }
2122 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2123 
2124 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2125 			   size_t *retlen, u_char *buf)
2126 {
2127 	struct mtd_info *master = mtd_get_master(mtd);
2128 
2129 	*retlen = 0;
2130 	if (!master->_read_user_prot_reg)
2131 		return -EOPNOTSUPP;
2132 	if (!len)
2133 		return 0;
2134 	return master->_read_user_prot_reg(master, from, len, retlen, buf);
2135 }
2136 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
2137 
2138 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2139 			    size_t *retlen, const u_char *buf)
2140 {
2141 	struct mtd_info *master = mtd_get_master(mtd);
2142 	int ret;
2143 
2144 	*retlen = 0;
2145 	if (!master->_write_user_prot_reg)
2146 		return -EOPNOTSUPP;
2147 	if (!len)
2148 		return 0;
2149 	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2150 	if (ret)
2151 		return ret;
2152 
2153 	/*
2154 	 * If no data could be written at all, the OTP region is full and we
2155 	 * must return -ENOSPC.
2156 	 */
2157 	return (*retlen) ? 0 : -ENOSPC;
2158 }
2159 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
2160 
2161 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2162 {
2163 	struct mtd_info *master = mtd_get_master(mtd);
2164 
2165 	if (!master->_lock_user_prot_reg)
2166 		return -EOPNOTSUPP;
2167 	if (!len)
2168 		return 0;
2169 	return master->_lock_user_prot_reg(master, from, len);
2170 }
2171 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2172 
2173 int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2174 {
2175 	struct mtd_info *master = mtd_get_master(mtd);
2176 
2177 	if (!master->_erase_user_prot_reg)
2178 		return -EOPNOTSUPP;
2179 	if (!len)
2180 		return 0;
2181 	return master->_erase_user_prot_reg(master, from, len);
2182 }
2183 EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
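
/*
 * Illustrative sketch (hypothetical helper): reading a blob back from the
 * user OTP area. The offset (0) and length are assumptions of this sketch;
 * whether the area exists at all depends on the underlying chip driver.
 */
static int __maybe_unused example_read_user_otp(struct mtd_info *mtd,
						u_char *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = mtd_read_user_prot_reg(mtd, 0, len, &retlen, buf);
	if (ret)
		return ret;

	if (retlen != len)
		pr_warn("short OTP read: %zu of %zu bytes\n", retlen, len);

	return 0;
}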
2184 
2185 /* Chip-supported device locking */
2186 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2187 {
2188 	struct mtd_info *master = mtd_get_master(mtd);
2189 
2190 	if (!master->_lock)
2191 		return -EOPNOTSUPP;
2192 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2193 		return -EINVAL;
2194 	if (!len)
2195 		return 0;
2196 
2197 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2198 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2199 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2200 	}
2201 
2202 	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2203 }
2204 EXPORT_SYMBOL_GPL(mtd_lock);
2205 
2206 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2207 {
2208 	struct mtd_info *master = mtd_get_master(mtd);
2209 
2210 	if (!master->_unlock)
2211 		return -EOPNOTSUPP;
2212 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2213 		return -EINVAL;
2214 	if (!len)
2215 		return 0;
2216 
2217 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2218 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2219 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2220 	}
2221 
2222 	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2223 }
2224 EXPORT_SYMBOL_GPL(mtd_unlock);
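
/*
 * Illustrative sketch (hypothetical helper): unlocking one erase block before
 * writing to it. A real caller would also erase before writing; that step is
 * omitted here and 'ofs' is assumed to be erase-block aligned.
 */
static int __maybe_unused example_unlock_and_write(struct mtd_info *mtd,
						   loff_t ofs, const u_char *buf,
						   size_t len)
{
	size_t retlen;
	int ret;

	ret = mtd_unlock(mtd, ofs, mtd->erasesize);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	return mtd_write(mtd, ofs, len, &retlen, buf);
}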
2225 
2226 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2227 {
2228 	struct mtd_info *master = mtd_get_master(mtd);
2229 
2230 	if (!master->_is_locked)
2231 		return -EOPNOTSUPP;
2232 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2233 		return -EINVAL;
2234 	if (!len)
2235 		return 0;
2236 
2237 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2238 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2239 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2240 	}
2241 
2242 	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2243 }
2244 EXPORT_SYMBOL_GPL(mtd_is_locked);
2245 
2246 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2247 {
2248 	struct mtd_info *master = mtd_get_master(mtd);
2249 
2250 	if (ofs < 0 || ofs >= mtd->size)
2251 		return -EINVAL;
2252 	if (!master->_block_isreserved)
2253 		return 0;
2254 
2255 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2256 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2257 
2258 	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2259 }
2260 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2261 
2262 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2263 {
2264 	struct mtd_info *master = mtd_get_master(mtd);
2265 
2266 	if (ofs < 0 || ofs >= mtd->size)
2267 		return -EINVAL;
2268 	if (!master->_block_isbad)
2269 		return 0;
2270 
2271 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2272 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2273 
2274 	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2275 }
2276 EXPORT_SYMBOL_GPL(mtd_block_isbad);
2277 
2278 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2279 {
2280 	struct mtd_info *master = mtd_get_master(mtd);
2281 	int ret;
2282 
2283 	if (!master->_block_markbad)
2284 		return -EOPNOTSUPP;
2285 	if (ofs < 0 || ofs >= mtd->size)
2286 		return -EINVAL;
2287 	if (!(mtd->flags & MTD_WRITEABLE))
2288 		return -EROFS;
2289 
2290 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2291 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2292 
2293 	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2294 	if (ret)
2295 		return ret;
2296 
2297 	while (mtd->parent) {
2298 		mtd->ecc_stats.badblocks++;
2299 		mtd = mtd->parent;
2300 	}
2301 
2302 	return 0;
2303 }
2304 EXPORT_SYMBOL_GPL(mtd_block_markbad);
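
/*
 * Illustrative sketch (hypothetical helper): walking the device erase block
 * by erase block and counting blocks already marked bad, the way a
 * bad-block-aware scan might.
 */
static int __maybe_unused example_count_bad_blocks(struct mtd_info *mtd)
{
	int bad = 0;
	loff_t ofs;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		int ret = mtd_block_isbad(mtd, ofs);

		if (ret < 0)
			return ret;
		if (ret)
			bad++;
	}

	return bad;
}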
2305 
2306 /*
2307  * default_mtd_writev - the default writev method
2308  * @mtd: mtd device description object pointer
2309  * @vecs: the vectors to write
2310  * @count: count of vectors in @vecs
2311  * @to: the MTD device offset to write to
2312  * @retlen: on exit contains the count of bytes written to the MTD device.
2313  *
2314  * This function returns zero in case of success and a negative error code in
2315  * case of failure.
2316  */
2317 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2318 			      unsigned long count, loff_t to, size_t *retlen)
2319 {
2320 	unsigned long i;
2321 	size_t totlen = 0, thislen;
2322 	int ret = 0;
2323 
2324 	for (i = 0; i < count; i++) {
2325 		if (!vecs[i].iov_len)
2326 			continue;
2327 		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2328 				vecs[i].iov_base);
2329 		totlen += thislen;
2330 		if (ret || thislen != vecs[i].iov_len)
2331 			break;
2332 		to += vecs[i].iov_len;
2333 	}
2334 	*retlen = totlen;
2335 	return ret;
2336 }
2337 
2338 /*
2339  * mtd_writev - the vector-based MTD write method
2340  * @mtd: mtd device description object pointer
2341  * @vecs: the vectors to write
2342  * @count: count of vectors in @vecs
2343  * @to: the MTD device offset to write to
2344  * @retlen: on exit contains the count of bytes written to the MTD device.
2345  *
2346  * This function returns zero in case of success and a negative error code in
2347  * case of failure.
2348  */
2349 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2350 	       unsigned long count, loff_t to, size_t *retlen)
2351 {
2352 	struct mtd_info *master = mtd_get_master(mtd);
2353 
2354 	*retlen = 0;
2355 	if (!(mtd->flags & MTD_WRITEABLE))
2356 		return -EROFS;
2357 
2358 	if (!master->_writev)
2359 		return default_mtd_writev(mtd, vecs, count, to, retlen);
2360 
2361 	return master->_writev(master, vecs, count,
2362 			       mtd_get_master_ofs(mtd, to), retlen);
2363 }
2364 EXPORT_SYMBOL_GPL(mtd_writev);
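
/*
 * Illustrative sketch (hypothetical helper): writing a header and a payload
 * in one mtd_writev() call. The two source buffers and the target offset are
 * assumptions of this sketch.
 */
static int __maybe_unused example_writev(struct mtd_info *mtd, loff_t to,
					 void *hdr, size_t hdrlen,
					 void *payload, size_t payloadlen)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr, .iov_len = hdrlen },
		{ .iov_base = payload, .iov_len = payloadlen },
	};
	size_t retlen;
	int ret;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (!ret && retlen != hdrlen + payloadlen)
		ret = -EIO;

	return ret;
}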
2365 
2366 /**
2367  * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2368  * @mtd: mtd device description object pointer
2369  * @size: a pointer to the ideal or maximum size of the allocation, points
2370  *        to the actual allocation size on success.
2371  *
2372  * This routine attempts to allocate a contiguous kernel buffer up to
2373  * the specified size, backing off the size of the request exponentially
2374  * until the request succeeds or until the allocation size falls below
2375  * the system page size. This attempts to make sure it does not adversely
2376  * impact system performance, so when allocating more than one page, we
2377  * ask the memory allocator to avoid re-trying, swapping, writing back
2378  * or performing I/O.
2379  *
2380  * Note, this function also makes sure that the allocated buffer is aligned to
2381  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2382  *
2383  * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2384  * to handle smaller (i.e. degraded) buffer allocations under low-memory or
2385  * fragmented-memory situations where such reduced allocations, relative to
2386  * the requested ideal, are allowed.
2387  *
2388  * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2389  */
2390 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2391 {
2392 	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2393 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2394 	void *kbuf;
2395 
2396 	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2397 
2398 	while (*size > min_alloc) {
2399 		kbuf = kmalloc(*size, flags);
2400 		if (kbuf)
2401 			return kbuf;
2402 
2403 		*size >>= 1;
2404 		*size = ALIGN(*size, mtd->writesize);
2405 	}
2406 
2407 	/*
2408 	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2409 	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2410 	 */
2411 	return kmalloc(*size, GFP_KERNEL);
2412 }
2413 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
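
/*
 * Illustrative sketch (hypothetical helper): asking for a whole-eraseblock
 * buffer but accepting whatever smaller, writesize-aligned buffer the
 * allocator can provide, then reading the block chunk by chunk into 'dst'
 * (assumed to be mtd->erasesize bytes long).
 */
static int __maybe_unused example_read_eraseblock(struct mtd_info *mtd,
						  loff_t ofs, u_char *dst)
{
	size_t chunk = mtd->erasesize;
	size_t retlen, done = 0;
	void *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &chunk);
	if (!buf)
		return -ENOMEM;

	while (done < mtd->erasesize) {
		size_t len = min_t(size_t, chunk, mtd->erasesize - done);

		ret = mtd_read(mtd, ofs + done, len, &retlen, buf);
		if (ret || !retlen)
			break;

		memcpy(dst + done, buf, retlen);
		done += retlen;
	}

	kfree(buf);
	return ret;
}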
2414 
2415 #ifdef CONFIG_PROC_FS
2416 
2417 /*====================================================================*/
2418 /* Support for /proc/mtd */
2419 
2420 static int mtd_proc_show(struct seq_file *m, void *v)
2421 {
2422 	struct mtd_info *mtd;
2423 
2424 	seq_puts(m, "dev:    size   erasesize  name\n");
2425 	mutex_lock(&mtd_table_mutex);
2426 	mtd_for_each_device(mtd) {
2427 		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2428 			   mtd->index, (unsigned long long)mtd->size,
2429 			   mtd->erasesize, mtd->name);
2430 	}
2431 	mutex_unlock(&mtd_table_mutex);
2432 	return 0;
2433 }
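
/*
 * A /proc/mtd listing produced by mtd_proc_show() looks roughly like this
 * (the device names and sizes below are, of course, illustrative):
 *
 *   dev:    size   erasesize  name
 *   mtd0: 00080000 00020000 "bootloader"
 *   mtd1: 07f80000 00020000 "rootfs"
 */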
2434 #endif /* CONFIG_PROC_FS */
2435 
2436 /*====================================================================*/
2437 /* Init code */
2438 
2439 static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2440 {
2441 	struct backing_dev_info *bdi;
2442 	int ret;
2443 
2444 	bdi = bdi_alloc(NUMA_NO_NODE);
2445 	if (!bdi)
2446 		return ERR_PTR(-ENOMEM);
2447 	bdi->ra_pages = 0;
2448 	bdi->io_pages = 0;
2449 
2450 	/*
2451 	 * We append a '-0' suffix to the name to keep the name format we used
2452 	 * to get. Since this is called only once, the name is unique.
2453 	 */
2454 	ret = bdi_register(bdi, "%.28s-0", name);
2455 	if (ret)
2456 		bdi_put(bdi);
2457 
2458 	return ret ? ERR_PTR(ret) : bdi;
2459 }
2460 
2461 static struct proc_dir_entry *proc_mtd;
2462 
2463 static int __init init_mtd(void)
2464 {
2465 	int ret;
2466 
2467 	ret = class_register(&mtd_class);
2468 	if (ret)
2469 		goto err_reg;
2470 
2471 	mtd_bdi = mtd_bdi_init("mtd");
2472 	if (IS_ERR(mtd_bdi)) {
2473 		ret = PTR_ERR(mtd_bdi);
2474 		goto err_bdi;
2475 	}
2476 
2477 	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2478 
2479 	ret = init_mtdchar();
2480 	if (ret)
2481 		goto out_procfs;
2482 
2483 	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2484 	debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2485 			    &mtd_expert_analysis_mode);
2486 
2487 	return 0;
2488 
2489 out_procfs:
2490 	if (proc_mtd)
2491 		remove_proc_entry("mtd", NULL);
2492 	bdi_unregister(mtd_bdi);
2493 	bdi_put(mtd_bdi);
2494 err_bdi:
2495 	class_unregister(&mtd_class);
2496 err_reg:
2497 	pr_err("Error registering mtd class or bdi: %d\n", ret);
2498 	return ret;
2499 }
2500 
2501 static void __exit cleanup_mtd(void)
2502 {
2503 	debugfs_remove_recursive(dfs_dir_mtd);
2504 	cleanup_mtdchar();
2505 	if (proc_mtd)
2506 		remove_proc_entry("mtd", NULL);
2507 	class_unregister(&mtd_class);
2508 	bdi_unregister(mtd_bdi);
2509 	bdi_put(mtd_bdi);
2510 	idr_destroy(&mtd_idr);
2511 }
2512 
2513 module_init(init_mtd);
2514 module_exit(cleanup_mtd);
2515 
2516 MODULE_LICENSE("GPL");
2517 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2518 MODULE_DESCRIPTION("Core MTD registration and access routines");
2519