// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006      Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/root_dev.h>
#include <linux/error-injection.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	idr_remove(&mtd_idr, mtd->index);
	of_node_put(mtd_get_of_node(mtd));

	if (mtd_is_partition(mtd))
		release_mtd_partition(mtd);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static void mtd_device_release(struct kref *kref)
{
	struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
	bool is_partition = mtd_is_partition(mtd);

	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	/* Try to remove the NVMEM provider */
	nvmem_unregister(mtd->nvmem);

	device_unregister(&mtd->dev);

	/*
	 *  Clear dev so mtd can be safely re-registered later if desired.
	 *  Should not be done for partition,
	 *  as it was already destroyed in device_unregister().
	 */
	if (!is_partition)
		memset(&mtd->dev, 0, sizeof(mtd->dev));

	module_put(THIS_MODULE);
}

#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)

#define MTD_DEVICE_ATTR_RW(name) \
static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);

static ssize_t mtd_oobavail_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);

static ssize_t mtd_corrected_bits_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */

static ssize_t mtd_ecc_failures_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */

static ssize_t mtd_bad_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);

static ssize_t mtd_bbt_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name		= "mtd",
	.groups		= mtd_groups,
	.release	= mtd_release,
};

static bool mtd_expert_analysis_mode;

#ifdef CONFIG_DEBUG_FS
bool mtd_check_expert_analysis_mode(void)
{
	const char *mtd_expert_analysis_warning =
		"Bad block checks have been entirely disabled.\n"
		"This is only reserved for post-mortem forensics and debug purposes.\n"
		"Never enable this mode if you do not know what you are doing!\n";

	return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
}
EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
#endif

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct device *dev = &mtd->dev;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated with the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.group = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a non-negative number representing the wunit associated with the
 * info struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return master->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);
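
/*
 * Illustrative sketch (compiled out, not part of mtdcore.c): enumerate all
 * wunits paired with @wunit by combining the three pairing helpers above,
 * following the loop outlined in the mtd_wunit_to_pairing_info() kernel-doc.
 * The example_* name and the pr_info() reporting are hypothetical.
 */
#if 0
static int example_dump_paired_wunits(struct mtd_info *mtd, int wunit)
{
	struct mtd_pairing_info info;
	int i, other, ret;

	ret = mtd_wunit_to_pairing_info(mtd, wunit, &info);
	if (ret)
		return ret;

	/* Keep info.pair fixed, walk every group of that pair */
	for (i = 0; i < mtd_pairing_groups(mtd); i++) {
		info.group = i;
		other = mtd_pairing_info_to_wunit(mtd, &info);
		if (other < 0)
			return other;
		pr_info("wunit %d: group %d -> wunit %d\n", wunit, i, other);
	}

	return 0;
}
#endif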

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct device_node *node = mtd_get_of_node(mtd);
	struct nvmem_config config = {};

	config.id = NVMEM_DEVID_NONE;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.ignore_wp = true;
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP)
			mtd->nvmem = NULL;
		else
			return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem),
					     "Failed to register NVMEM device\n");
	}

	return 0;
}

static void mtd_check_of_node(struct mtd_info *mtd)
{
	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
	const char *pname, *prefix = "partition-";
	int plen, mtd_name_len, offset, prefix_len;

	/* Check if MTD already has a device node */
	if (mtd_get_of_node(mtd))
		return;

	if (!mtd_is_partition(mtd))
		return;

	parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
	if (!parent_dn)
		return;

	if (mtd_is_partition(mtd->parent))
		partitions = of_node_get(parent_dn);
	else
		partitions = of_get_child_by_name(parent_dn, "partitions");
	if (!partitions)
		goto exit_parent;

	prefix_len = strlen(prefix);
	mtd_name_len = strlen(mtd->name);

	/* Search if a partition is defined with the same name */
	for_each_child_of_node(partitions, mtd_dn) {
		/* Skip partition with no/wrong prefix */
		if (!of_node_name_prefix(mtd_dn, prefix))
			continue;

		/* Labels have priority. Check them first */
		if (!of_property_read_string(mtd_dn, "label", &pname)) {
			offset = 0;
		} else {
			pname = mtd_dn->name;
			offset = prefix_len;
		}

		plen = strlen(pname) - offset;
		if (plen == mtd_name_len &&
		    !strncmp(mtd->name, pname + offset, plen)) {
			mtd_set_of_node(mtd, mtd_dn);
			of_node_put(mtd_dn);
			break;
		}
	}

	of_node_put(partitions);
exit_parent:
	of_node_put(parent_dn);
}

/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently active MTD 'user' of its arrival. Returns
 *	zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct device_node *np = mtd_get_of_node(mtd);
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error, ofidx;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
	 * master is an MLC NAND and has a proper pairing scheme defined.
	 * We also reject masters that implement ->_writev() for now, because
	 * NAND controller drivers don't implement this hook, and adding the
	 * SLC -> MLC address/length conversion to this path is useless if we
	 * don't have a user.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	ofidx = -1;
	if (np)
		ofidx = of_alias_get_id(np, "mtd");
	if (ofidx >= 0)
		i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
	else
		i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	kref_init(&mtd->refcnt);

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	error = dev_set_name(&mtd->dev, "mtd%d", i);
	if (error)
		goto fail_devname;
	dev_set_drvdata(&mtd->dev, mtd);
	mtd_check_of_node(mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error) {
		put_device(&mtd->dev);
		goto fail_added;
	}

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);

	if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
		if (IS_BUILTIN(CONFIG_MTD)) {
			pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
		} else {
			pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
				mtd->index, mtd->name);
		}
	}

	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
fail_devname:
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently active MTD 'user' of its departure.
 *	Returns zero on success or -ENODEV on failure, which currently will
 *	happen if the requested device does not appear to be present in the list.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	kref_put(&mtd->refcnt, mtd_device_release);
	ret = 0;

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}

static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
	struct otp_info *info;
	ssize_t size = 0;
	unsigned int i;
	size_t retlen;
	int ret;

	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (is_user)
		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
	else
		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
	if (ret)
		goto err;

	for (i = 0; i < retlen / sizeof(*info); i++)
		size += info[i].length;

	kfree(info);
	return size;

err:
	kfree(info);

	/* ENODATA means there is no OTP region. */
	return ret == -ENODATA ? 0 : ret;
}

static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
						   const char *compatible,
						   int size,
						   nvmem_reg_read_t reg_read)
{
	struct nvmem_device *nvmem = NULL;
	struct nvmem_config config = {};
	struct device_node *np;

	/* DT binding is optional */
	np = of_get_compatible_child(mtd->dev.of_node, compatible);

	/* OTP nvmem will be registered on the physical device */
	config.dev = mtd->dev.parent;
	config.name = compatible;
	config.id = NVMEM_DEVID_AUTO;
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
	config.type = NVMEM_TYPE_OTP;
	config.root_only = true;
	config.ignore_wp = true;
	config.reg_read = reg_read;
	config.size = size;
	config.of_node = np;
	config.priv = mtd;

	nvmem = nvmem_register(&config);
	/* Just ignore if there is no NVMEM support in the kernel */
	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
		nvmem = NULL;

	of_node_put(np);

	return nvmem;
}

static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
	struct device *dev = mtd->dev.parent;
	struct nvmem_device *nvmem;
	ssize_t size;
	int err;

	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
		size = mtd_otp_size(mtd, true);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
						       mtd_nvmem_user_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_user_nvmem = nvmem;
		}
	}

	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
		size = mtd_otp_size(mtd, false);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			/*
			 * The factory OTP contains things such as a unique serial
			 * number and is small, so let's read it out and put it
			 * into the entropy pool.
			 */
			void *otp;

			otp = kmalloc(size, GFP_KERNEL);
			if (!otp) {
				err = -ENOMEM;
				goto err;
			}
			err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size);
			if (err < 0) {
				kfree(otp);
				goto err;
			}
			add_device_randomness(otp, err);
			kfree(otp);

			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
						       mtd_nvmem_fact_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_factory_nvmem = nvmem;
		}
	}

	return 0;

err:
	nvmem_unregister(mtd->otp_user_nvmem);
	/* Don't report error if OTP is not supported. */
	if (err == -EOPNOTSUPP)
		return 0;
	return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found, this function tries to fall back to the information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret, err;

	mtd_set_dev_defaults(mtd);

	ret = mtd_otp_nvmem_add(mtd);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			goto out;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	if (ret) {
		nvmem_unregister(mtd->otp_user_nvmem);
		nvmem_unregister(mtd->otp_factory_nvmem);
	}

	if (ret && device_is_registered(&mtd->dev)) {
		err = del_mtd_device(mtd);
		if (err)
			pr_err("Error when deleting MTD device (%d)\n", err);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
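
/*
 * Illustrative sketch (compiled out, not part of mtdcore.c): the usual
 * driver-side pattern around mtd_device_parse_register() described in the
 * kernel-doc above. The foo_* names and the probe/remove wiring are
 * hypothetical; only the MTD calls are real.
 */
#if 0
static int foo_flash_probe(struct platform_device *pdev)
{
	struct mtd_info *mtd = &foo_get_chip(pdev)->mtd;

	mtd->name = "foo-flash";
	mtd->dev.parent = &pdev->dev;	/* gives the sysfs "device" symlink */
	mtd_set_of_node(mtd, pdev->dev.of_node);

	/* NULL @types: use the default parsers (e.g. cmdlinepart, ofpart) */
	return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
}

static void foo_flash_remove(struct platform_device *pdev)
{
	WARN_ON(mtd_device_unregister(&foo_get_chip(pdev)->mtd));
}
#endif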

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot) {
		unregister_reboot_notifier(&master->reboot_notifier);
		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
	}

	nvmem_unregister(master->otp_user_nvmem);
	nvmem_unregister(master->otp_factory_nvmem);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 *	register_mtd_user - register a 'user' of MTD devices.
 *	@new: pointer to notifier info structure
 *
 *	Registers a pair of callback functions to be called upon addition
 *	or removal of MTD devices. Causes the 'add' callback to be immediately
 *	invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);

/**
 *	unregister_mtd_user - unregister a 'user' of MTD devices.
 *	@old: pointer to notifier info structure
 *
 *	Removes a callback function pair from the list of 'users' to be
 *	notified upon addition or removal of MTD devices. Causes the
 *	'remove' callback to be immediately invoked for each MTD device
 *	currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and NULL address, return the num'th entry in the device
 *	table, if any.	Given an address and num == -1, search the device table
 *	for a device with that address and return if it's still present. Given
 *	both, return the num'th driver only if its address matches. Return
 *	error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);
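
/*
 * Illustrative sketch (compiled out, not part of mtdcore.c): the typical
 * user-side pairing of get_mtd_device() with put_mtd_device(). The example_*
 * name and the body of the "use" phase are hypothetical.
 */
#if 0
static int example_use_mtd(int num)
{
	struct mtd_info *mtd = get_mtd_device(NULL, num);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	/* ... safe to issue mtd_read()/mtd_write()/mtd_erase() here ... */

	put_mtd_device(mtd);
	return 0;
}
#endif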


int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (master->_get_device) {
		err = master->_get_device(mtd);
		if (err)
			return err;
	}

	if (!try_module_get(master->owner)) {
		if (master->_put_device)
			master->_put_device(master);
		return -ENODEV;
	}

	while (mtd) {
		if (mtd != master)
			kref_get(&mtd->refcnt);
		mtd = mtd->parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_get(&master->refcnt);

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
 *
 * @np: device tree node
 */
struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
{
	struct mtd_info *mtd = NULL;
	struct mtd_info *tmp;
	int err;

	mutex_lock(&mtd_table_mutex);

	err = -EPROBE_DEFER;
	mtd_for_each_device(tmp) {
		if (mtd_get_of_node(tmp) == np) {
			mtd = tmp;
			err = __get_mtd_device(mtd);
			break;
		}
	}

	mutex_unlock(&mtd_table_mutex);

	return err ? ERR_PTR(err) : mtd;
}
EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);

/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 * 	This function returns MTD device description structure in case of
 * 	success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd) {
		/* kref_put() can release mtd, so keep a reference to mtd->parent */
		struct mtd_info *parent = mtd->parent;

		if (mtd != master)
			kref_put(&mtd->refcnt, mtd_device_release);
		mtd = parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_put(&master->refcnt, mtd_device_release);

	module_put(master->owner);

	/* must be the last as master can be freed in the _put_device */
	if (master->_put_device)
		master->_put_device(master);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
ALLOW_ERROR_INJECTION(mtd_erase, ERRNO);
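
/*
 * Illustrative sketch (compiled out, not part of mtdcore.c): erasing a single
 * eraseblock at @ofs. Callers are expected to pass an eraseblock-aligned
 * range; on failure, instr.fail_addr (when known) points into the area that
 * was not properly erased. The example_* name is hypothetical.
 */
#if 0
static int example_erase_block(struct mtd_info *mtd, loff_t ofs)
{
	struct erase_info instr = {
		.addr = ofs,
		.len = mtd->erasesize,
	};
	int ret;

	ret = mtd_erase(mtd, &instr);
	if (ret && instr.fail_addr != MTD_FAIL_ADDR_UNKNOWN)
		pr_warn("erase failed at 0x%llx\n",
			(unsigned long long)instr.fail_addr);

	return ret;
}
#endif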

/*
 * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	WARN_ON_ONCE(*retlen != len && mtd_is_bitflip_or_eccerr(ret));

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);
ALLOW_ERROR_INJECTION(mtd_read, ERRNO);
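
/*
 * Illustrative sketch (compiled out, not part of mtdcore.c): the canonical
 * way to consume mtd_read()'s return convention. -EUCLEAN means the data was
 * corrected (bitflips reached mtd->bitflip_threshold) and is still usable,
 * but the caller should consider scrubbing the block. The example_* name is
 * hypothetical.
 */
#if 0
static int example_read(struct mtd_info *mtd, loff_t from, size_t len, u8 *buf)
{
	size_t retlen;
	int ret;

	ret = mtd_read(mtd, from, len, &retlen, buf);
	if (ret == -EUCLEAN)
		pr_debug("data corrected, consider scrubbing this block\n");
	else if (ret)
		return ret;	/* hard error, e.g. -EBADMSG on ECC failure */

	return retlen == len ? 0 : -EIO;
}
#endif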

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
ALLOW_ERROR_INJECTION(mtd_write, ERRNO);

/*
 * In blackbox flight-recorder-like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it's
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 *  this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	from = mtd_get_master_ofs(mtd, from);
	if (master->_read_oob)
		ret = master->_read_oob(master, from, ops);
	else
		ret = master->_read(master, from, ops->len, &ops->retlen,
				    ops->datbuf);

	return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	to = mtd_get_master_ofs(mtd, to);
	if (master->_write_oob)
		ret = master->_write_oob(master, to, ops);
	else
		ret = master->_write(master, to, ops->len, &ops->retlen,
				     ops->datbuf);

	return ret;
}

static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail - adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;
		pageofs = 0;
		info.pair++;
	}

	return max_bitflips;
}

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (ops->stats)
		memset(ops->stats, 0, sizeof(*ops->stats));

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	if (ops->stats)
		ops->stats->max_bitflips = ret_code;
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
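
/*
 * Illustrative sketch (compiled out, not part of mtdcore.c): reading one page
 * of data together with its available OOB bytes through mtd_read_oob(). The
 * example_* name and buffer handling are hypothetical.
 */
#if 0
static int example_read_page_and_oob(struct mtd_info *mtd, loff_t from,
				     u8 *data, u8 *oob)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_AUTO_OOB,	/* only the free OOB bytes */
		.len = mtd->writesize,
		.datbuf = data,
		.ooblen = mtd->oobavail,
		.oobbuf = oob,
	};

	/* Returns -EUCLEAN if bitflips reached mtd->bitflip_threshold */
	return mtd_read_oob(mtd, from, &ops);
}
#endif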

int mtd_write_oob(struct mtd_info *mtd, loff_t to,
				struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!master->_write_oob && (!master->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		return mtd_io_emulated_slc(mtd, to, false, ops);

	return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);

/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
1780  *	     bytes stored in a single contiguous section, or one section
1781  *	     per ECC chunk (and sometimes several sections for a single
1782  *	     ECC chunk)
1783  * @oobecc: OOB region struct filled with the appropriate ECC position
1784  *	    information
1785  *
1786  * This function returns ECC section information in the OOB area. If you want
1787  * to get all the ECC bytes information, then you should call
1788  * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1789  *
1790  * Returns zero on success, a negative error code otherwise.
1791  */
mtd_ooblayout_ecc(struct mtd_info * mtd,int section,struct mtd_oob_region * oobecc)1792 int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1793 		      struct mtd_oob_region *oobecc)
1794 {
1795 	struct mtd_info *master = mtd_get_master(mtd);
1796 
1797 	memset(oobecc, 0, sizeof(*oobecc));
1798 
1799 	if (!master || section < 0)
1800 		return -EINVAL;
1801 
1802 	if (!master->ooblayout || !master->ooblayout->ecc)
1803 		return -ENOTSUPP;
1804 
1805 	return master->ooblayout->ecc(master, section, oobecc);
1806 }
1807 EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
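/*
 * Illustrative sketch (editor's addition): walking every ECC section the way
 * the comment above suggests, stopping when the iterator returns -ERANGE.
 *
 *	struct mtd_oob_region oobecc;
 *	int section = 0, err;
 *
 *	while (!(err = mtd_ooblayout_ecc(mtd, section, &oobecc))) {
 *		pr_info("ECC section %d: offset %u, length %u\n",
 *			section, oobecc.offset, oobecc.length);
 *		section++;
 *	}
 *	if (err != -ERANGE)
 *		return err;	// a real error, not just "no more sections"
 */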
1808 
1809 /**
1810  * mtd_ooblayout_free - Get the OOB region definition of a specific free
1811  *			section
1812  * @mtd: MTD device structure
1813  * @section: Free section you are interested in. Depending on the layout
1814  *	     you may have all the free bytes stored in a single contiguous
1815  *	     section, or one section per ECC chunk plus an extra section
1816  *	     for the remaining bytes (or other funky layout).
1817  * @oobfree: OOB region struct filled with the appropriate free position
1818  *	     information
1819  *
1820  * This function returns free bytes position in the OOB area. If you want
1821  * to get all the free bytes information, then you should call
1822  * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1823  *
1824  * Returns zero on success, a negative error code otherwise.
1825  */
mtd_ooblayout_free(struct mtd_info * mtd,int section,struct mtd_oob_region * oobfree)1826 int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1827 		       struct mtd_oob_region *oobfree)
1828 {
1829 	struct mtd_info *master = mtd_get_master(mtd);
1830 
1831 	memset(oobfree, 0, sizeof(*oobfree));
1832 
1833 	if (!master || section < 0)
1834 		return -EINVAL;
1835 
1836 	if (!master->ooblayout || !master->ooblayout->free)
1837 		return -ENOTSUPP;
1838 
1839 	return master->ooblayout->free(master, section, oobfree);
1840 }
1841 EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1842 
1843 /**
1844  * mtd_ooblayout_find_region - Find the region attached to a specific byte
1845  * @mtd: mtd info structure
1846  * @byte: the byte we are searching for
1847  * @sectionp: pointer where the section id will be stored
1848  * @oobregion: used to retrieve the position of the region (ECC or free)
1849  * @iter: iterator function. Should be either mtd_ooblayout_free or
1850  *	  mtd_ooblayout_ecc depending on the region type you're searching for
1851  *
1852  * This function returns the section id and oobregion information of a
1853  * specific byte. For example, say you want to know where the 4th ECC byte is
1854  * stored, you'll use:
1855  *
1856  * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1857  *
1858  * Returns zero on success, a negative error code otherwise.
1859  */
mtd_ooblayout_find_region(struct mtd_info * mtd,int byte,int * sectionp,struct mtd_oob_region * oobregion,int (* iter)(struct mtd_info *,int section,struct mtd_oob_region * oobregion))1860 static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1861 				int *sectionp, struct mtd_oob_region *oobregion,
1862 				int (*iter)(struct mtd_info *,
1863 					    int section,
1864 					    struct mtd_oob_region *oobregion))
1865 {
1866 	int pos = 0, ret, section = 0;
1867 
1868 	memset(oobregion, 0, sizeof(*oobregion));
1869 
1870 	while (1) {
1871 		ret = iter(mtd, section, oobregion);
1872 		if (ret)
1873 			return ret;
1874 
1875 		if (pos + oobregion->length > byte)
1876 			break;
1877 
1878 		pos += oobregion->length;
1879 		section++;
1880 	}
1881 
1882 	/*
1883 	 * Adjust region info to make it start at the beginning of the
1884 	 * 'start' ECC byte.
1885 	 */
1886 	oobregion->offset += byte - pos;
1887 	oobregion->length -= byte - pos;
1888 	*sectionp = section;
1889 
1890 	return 0;
1891 }
1892 
1893 /**
1894  * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1895  *				  ECC byte
1896  * @mtd: mtd info structure
1897  * @eccbyte: the byte we are searching for
1898  * @section: pointer where the section id will be stored
1899  * @oobregion: OOB region information
1900  *
1901  * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1902  * byte.
1903  *
1904  * Returns zero on success, a negative error code otherwise.
1905  */
mtd_ooblayout_find_eccregion(struct mtd_info * mtd,int eccbyte,int * section,struct mtd_oob_region * oobregion)1906 int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1907 				 int *section,
1908 				 struct mtd_oob_region *oobregion)
1909 {
1910 	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1911 					 mtd_ooblayout_ecc);
1912 }
1913 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1914 
1915 /**
1916  * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1917  * @mtd: mtd info structure
1918  * @buf: destination buffer to store OOB bytes
1919  * @oobbuf: OOB buffer
1920  * @start: first byte to retrieve
1921  * @nbytes: number of bytes to retrieve
1922  * @iter: section iterator
1923  *
1924  * Extract bytes attached to a specific category (ECC or free)
1925  * from the OOB buffer and copy them into buf.
1926  *
1927  * Returns zero on success, a negative error code otherwise.
1928  */
mtd_ooblayout_get_bytes(struct mtd_info * mtd,u8 * buf,const u8 * oobbuf,int start,int nbytes,int (* iter)(struct mtd_info *,int section,struct mtd_oob_region * oobregion))1929 static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1930 				const u8 *oobbuf, int start, int nbytes,
1931 				int (*iter)(struct mtd_info *,
1932 					    int section,
1933 					    struct mtd_oob_region *oobregion))
1934 {
1935 	struct mtd_oob_region oobregion;
1936 	int section, ret;
1937 
1938 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1939 					&oobregion, iter);
1940 
1941 	while (!ret) {
1942 		int cnt;
1943 
1944 		cnt = min_t(int, nbytes, oobregion.length);
1945 		memcpy(buf, oobbuf + oobregion.offset, cnt);
1946 		buf += cnt;
1947 		nbytes -= cnt;
1948 
1949 		if (!nbytes)
1950 			break;
1951 
1952 		ret = iter(mtd, ++section, &oobregion);
1953 	}
1954 
1955 	return ret;
1956 }
1957 
1958 /**
1959  * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1960  * @mtd: mtd info structure
1961  * @buf: source buffer to get OOB bytes from
1962  * @oobbuf: OOB buffer
1963  * @start: first OOB byte to set
1964  * @nbytes: number of OOB bytes to set
1965  * @iter: section iterator
1966  *
1967  * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1968  * is selected by passing the appropriate iterator.
1969  *
1970  * Returns zero on success, a negative error code otherwise.
1971  */
mtd_ooblayout_set_bytes(struct mtd_info * mtd,const u8 * buf,u8 * oobbuf,int start,int nbytes,int (* iter)(struct mtd_info *,int section,struct mtd_oob_region * oobregion))1972 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1973 				u8 *oobbuf, int start, int nbytes,
1974 				int (*iter)(struct mtd_info *,
1975 					    int section,
1976 					    struct mtd_oob_region *oobregion))
1977 {
1978 	struct mtd_oob_region oobregion;
1979 	int section, ret;
1980 
1981 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1982 					&oobregion, iter);
1983 
1984 	while (!ret) {
1985 		int cnt;
1986 
1987 		cnt = min_t(int, nbytes, oobregion.length);
1988 		memcpy(oobbuf + oobregion.offset, buf, cnt);
1989 		buf += cnt;
1990 		nbytes -= cnt;
1991 
1992 		if (!nbytes)
1993 			break;
1994 
1995 		ret = iter(mtd, ++section, &oobregion);
1996 	}
1997 
1998 	return ret;
1999 }
2000 
2001 /**
2002  * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
2003  * @mtd: mtd info structure
2004  * @iter: category iterator
2005  *
2006  * Count the number of bytes in a given category.
2007  *
2008  * Returns a positive value on success, a negative error code otherwise.
2009  */
mtd_ooblayout_count_bytes(struct mtd_info * mtd,int (* iter)(struct mtd_info *,int section,struct mtd_oob_region * oobregion))2010 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
2011 				int (*iter)(struct mtd_info *,
2012 					    int section,
2013 					    struct mtd_oob_region *oobregion))
2014 {
2015 	struct mtd_oob_region oobregion;
2016 	int section = 0, ret, nbytes = 0;
2017 
2018 	while (1) {
2019 		ret = iter(mtd, section++, &oobregion);
2020 		if (ret) {
2021 			if (ret == -ERANGE)
2022 				ret = nbytes;
2023 			break;
2024 		}
2025 
2026 		nbytes += oobregion.length;
2027 	}
2028 
2029 	return ret;
2030 }
2031 
2032 /**
2033  * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
2034  * @mtd: mtd info structure
2035  * @eccbuf: destination buffer to store ECC bytes
2036  * @oobbuf: OOB buffer
2037  * @start: first ECC byte to retrieve
2038  * @nbytes: number of ECC bytes to retrieve
2039  *
2040  * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
2041  *
2042  * Returns zero on success, a negative error code otherwise.
2043  */
mtd_ooblayout_get_eccbytes(struct mtd_info * mtd,u8 * eccbuf,const u8 * oobbuf,int start,int nbytes)2044 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
2045 			       const u8 *oobbuf, int start, int nbytes)
2046 {
2047 	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2048 				       mtd_ooblayout_ecc);
2049 }
2050 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
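/*
 * Illustrative sketch (editor's addition): after a raw OOB read, gather all
 * the ECC bytes into one linear buffer. "raw_oob" is a hypothetical buffer of
 * mtd->oobsize bytes already filled by an MTD_OPS_RAW read.
 *
 *	int necc = mtd_ooblayout_count_eccbytes(mtd);
 *	u8 *ecc;
 *	int err;
 *
 *	if (necc <= 0)
 *		return necc;		// no ECC layout information available
 *	ecc = kmalloc(necc, GFP_KERNEL);
 *	if (!ecc)
 *		return -ENOMEM;
 *	err = mtd_ooblayout_get_eccbytes(mtd, ecc, raw_oob, 0, necc);
 */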
2051 
2052 /**
2053  * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
2054  * @mtd: mtd info structure
2055  * @eccbuf: source buffer to get ECC bytes from
2056  * @oobbuf: OOB buffer
2057  * @start: first ECC byte to set
2058  * @nbytes: number of ECC bytes to set
2059  *
2060  * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
2061  *
2062  * Returns zero on success, a negative error code otherwise.
2063  */
mtd_ooblayout_set_eccbytes(struct mtd_info * mtd,const u8 * eccbuf,u8 * oobbuf,int start,int nbytes)2064 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
2065 			       u8 *oobbuf, int start, int nbytes)
2066 {
2067 	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2068 				       mtd_ooblayout_ecc);
2069 }
2070 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
2071 
2072 /**
2073  * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
2074  * @mtd: mtd info structure
2075  * @databuf: destination buffer to store data bytes
2076  * @oobbuf: OOB buffer
2077  * @start: first data byte to retrieve
2078  * @nbytes: number of data bytes to retrieve
2079  *
2080  * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
2081  *
2082  * Returns zero on success, a negative error code otherwise.
2083  */
mtd_ooblayout_get_databytes(struct mtd_info * mtd,u8 * databuf,const u8 * oobbuf,int start,int nbytes)2084 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
2085 				const u8 *oobbuf, int start, int nbytes)
2086 {
2087 	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
2088 				       mtd_ooblayout_free);
2089 }
2090 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
2091 
2092 /**
2093  * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
2094  * @mtd: mtd info structure
2095  * @databuf: source buffer to get data bytes from
2096  * @oobbuf: OOB buffer
2097  * @start: first data byte to set
2098  * @nbytes: number of data bytes to set
2099  *
2100  * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
2101  *
2102  * Returns zero on success, a negative error code otherwise.
2103  */
mtd_ooblayout_set_databytes(struct mtd_info * mtd,const u8 * databuf,u8 * oobbuf,int start,int nbytes)2104 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
2105 				u8 *oobbuf, int start, int nbytes)
2106 {
2107 	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
2108 				       mtd_ooblayout_free);
2109 }
2110 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
2111 
2112 /**
2113  * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
2114  * @mtd: mtd info structure
2115  *
2116  * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
2117  *
2118  * Returns the number of free bytes on success, a negative error code otherwise.
2119  */
mtd_ooblayout_count_freebytes(struct mtd_info * mtd)2120 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
2121 {
2122 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
2123 }
2124 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
2125 
2126 /**
2127  * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
2128  * @mtd: mtd info structure
2129  *
2130  * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
2131  *
2132  * Returns the number of ECC bytes on success, a negative error code otherwise.
2133  */
mtd_ooblayout_count_eccbytes(struct mtd_info * mtd)2134 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
2135 {
2136 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
2137 }
2138 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
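/*
 * Illustrative sketch (editor's addition): the ECC and free regions are
 * disjoint subsets of the OOB area, so a driver-side sanity check might look
 * like this.
 *
 *	int free_bytes = mtd_ooblayout_count_freebytes(mtd);
 *	int ecc_bytes  = mtd_ooblayout_count_eccbytes(mtd);
 *
 *	if (free_bytes < 0 || ecc_bytes < 0)
 *		return -EINVAL;		// no OOB layout registered
 *	WARN_ON(free_bytes + ecc_bytes > mtd->oobsize);
 */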
2139 
2140 /*
2141  * Method to access the protection register area, present in some flash
2142  * devices. The user data is one-time programmable but the factory data is read
2143  * only.
2144  */
mtd_get_fact_prot_info(struct mtd_info * mtd,size_t len,size_t * retlen,struct otp_info * buf)2145 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2146 			   struct otp_info *buf)
2147 {
2148 	struct mtd_info *master = mtd_get_master(mtd);
2149 
2150 	if (!master->_get_fact_prot_info)
2151 		return -EOPNOTSUPP;
2152 	if (!len)
2153 		return 0;
2154 	return master->_get_fact_prot_info(master, len, retlen, buf);
2155 }
2156 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2157 
mtd_read_fact_prot_reg(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf)2158 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2159 			   size_t *retlen, u_char *buf)
2160 {
2161 	struct mtd_info *master = mtd_get_master(mtd);
2162 
2163 	*retlen = 0;
2164 	if (!master->_read_fact_prot_reg)
2165 		return -EOPNOTSUPP;
2166 	if (!len)
2167 		return 0;
2168 	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2169 }
2170 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2171 
mtd_get_user_prot_info(struct mtd_info * mtd,size_t len,size_t * retlen,struct otp_info * buf)2172 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2173 			   struct otp_info *buf)
2174 {
2175 	struct mtd_info *master = mtd_get_master(mtd);
2176 
2177 	if (!master->_get_user_prot_info)
2178 		return -EOPNOTSUPP;
2179 	if (!len)
2180 		return 0;
2181 	return master->_get_user_prot_info(master, len, retlen, buf);
2182 }
2183 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2184 
mtd_read_user_prot_reg(struct mtd_info * mtd,loff_t from,size_t len,size_t * retlen,u_char * buf)2185 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2186 			   size_t *retlen, u_char *buf)
2187 {
2188 	struct mtd_info *master = mtd_get_master(mtd);
2189 
2190 	*retlen = 0;
2191 	if (!master->_read_user_prot_reg)
2192 		return -EOPNOTSUPP;
2193 	if (!len)
2194 		return 0;
2195 	return master->_read_user_prot_reg(master, from, len, retlen, buf);
2196 }
2197 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
2198 
mtd_write_user_prot_reg(struct mtd_info * mtd,loff_t to,size_t len,size_t * retlen,const u_char * buf)2199 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2200 			    size_t *retlen, const u_char *buf)
2201 {
2202 	struct mtd_info *master = mtd_get_master(mtd);
2203 	int ret;
2204 
2205 	*retlen = 0;
2206 	if (!master->_write_user_prot_reg)
2207 		return -EOPNOTSUPP;
2208 	if (!len)
2209 		return 0;
2210 	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2211 	if (ret)
2212 		return ret;
2213 
2214 	/*
2215 	 * If no data could be written at all, we are out of space and
2216 	 * must return -ENOSPC.
2217 	 */
2218 	return (*retlen) ? 0 : -ENOSPC;
2219 }
2220 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
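/*
 * Illustrative sketch (editor's addition): programming a few bytes into the
 * first user-OTP region. "serial" is hypothetical caller data; only the first
 * struct otp_info entry is examined.
 *
 *	struct otp_info info;
 *	size_t retlen;
 *	int err;
 *
 *	err = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, &info);
 *	if (err || retlen < sizeof(info))
 *		return err ? err : -ENODEV;
 *
 *	err = mtd_write_user_prot_reg(mtd, info.start, sizeof(serial),
 *				      &retlen, serial);
 */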
2221 
mtd_lock_user_prot_reg(struct mtd_info * mtd,loff_t from,size_t len)2222 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2223 {
2224 	struct mtd_info *master = mtd_get_master(mtd);
2225 
2226 	if (!master->_lock_user_prot_reg)
2227 		return -EOPNOTSUPP;
2228 	if (!len)
2229 		return 0;
2230 	return master->_lock_user_prot_reg(master, from, len);
2231 }
2232 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2233 
mtd_erase_user_prot_reg(struct mtd_info * mtd,loff_t from,size_t len)2234 int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2235 {
2236 	struct mtd_info *master = mtd_get_master(mtd);
2237 
2238 	if (!master->_erase_user_prot_reg)
2239 		return -EOPNOTSUPP;
2240 	if (!len)
2241 		return 0;
2242 	return master->_erase_user_prot_reg(master, from, len);
2243 }
2244 EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
2245 
2246 /* Chip-supported device locking */
mtd_lock(struct mtd_info * mtd,loff_t ofs,uint64_t len)2247 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2248 {
2249 	struct mtd_info *master = mtd_get_master(mtd);
2250 
2251 	if (!master->_lock)
2252 		return -EOPNOTSUPP;
2253 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2254 		return -EINVAL;
2255 	if (!len)
2256 		return 0;
2257 
2258 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2259 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2260 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2261 	}
2262 
2263 	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2264 }
2265 EXPORT_SYMBOL_GPL(mtd_lock);
2266 
mtd_unlock(struct mtd_info * mtd,loff_t ofs,uint64_t len)2267 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2268 {
2269 	struct mtd_info *master = mtd_get_master(mtd);
2270 
2271 	if (!master->_unlock)
2272 		return -EOPNOTSUPP;
2273 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2274 		return -EINVAL;
2275 	if (!len)
2276 		return 0;
2277 
2278 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2279 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2280 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2281 	}
2282 
2283 	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2284 }
2285 EXPORT_SYMBOL_GPL(mtd_unlock);
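/*
 * Illustrative sketch (editor's addition): flash that powers up with block
 * protection enabled must be unlocked before erase/write; callers commonly
 * treat -EOPNOTSUPP (no locking implemented) as success.
 *
 *	err = mtd_unlock(mtd, ofs, len);
 *	if (err == -EOPNOTSUPP)
 *		err = 0;
 *	if (err)
 *		return err;
 */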
2286 
mtd_is_locked(struct mtd_info * mtd,loff_t ofs,uint64_t len)2287 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2288 {
2289 	struct mtd_info *master = mtd_get_master(mtd);
2290 
2291 	if (!master->_is_locked)
2292 		return -EOPNOTSUPP;
2293 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2294 		return -EINVAL;
2295 	if (!len)
2296 		return 0;
2297 
2298 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2299 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2300 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2301 	}
2302 
2303 	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2304 }
2305 EXPORT_SYMBOL_GPL(mtd_is_locked);
2306 
mtd_block_isreserved(struct mtd_info * mtd,loff_t ofs)2307 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2308 {
2309 	struct mtd_info *master = mtd_get_master(mtd);
2310 
2311 	if (ofs < 0 || ofs >= mtd->size)
2312 		return -EINVAL;
2313 	if (!master->_block_isreserved)
2314 		return 0;
2315 
2316 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2317 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2318 
2319 	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2320 }
2321 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2322 
mtd_block_isbad(struct mtd_info * mtd,loff_t ofs)2323 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2324 {
2325 	struct mtd_info *master = mtd_get_master(mtd);
2326 
2327 	if (ofs < 0 || ofs >= mtd->size)
2328 		return -EINVAL;
2329 	if (!master->_block_isbad)
2330 		return 0;
2331 
2332 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2333 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2334 
2335 	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2336 }
2337 EXPORT_SYMBOL_GPL(mtd_block_isbad);
2338 
mtd_block_markbad(struct mtd_info * mtd,loff_t ofs)2339 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2340 {
2341 	struct mtd_info *master = mtd_get_master(mtd);
2342 	int ret;
2343 
2344 	if (!master->_block_markbad)
2345 		return -EOPNOTSUPP;
2346 	if (ofs < 0 || ofs >= mtd->size)
2347 		return -EINVAL;
2348 	if (!(mtd->flags & MTD_WRITEABLE))
2349 		return -EROFS;
2350 
2351 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2352 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2353 
2354 	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2355 	if (ret)
2356 		return ret;
2357 
2358 	while (mtd->parent) {
2359 		mtd->ecc_stats.badblocks++;
2360 		mtd = mtd->parent;
2361 	}
2362 
2363 	return 0;
2364 }
2365 EXPORT_SYMBOL_GPL(mtd_block_markbad);
2366 ALLOW_ERROR_INJECTION(mtd_block_markbad, ERRNO);
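/*
 * Illustrative sketch (editor's addition): a caller walking the device
 * eraseblock by eraseblock typically skips blocks flagged by
 * mtd_block_isbad() and marks a block bad after an unrecoverable failure.
 * do_one_block() is a hypothetical helper; isbad errors are treated as "skip".
 *
 *	loff_t ofs;
 *	int err;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		if (mtd_block_isbad(mtd, ofs))
 *			continue;
 *		err = do_one_block(mtd, ofs);
 *		if (err == -EIO)
 *			mtd_block_markbad(mtd, ofs);
 *	}
 */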
2367 
2368 /*
2369  * default_mtd_writev - the default writev method
2370  * @mtd: mtd device description object pointer
2371  * @vecs: the vectors to write
2372  * @count: count of vectors in @vecs
2373  * @to: the MTD device offset to write to
2374  * @retlen: on exit contains the count of bytes written to the MTD device.
2375  *
2376  * This function returns zero in case of success and a negative error code in
2377  * case of failure.
2378  */
default_mtd_writev(struct mtd_info * mtd,const struct kvec * vecs,unsigned long count,loff_t to,size_t * retlen)2379 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2380 			      unsigned long count, loff_t to, size_t *retlen)
2381 {
2382 	unsigned long i;
2383 	size_t totlen = 0, thislen;
2384 	int ret = 0;
2385 
2386 	for (i = 0; i < count; i++) {
2387 		if (!vecs[i].iov_len)
2388 			continue;
2389 		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2390 				vecs[i].iov_base);
2391 		totlen += thislen;
2392 		if (ret || thislen != vecs[i].iov_len)
2393 			break;
2394 		to += vecs[i].iov_len;
2395 	}
2396 	*retlen = totlen;
2397 	return ret;
2398 }
2399 
2400 /*
2401  * mtd_writev - the vector-based MTD write method
2402  * @mtd: mtd device description object pointer
2403  * @vecs: the vectors to write
2404  * @count: count of vectors in @vecs
2405  * @to: the MTD device offset to write to
2406  * @retlen: on exit contains the count of bytes written to the MTD device.
2407  *
2408  * This function returns zero in case of success and a negative error code in
2409  * case of failure.
2410  */
mtd_writev(struct mtd_info * mtd,const struct kvec * vecs,unsigned long count,loff_t to,size_t * retlen)2411 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2412 	       unsigned long count, loff_t to, size_t *retlen)
2413 {
2414 	struct mtd_info *master = mtd_get_master(mtd);
2415 
2416 	*retlen = 0;
2417 	if (!(mtd->flags & MTD_WRITEABLE))
2418 		return -EROFS;
2419 
2420 	if (!master->_writev)
2421 		return default_mtd_writev(mtd, vecs, count, to, retlen);
2422 
2423 	return master->_writev(master, vecs, count,
2424 			       mtd_get_master_ofs(mtd, to), retlen);
2425 }
2426 EXPORT_SYMBOL_GPL(mtd_writev);
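/*
 * Illustrative sketch (editor's addition): writing a header and a payload
 * that live in two separate kernel buffers with a single mtd_writev() call.
 * "hdr", "data", "data_len" and "to" are hypothetical caller state.
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr,  .iov_len = sizeof(*hdr) },
 *		{ .iov_base = data, .iov_len = data_len },
 *	};
 *	size_t retlen;
 *	int err;
 *
 *	err = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
 *	if (!err && retlen != sizeof(*hdr) + data_len)
 *		err = -EIO;	// short write
 */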
2427 
2428 /**
2429  * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2430  * @mtd: mtd device description object pointer
2431  * @size: a pointer to the ideal or maximum size of the allocation, points
2432  *        to the actual allocation size on success.
2433  *
2434  * This routine attempts to allocate a contiguous kernel buffer up to
2435  * the specified size, backing off the size of the request exponentially
2436  * until the request succeeds or until the allocation size falls below
2437  * the system page size. This attempts to make sure it does not adversely
2438  * impact system performance, so when allocating more than one page, we
2439  * ask the memory allocator to avoid re-trying, swapping, writing back
2440  * or performing I/O.
2441  *
2442  * Note, this function also makes sure that the allocated buffer is aligned to
2443  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2444  *
2445  * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2446  * to handle smaller (i.e. degraded) buffer allocations under low- or
2447  * fragmented-memory situations where such reduced allocations, from a
2448  * requested ideal, are allowed.
2449  *
2450  * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2451  */
mtd_kmalloc_up_to(const struct mtd_info * mtd,size_t * size)2452 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2453 {
2454 	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2455 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2456 	void *kbuf;
2457 
2458 	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2459 
2460 	while (*size > min_alloc) {
2461 		kbuf = kmalloc(*size, flags);
2462 		if (kbuf)
2463 			return kbuf;
2464 
2465 		*size >>= 1;
2466 		*size = ALIGN(*size, mtd->writesize);
2467 	}
2468 
2469 	/*
2470 	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2471 	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2472 	 */
2473 	return kmalloc(*size, GFP_KERNEL);
2474 }
2475 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
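/*
 * Illustrative sketch (editor's addition): asking for a whole-eraseblock
 * buffer but accepting whatever smaller, writesize-aligned allocation the
 * helper above manages, then working in chunks of that size.
 *
 *	size_t bufsize = mtd->erasesize;
 *	u8 *buf = mtd_kmalloc_up_to(mtd, &bufsize);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// bufsize now holds the size actually allocated (<= mtd->erasesize)
 *	... read or write loop advancing by bufsize ...
 *	kfree(buf);
 */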
2476 
2477 #ifdef CONFIG_PROC_FS
2478 
2479 /*====================================================================*/
2480 /* Support for /proc/mtd */
2481 
mtd_proc_show(struct seq_file * m,void * v)2482 static int mtd_proc_show(struct seq_file *m, void *v)
2483 {
2484 	struct mtd_info *mtd;
2485 
2486 	seq_puts(m, "dev:    size   erasesize  name\n");
2487 	mutex_lock(&mtd_table_mutex);
2488 	mtd_for_each_device(mtd) {
2489 		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2490 			   mtd->index, (unsigned long long)mtd->size,
2491 			   mtd->erasesize, mtd->name);
2492 	}
2493 	mutex_unlock(&mtd_table_mutex);
2494 	return 0;
2495 }
2496 #endif /* CONFIG_PROC_FS */
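/*
 * For reference (editor's addition): the seq_printf() format above produces
 * /proc/mtd lines of the following shape; the device shown is made up.
 *
 *	dev:    size   erasesize  name
 *	mtd0: 01000000 00020000 "spi0.0"
 */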
2497 
2498 /*====================================================================*/
2499 /* Init code */
2500 
mtd_bdi_init(const char * name)2501 static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2502 {
2503 	struct backing_dev_info *bdi;
2504 	int ret;
2505 
2506 	bdi = bdi_alloc(NUMA_NO_NODE);
2507 	if (!bdi)
2508 		return ERR_PTR(-ENOMEM);
2509 	bdi->ra_pages = 0;
2510 	bdi->io_pages = 0;
2511 
2512 	/*
2513 	 * We append a '-0' suffix to the name to keep the name format we have
2514 	 * always used. Since this is called only once, the name is unique.
2515 	 */
2516 	ret = bdi_register(bdi, "%.28s-0", name);
2517 	if (ret)
2518 		bdi_put(bdi);
2519 
2520 	return ret ? ERR_PTR(ret) : bdi;
2521 }
2522 
2523 static struct proc_dir_entry *proc_mtd;
2524 
init_mtd(void)2525 static int __init init_mtd(void)
2526 {
2527 	int ret;
2528 
2529 	ret = class_register(&mtd_class);
2530 	if (ret)
2531 		goto err_reg;
2532 
2533 	mtd_bdi = mtd_bdi_init("mtd");
2534 	if (IS_ERR(mtd_bdi)) {
2535 		ret = PTR_ERR(mtd_bdi);
2536 		goto err_bdi;
2537 	}
2538 
2539 	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2540 
2541 	ret = init_mtdchar();
2542 	if (ret)
2543 		goto out_procfs;
2544 
2545 	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2546 	debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2547 			    &mtd_expert_analysis_mode);
2548 
2549 	return 0;
2550 
2551 out_procfs:
2552 	if (proc_mtd)
2553 		remove_proc_entry("mtd", NULL);
2554 	bdi_unregister(mtd_bdi);
2555 	bdi_put(mtd_bdi);
2556 err_bdi:
2557 	class_unregister(&mtd_class);
2558 err_reg:
2559 	pr_err("Error registering mtd class or bdi: %d\n", ret);
2560 	return ret;
2561 }
2562 
cleanup_mtd(void)2563 static void __exit cleanup_mtd(void)
2564 {
2565 	debugfs_remove_recursive(dfs_dir_mtd);
2566 	cleanup_mtdchar();
2567 	if (proc_mtd)
2568 		remove_proc_entry("mtd", NULL);
2569 	class_unregister(&mtd_class);
2570 	bdi_unregister(mtd_bdi);
2571 	bdi_put(mtd_bdi);
2572 	idr_destroy(&mtd_idr);
2573 }
2574 
2575 module_init(init_mtd);
2576 module_exit(cleanup_mtd);
2577 
2578 MODULE_LICENSE("GPL");
2579 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2580 MODULE_DESCRIPTION("Core MTD registration and access routines");
2581