xref: /linux/drivers/mtd/mtdcore.c (revision c145211d1f9e2ef19e7b4c2b943f68366daa97af)
1 /*
2  * Core registration and callback routines for MTD
3  * drivers and users.
4  *
5  * bdi bits are:
6  * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
7  * Written by David Howells (dhowells@redhat.com)
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/ptrace.h>
13 #include <linux/string.h>
14 #include <linux/timer.h>
15 #include <linux/major.h>
16 #include <linux/fs.h>
17 #include <linux/err.h>
18 #include <linux/ioctl.h>
19 #include <linux/init.h>
20 #include <linux/mtd/compatmac.h>
21 #include <linux/proc_fs.h>
22 #include <linux/backing-dev.h>
23 
24 #include <linux/mtd/mtd.h>
25 
26 #include "mtdcore.h"
/*
 * backing device capabilities for non-mappable devices (such as NAND flash)
 * - permits private mappings, copies are taken of the data
 */
struct backing_dev_info mtd_bdi_unmappable = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

/*
 * backing device capabilities for R/O mappable devices (such as ROM)
 * - permits private mappings, copies are taken of the data
 * - permits non-writable shared mappings
 */
struct backing_dev_info mtd_bdi_ro_mappable = {
	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
};

/*
 * backing device capabilities for writable mappable devices (such as RAM)
 * - permits private mappings, copies are taken of the data
 * - permits writable shared mappings (BDI_CAP_WRITE_MAP is set)
 */
struct backing_dev_info mtd_bdi_rw_mappable = {
	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
			   BDI_CAP_WRITE_MAP),
};
55 
/* Class-level PM hooks, defined below; they forward to the driver. */
static int mtd_cls_suspend(struct device *dev, pm_message_t state);
static int mtd_cls_resume(struct device *dev);

/* The "mtd" device class every registered MTD device is attached to. */
static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.suspend = mtd_cls_suspend,
	.resume = mtd_cls_resume,
};
65 
/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
/* Global device table; a NULL slot is free. Protected by mtd_table_mutex. */
struct mtd_info *mtd_table[MAX_MTD_DEVICES];

EXPORT_SYMBOL_GPL(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table);

/* Registered 'users' notified on device add/remove; also guarded by
 * mtd_table_mutex (see add_mtd_device/del_mtd_device). */
static LIST_HEAD(mtd_notifiers);


/* With the MTD char driver built, device 'index' owns two char minors:
 * 2*index (/dev/mtdX) and 2*index+1 (/dev/mtdXro, created in
 * add_mtd_device()). Without it, 0 means "no char device nodes". */
#if defined(CONFIG_MTD_CHAR) || defined(CONFIG_MTD_CHAR_MODULE)
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
#else
#define MTD_DEVT(index) 0
#endif
82 
83 /* REVISIT once MTD uses the driver model better, whoever allocates
84  * the mtd_info will probably want to use the release() hook...
85  */
86 static void mtd_release(struct device *dev)
87 {
88 	dev_t index = MTD_DEVT(dev_to_mtd(dev)->index);
89 
90 	/* remove /dev/mtdXro node if needed */
91 	if (index)
92 		device_destroy(&mtd_class, index + 1);
93 }
94 
95 static int mtd_cls_suspend(struct device *dev, pm_message_t state)
96 {
97 	struct mtd_info *mtd = dev_to_mtd(dev);
98 
99 	if (mtd && mtd->suspend)
100 		return mtd->suspend(mtd);
101 	else
102 		return 0;
103 }
104 
105 static int mtd_cls_resume(struct device *dev)
106 {
107 	struct mtd_info *mtd = dev_to_mtd(dev);
108 
109 	if (mtd && mtd->resume)
110 		mtd->resume(mtd);
111 	return 0;
112 }
113 
114 static ssize_t mtd_type_show(struct device *dev,
115 		struct device_attribute *attr, char *buf)
116 {
117 	struct mtd_info *mtd = dev_to_mtd(dev);
118 	char *type;
119 
120 	switch (mtd->type) {
121 	case MTD_ABSENT:
122 		type = "absent";
123 		break;
124 	case MTD_RAM:
125 		type = "ram";
126 		break;
127 	case MTD_ROM:
128 		type = "rom";
129 		break;
130 	case MTD_NORFLASH:
131 		type = "nor";
132 		break;
133 	case MTD_NANDFLASH:
134 		type = "nand";
135 		break;
136 	case MTD_DATAFLASH:
137 		type = "dataflash";
138 		break;
139 	case MTD_UBIVOLUME:
140 		type = "ubi";
141 		break;
142 	default:
143 		type = "unknown";
144 	}
145 
146 	return snprintf(buf, PAGE_SIZE, "%s\n", type);
147 }
148 static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
149 
150 static ssize_t mtd_flags_show(struct device *dev,
151 		struct device_attribute *attr, char *buf)
152 {
153 	struct mtd_info *mtd = dev_to_mtd(dev);
154 
155 	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
156 
157 }
158 static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
159 
160 static ssize_t mtd_size_show(struct device *dev,
161 		struct device_attribute *attr, char *buf)
162 {
163 	struct mtd_info *mtd = dev_to_mtd(dev);
164 
165 	return snprintf(buf, PAGE_SIZE, "%llu\n",
166 		(unsigned long long)mtd->size);
167 
168 }
169 static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
170 
171 static ssize_t mtd_erasesize_show(struct device *dev,
172 		struct device_attribute *attr, char *buf)
173 {
174 	struct mtd_info *mtd = dev_to_mtd(dev);
175 
176 	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
177 
178 }
179 static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
180 
181 static ssize_t mtd_writesize_show(struct device *dev,
182 		struct device_attribute *attr, char *buf)
183 {
184 	struct mtd_info *mtd = dev_to_mtd(dev);
185 
186 	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
187 
188 }
189 static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
190 
191 static ssize_t mtd_subpagesize_show(struct device *dev,
192 		struct device_attribute *attr, char *buf)
193 {
194 	struct mtd_info *mtd = dev_to_mtd(dev);
195 	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
196 
197 	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
198 
199 }
200 static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
201 
202 static ssize_t mtd_oobsize_show(struct device *dev,
203 		struct device_attribute *attr, char *buf)
204 {
205 	struct mtd_info *mtd = dev_to_mtd(dev);
206 
207 	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
208 
209 }
210 static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
211 
212 static ssize_t mtd_numeraseregions_show(struct device *dev,
213 		struct device_attribute *attr, char *buf)
214 {
215 	struct mtd_info *mtd = dev_to_mtd(dev);
216 
217 	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
218 
219 }
220 static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
221 	NULL);
222 
223 static ssize_t mtd_name_show(struct device *dev,
224 		struct device_attribute *attr, char *buf)
225 {
226 	struct mtd_info *mtd = dev_to_mtd(dev);
227 
228 	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
229 
230 }
231 static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
232 
/* Default sysfs attributes created for every MTD device. */
static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	NULL,
};

static struct attribute_group mtd_group = {
	.attrs		= mtd_attrs,
};

/* NULL-terminated list of attribute groups for the mtd device type. */
static const struct attribute_group *mtd_groups[] = {
	&mtd_group,
	NULL,
};

static struct device_type mtd_devtype = {
	.name		= "mtd",
	.groups		= mtd_groups,
	.release	= mtd_release,
};
260 
/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently active MTD 'user' of its arrival. Returns
 *	zero on success or 1 on failure, which currently will only happen
 *	if there is no free slot left in mtd_table (MAX_MTD_DEVICES entries)
 *	or registering the underlying struct device fails.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	int i;

	/* Pick a default backing_dev_info if the driver supplied none:
	 * RAM-like devices get the r/w mappable one, ROM-like the r/o
	 * mappable one, everything else (e.g. NAND) is unmappable. */
	if (!mtd->backing_dev_info) {
		switch (mtd->type) {
		case MTD_RAM:
			mtd->backing_dev_info = &mtd_bdi_rw_mappable;
			break;
		case MTD_ROM:
			mtd->backing_dev_info = &mtd_bdi_ro_mappable;
			break;
		default:
			mtd->backing_dev_info = &mtd_bdi_unmappable;
			break;
		}
	}

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	/* Claim the first free table slot. */
	for (i=0; i < MAX_MTD_DEVICES; i++)
		if (!mtd_table[i]) {
			struct mtd_notifier *not;

			mtd_table[i] = mtd;
			mtd->index = i;
			mtd->usecount = 0;

			/* Cache shift/mask forms of erasesize/writesize for
			 * cheap div/mod when they are powers of two; a
			 * shift of 0 means "not a power of two". */
			if (is_power_of_2(mtd->erasesize))
				mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
			else
				mtd->erasesize_shift = 0;

			if (is_power_of_2(mtd->writesize))
				mtd->writesize_shift = ffs(mtd->writesize) - 1;
			else
				mtd->writesize_shift = 0;

			mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
			mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

			/* Some chips always power up locked. Unlock them now */
			if ((mtd->flags & MTD_WRITEABLE)
			    && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
				if (mtd->unlock(mtd, 0, mtd->size))
					printk(KERN_WARNING
					       "%s: unlock failed, "
					       "writes may not work\n",
					       mtd->name);
			}

			/* Caller should have set dev.parent to match the
			 * physical device.
			 */
			mtd->dev.type = &mtd_devtype;
			mtd->dev.class = &mtd_class;
			mtd->dev.devt = MTD_DEVT(i);
			dev_set_name(&mtd->dev, "mtd%d", i);
			dev_set_drvdata(&mtd->dev, mtd);
			if (device_register(&mtd->dev) != 0) {
				/* Roll the slot back and fall through to
				 * the failure return below. */
				mtd_table[i] = NULL;
				break;
			}

			/* Companion read-only node /dev/mtdXro, destroyed
			 * again in mtd_release(). */
			if (MTD_DEVT(i))
				device_create(&mtd_class, mtd->dev.parent,
						MTD_DEVT(i) + 1,
						NULL, "mtd%dro", i);

			DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
			/* No need to get a refcount on the module containing
			   the notifier, since we hold the mtd_table_mutex */
			list_for_each_entry(not, &mtd_notifiers, list)
				not->add(mtd);

			mutex_unlock(&mtd_table_mutex);
			/* We _know_ we aren't being removed, because
			   our caller is still holding us here. So none
			   of this try_ nonsense, and no bitching about it
			   either. :) */
			__module_get(THIS_MODULE);
			return 0;
		}

	mutex_unlock(&mtd_table_mutex);
	return 1;
}
360 
/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently active MTD 'user' of its departure.
 *	Returns zero on success, -ENODEV if the device is not registered at
 *	its claimed table slot, or -EBUSY if it still has a non-zero use
 *	count.
 */

int del_mtd_device (struct mtd_info *mtd)
{
	int ret;

	mutex_lock(&mtd_table_mutex);

	if (mtd_table[mtd->index] != mtd) {
		/* Not (or no longer) registered in the table. */
		ret = -ENODEV;
	} else if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		struct mtd_notifier *not;

		device_unregister(&mtd->dev);

		/* No need to get a refcount on the module containing
		   the notifier, since we hold the mtd_table_mutex */
		list_for_each_entry(not, &mtd_notifiers, list)
			not->remove(mtd);

		mtd_table[mtd->index] = NULL;

		/* Drop the reference taken in add_mtd_device(). */
		module_put(THIS_MODULE);
		ret = 0;
	}

	mutex_unlock(&mtd_table_mutex);
	return ret;
}
402 
403 /**
404  *	register_mtd_user - register a 'user' of MTD devices.
405  *	@new: pointer to notifier info structure
406  *
407  *	Registers a pair of callbacks function to be called upon addition
408  *	or removal of MTD devices. Causes the 'add' callback to be immediately
409  *	invoked for each MTD device currently present in the system.
410  */
411 
412 void register_mtd_user (struct mtd_notifier *new)
413 {
414 	int i;
415 
416 	mutex_lock(&mtd_table_mutex);
417 
418 	list_add(&new->list, &mtd_notifiers);
419 
420  	__module_get(THIS_MODULE);
421 
422 	for (i=0; i< MAX_MTD_DEVICES; i++)
423 		if (mtd_table[i])
424 			new->add(mtd_table[i]);
425 
426 	mutex_unlock(&mtd_table_mutex);
427 }
428 
429 /**
430  *	unregister_mtd_user - unregister a 'user' of MTD devices.
431  *	@old: pointer to notifier info structure
432  *
433  *	Removes a callback function pair from the list of 'users' to be
434  *	notified upon addition or removal of MTD devices. Causes the
435  *	'remove' callback to be immediately invoked for each MTD device
436  *	currently present in the system.
437  */
438 
439 int unregister_mtd_user (struct mtd_notifier *old)
440 {
441 	int i;
442 
443 	mutex_lock(&mtd_table_mutex);
444 
445 	module_put(THIS_MODULE);
446 
447 	for (i=0; i< MAX_MTD_DEVICES; i++)
448 		if (mtd_table[i])
449 			old->remove(mtd_table[i]);
450 
451 	list_del(&old->list);
452 	mutex_unlock(&mtd_table_mutex);
453 	return 0;
454 }
455 
456 
/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and NULL address, return the num'th entry in the device
 *	table, if any.	Given an address and num == -1, search the device table
 *	for a device with that address and return if it's still present. Given
 *	both, return the num'th driver only if its address matches. Return
 *	ERR_PTR(-ENODEV) (or the get_device() error) if not.
 */

struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL;
	int i, err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		/* Address-only lookup: confirm the device is still present. */
		for (i=0; i< MAX_MTD_DEVICES; i++)
			if (mtd_table[i] == mtd)
				ret = mtd_table[i];
	} else if (num >= 0 && num < MAX_MTD_DEVICES) {
		/* Index lookup, optionally cross-checked against @mtd. */
		ret = mtd_table[num];
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret)
		goto out_unlock;

	/* Pin the owning module so it cannot be unloaded while in use. */
	if (!try_module_get(ret->owner))
		goto out_unlock;

	/* Optional per-device open hook; its error is returned verbatim. */
	if (ret->get_device) {
		err = ret->get_device(ret);
		if (err)
			goto out_put;
	}

	ret->usecount++;
	mutex_unlock(&mtd_table_mutex);
	return ret;

out_put:
	module_put(ret->owner);
out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
508 
/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 * 	This function returns MTD device description structure in case of
 * 	success and an error code (via ERR_PTR) in case of failure.
 */

struct mtd_info *get_mtd_device_nm(const char *name)
{
	int i, err = -ENODEV;
	struct mtd_info *mtd = NULL;

	mutex_lock(&mtd_table_mutex);

	/* Linear scan for the first device with a matching name. */
	for (i = 0; i < MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) {
			mtd = mtd_table[i];
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	/* Pin the owning module so it cannot be unloaded while in use. */
	if (!try_module_get(mtd->owner))
		goto out_unlock;

	/* Optional per-device open hook; its error is returned verbatim. */
	if (mtd->get_device) {
		err = mtd->get_device(mtd);
		if (err)
			goto out_put;
	}

	mtd->usecount++;
	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_put:
	module_put(mtd->owner);
out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
554 
555 void put_mtd_device(struct mtd_info *mtd)
556 {
557 	int c;
558 
559 	mutex_lock(&mtd_table_mutex);
560 	c = --mtd->usecount;
561 	if (mtd->put_device)
562 		mtd->put_device(mtd);
563 	mutex_unlock(&mtd_table_mutex);
564 	BUG_ON(c < 0);
565 
566 	module_put(mtd->owner);
567 }
568 
569 /* default_mtd_writev - default mtd writev method for MTD devices that
570  *			don't implement their own
571  */
572 
573 int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
574 		       unsigned long count, loff_t to, size_t *retlen)
575 {
576 	unsigned long i;
577 	size_t totlen = 0, thislen;
578 	int ret = 0;
579 
580 	if(!mtd->write) {
581 		ret = -EROFS;
582 	} else {
583 		for (i=0; i<count; i++) {
584 			if (!vecs[i].iov_len)
585 				continue;
586 			ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
587 			totlen += thislen;
588 			if (ret || thislen != vecs[i].iov_len)
589 				break;
590 			to += vecs[i].iov_len;
591 		}
592 	}
593 	if (retlen)
594 		*retlen = totlen;
595 	return ret;
596 }
597 
/* Public entry points for MTD drivers, users and translation layers. */
EXPORT_SYMBOL_GPL(add_mtd_device);
EXPORT_SYMBOL_GPL(del_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
EXPORT_SYMBOL_GPL(put_mtd_device);
EXPORT_SYMBOL_GPL(register_mtd_user);
EXPORT_SYMBOL_GPL(unregister_mtd_user);
EXPORT_SYMBOL_GPL(default_mtd_writev);
606 
607 #ifdef CONFIG_PROC_FS
608 
609 /*====================================================================*/
610 /* Support for /proc/mtd */
611 
/* Handle for the /proc/mtd entry; NULL if creation failed in init_mtd(). */
static struct proc_dir_entry *proc_mtd;
613 
614 static inline int mtd_proc_info (char *buf, int i)
615 {
616 	struct mtd_info *this = mtd_table[i];
617 
618 	if (!this)
619 		return 0;
620 
621 	return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i,
622 		       (unsigned long long)this->size,
623 		       this->erasesize, this->name);
624 }
625 
/* Legacy read_proc handler for /proc/mtd: one header line plus one line
 * per registered device. The off/count/begin arithmetic implements the
 * old procfs partial-read protocol — the caller asks for a byte window
 * [off, off+count) of the full text and we return just that slice. */
static int mtd_read_proc (char *page, char **start, off_t off, int count,
			  int *eof, void *data_unused)
{
	int len, l, i;
        off_t   begin = 0;

	mutex_lock(&mtd_table_mutex);

	len = sprintf(page, "dev:    size   erasesize  name\n");
        for (i=0; i< MAX_MTD_DEVICES; i++) {

                l = mtd_proc_info(page + len, i);
                len += l;
                /* Generated past the requested window: stop early. */
                if (len+begin > off+count)
                        goto done;
                /* Still entirely before the window: discard and restart
                 * the buffer, remembering how much was skipped. */
                if (len+begin < off) {
                        begin += len;
                        len = 0;
                }
        }

        /* All devices emitted: the full text ends here. */
        *eof = 1;

done:
	mutex_unlock(&mtd_table_mutex);
        if (off >= len+begin)
                return 0;
        *start = page + (off-begin);
        return ((count < begin+len-off) ? count : begin+len-off);
}
656 
657 #endif /* CONFIG_PROC_FS */
658 
659 /*====================================================================*/
660 /* Init code */
661 
662 static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
663 {
664 	int ret;
665 
666 	ret = bdi_init(bdi);
667 	if (!ret)
668 		ret = bdi_register(bdi, NULL, name);
669 
670 	if (ret)
671 		bdi_destroy(bdi);
672 
673 	return ret;
674 }
675 
/* Module init: register the mtd class, the three default bdi objects,
 * and (best-effort) the /proc/mtd entry. */
static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
	if (ret)
		goto err_bdi1;

	ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
	if (ret)
		goto err_bdi2;

	ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
	if (ret)
		goto err_bdi3;

#ifdef CONFIG_PROC_FS
	/* /proc/mtd is optional; failure to create it is not fatal. */
	if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
		proc_mtd->read_proc = mtd_read_proc;
#endif /* CONFIG_PROC_FS */
	return 0;

	/* Unwind in reverse order. A failed mtd_bdi_init() has already
	 * destroyed its own bdi, so each label only cleans up the steps
	 * that completed before it. */
err_bdi3:
	bdi_destroy(&mtd_bdi_ro_mappable);
err_bdi2:
	bdi_destroy(&mtd_bdi_unmappable);
err_bdi1:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}
712 
/* Module exit: undo everything init_mtd() set up, in reverse order. */
static void __exit cleanup_mtd(void)
{
#ifdef CONFIG_PROC_FS
	/* Remove /proc/mtd only if init_mtd() managed to create it. */
        if (proc_mtd)
		remove_proc_entry( "mtd", NULL);
#endif /* CONFIG_PROC_FS */
	class_unregister(&mtd_class);
	bdi_destroy(&mtd_bdi_unmappable);
	bdi_destroy(&mtd_bdi_ro_mappable);
	bdi_destroy(&mtd_bdi_rw_mappable);
}
724 
/* Standard module plumbing. */
module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");
731