xref: /linux/drivers/mtd/mtdchar.c (revision b3b77c8caef1750ebeea1054e39e358550ea9f55)
1 /*
2  * Character-device access to raw MTD devices.
3  *
4  */
5 
6 #include <linux/device.h>
7 #include <linux/fs.h>
8 #include <linux/mm.h>
9 #include <linux/err.h>
10 #include <linux/init.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/sched.h>
15 #include <linux/smp_lock.h>
16 #include <linux/backing-dev.h>
17 #include <linux/compat.h>
18 #include <linux/mount.h>
19 
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/compatmac.h>
22 
23 #include <asm/uaccess.h>
24 
25 #define MTD_INODE_FS_MAGIC 0x11307854
26 static struct vfsmount *mtd_inode_mnt __read_mostly;
27 
28 /*
29  * Data structure to hold the pointer to the mtd device as well
30  * as mode information for various use cases.
31  */
32 struct mtd_file_info {
33 	struct mtd_info *mtd;
34 	struct inode *ino;
35 	enum mtd_file_modes mode;
36 };
37 
38 static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
39 {
40 	struct mtd_file_info *mfi = file->private_data;
41 	struct mtd_info *mtd = mfi->mtd;
42 
43 	switch (orig) {
44 	case SEEK_SET:
45 		break;
46 	case SEEK_CUR:
47 		offset += file->f_pos;
48 		break;
49 	case SEEK_END:
50 		offset += mtd->size;
51 		break;
52 	default:
53 		return -EINVAL;
54 	}
55 
56 	if (offset >= 0 && offset <= mtd->size)
57 		return file->f_pos = offset;
58 
59 	return -EINVAL;
60 }
61 
62 
63 
64 static int mtd_open(struct inode *inode, struct file *file)
65 {
66 	int minor = iminor(inode);
67 	int devnum = minor >> 1;
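	/* Each MTD device owns two character minors: the even minor is
	 * /dev/mtdX (read-write) and the odd one is /dev/mtdXro (read-only),
	 * hence devnum = minor >> 1 and the (minor & 1) check below. */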
68 	int ret = 0;
69 	struct mtd_info *mtd;
70 	struct mtd_file_info *mfi;
71 	struct inode *mtd_ino;
72 
73 	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
74 
75 	/* You can't open the RO devices RW */
76 	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
77 		return -EACCES;
78 
79 	lock_kernel();
80 	mtd = get_mtd_device(NULL, devnum);
81 
82 	if (IS_ERR(mtd)) {
83 		ret = PTR_ERR(mtd);
84 		goto out;
85 	}
86 
87 	if (mtd->type == MTD_ABSENT) {
88 		put_mtd_device(mtd);
89 		ret = -ENODEV;
90 		goto out;
91 	}
92 
93 	mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
94 	if (!mtd_ino) {
95 		put_mtd_device(mtd);
96 		ret = -ENOMEM;
97 		goto out;
98 	}
99 	if (mtd_ino->i_state & I_NEW) {
100 		mtd_ino->i_private = mtd;
101 		mtd_ino->i_mode = S_IFCHR;
102 		mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
103 		unlock_new_inode(mtd_ino);
104 	}
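	/* Point the file at the per-device inode's mapping so that all
	 * openers of this MTD device share one address_space and pick up its
	 * backing_dev_info (set above when the inode is first created). */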
105 	file->f_mapping = mtd_ino->i_mapping;
106 
107 	/* You can't open it RW if it's not a writeable device */
108 	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
109 		iput(mtd_ino);
110 		put_mtd_device(mtd);
111 		ret = -EACCES;
112 		goto out;
113 	}
114 
115 	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
116 	if (!mfi) {
117 		iput(mtd_ino);
118 		put_mtd_device(mtd);
119 		ret = -ENOMEM;
120 		goto out;
121 	}
122 	mfi->ino = mtd_ino;
123 	mfi->mtd = mtd;
124 	file->private_data = mfi;
125 
126 out:
127 	unlock_kernel();
128 	return ret;
129 } /* mtd_open */
130 
131 /*====================================================================*/
132 
133 static int mtd_close(struct inode *inode, struct file *file)
134 {
135 	struct mtd_file_info *mfi = file->private_data;
136 	struct mtd_info *mtd = mfi->mtd;
137 
138 	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
139 
140 	/* Only sync if opened RW */
141 	if ((file->f_mode & FMODE_WRITE) && mtd->sync)
142 		mtd->sync(mtd);
143 
144 	iput(mfi->ino);
145 
146 	put_mtd_device(mtd);
147 	file->private_data = NULL;
148 	kfree(mfi);
149 
150 	return 0;
151 } /* mtd_close */
152 
153 /* FIXME: This _really_ needs to die. In 2.5, we should lock the
154    userspace buffer down and use it directly with readv/writev.
155 */
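/* 0x20000 = 128KiB: largest bounce buffer mtd_read()/mtd_write() will
 * kmalloc(); larger requests are handled in chunks of this size. */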
156 #define MAX_KMALLOC_SIZE 0x20000
157 
158 static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
159 {
160 	struct mtd_file_info *mfi = file->private_data;
161 	struct mtd_info *mtd = mfi->mtd;
162 	size_t retlen=0;
163 	size_t total_retlen=0;
164 	int ret=0;
165 	int len;
166 	char *kbuf;
167 
168 	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
169 
170 	if (*ppos + count > mtd->size)
171 		count = mtd->size - *ppos;
172 
173 	if (!count)
174 		return 0;
175 
176 	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
177 	   and pass them directly to the MTD functions */
178 
179 	if (count > MAX_KMALLOC_SIZE)
180 		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
181 	else
182 		kbuf=kmalloc(count, GFP_KERNEL);
183 
184 	if (!kbuf)
185 		return -ENOMEM;
186 
187 	while (count) {
188 
189 		if (count > MAX_KMALLOC_SIZE)
190 			len = MAX_KMALLOC_SIZE;
191 		else
192 			len = count;
193 
194 		switch (mfi->mode) {
195 		case MTD_MODE_OTP_FACTORY:
196 			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
197 			break;
198 		case MTD_MODE_OTP_USER:
199 			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
200 			break;
201 		case MTD_MODE_RAW:
202 		{
203 			struct mtd_oob_ops ops;
204 
205 			ops.mode = MTD_OOB_RAW;
206 			ops.datbuf = kbuf;
207 			ops.oobbuf = NULL;
208 			ops.len = len;
209 
210 			ret = mtd->read_oob(mtd, *ppos, &ops);
211 			retlen = ops.retlen;
212 			break;
213 		}
214 		default:
215 			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
216 		}
217 		/* NAND returns -EBADMSG on ECC errors, but it still returns
218 		 * the data. For our userspace tools it is important
219 		 * to dump areas with ECC errors!
220 		 * For kernel-internal use it may also return -EUCLEAN
221 		 * to signal the caller that a bitflip has occurred and has
222 		 * been corrected by the ECC algorithm.
223 		 * Userspace software which accesses NAND this way
224 		 * must be aware of the fact that it deals with NAND.
225 		 */
226 		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
227 			*ppos += retlen;
228 			if (copy_to_user(buf, kbuf, retlen)) {
229 				kfree(kbuf);
230 				return -EFAULT;
231 			}
232 			else
233 				total_retlen += retlen;
234 
235 			count -= retlen;
236 			buf += retlen;
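			/* The device reported success but transferred
			 * nothing; stop here rather than loop forever. */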
237 			if (retlen == 0)
238 				count = 0;
239 		}
240 		else {
241 			kfree(kbuf);
242 			return ret;
243 		}
244 
245 	}
246 
247 	kfree(kbuf);
248 	return total_retlen;
249 } /* mtd_read */
250 
251 static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
252 {
253 	struct mtd_file_info *mfi = file->private_data;
254 	struct mtd_info *mtd = mfi->mtd;
255 	char *kbuf;
256 	size_t retlen;
257 	size_t total_retlen=0;
258 	int ret=0;
259 	int len;
260 
261 	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
262 
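	/* A write starting exactly at the end of the device: device full. */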
263 	if (*ppos == mtd->size)
264 		return -ENOSPC;
265 
266 	if (*ppos + count > mtd->size)
267 		count = mtd->size - *ppos;
268 
269 	if (!count)
270 		return 0;
271 
272 	if (count > MAX_KMALLOC_SIZE)
273 		kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
274 	else
275 		kbuf=kmalloc(count, GFP_KERNEL);
276 
277 	if (!kbuf)
278 		return -ENOMEM;
279 
280 	while (count) {
281 
282 		if (count > MAX_KMALLOC_SIZE)
283 			len = MAX_KMALLOC_SIZE;
284 		else
285 			len = count;
286 
287 		if (copy_from_user(kbuf, buf, len)) {
288 			kfree(kbuf);
289 			return -EFAULT;
290 		}
291 
292 		switch (mfi->mode) {
293 		case MTD_MODE_OTP_FACTORY:
294 			ret = -EROFS;
295 			break;
296 		case MTD_MODE_OTP_USER:
297 			if (!mtd->write_user_prot_reg) {
298 				ret = -EOPNOTSUPP;
299 				break;
300 			}
301 			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
302 			break;
303 
304 		case MTD_MODE_RAW:
305 		{
306 			struct mtd_oob_ops ops;
307 
308 			ops.mode = MTD_OOB_RAW;
309 			ops.datbuf = kbuf;
310 			ops.oobbuf = NULL;
311 			ops.len = len;
312 
313 			ret = mtd->write_oob(mtd, *ppos, &ops);
314 			retlen = ops.retlen;
315 			break;
316 		}
317 
318 		default:
319 			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
320 		}
321 		if (!ret) {
322 			*ppos += retlen;
323 			total_retlen += retlen;
324 			count -= retlen;
325 			buf += retlen;
326 		}
327 		else {
328 			kfree(kbuf);
329 			return ret;
330 		}
331 	}
332 
333 	kfree(kbuf);
334 	return total_retlen;
335 } /* mtd_write */
336 
337 /*======================================================================
338 
339     IOCTL calls for getting device parameters.
340 
341 ======================================================================*/
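
/*
 * Illustrative userspace sketch (not part of this driver, needs
 * <mtd/mtd-user.h>): a typical caller opens /dev/mtdX and issues MEMGETINFO,
 * which the handler below answers with a struct mtd_info_user:
 *
 *	int fd = open("/dev/mtd0", O_RDONLY);
 *	struct mtd_info_user info;
 *
 *	if (fd >= 0 && ioctl(fd, MEMGETINFO, &info) == 0)
 *		printf("size %u erasesize %u\n", info.size, info.erasesize);
 */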
342 static void mtdchar_erase_callback (struct erase_info *instr)
343 {
344 	wake_up((wait_queue_head_t *)instr->priv);
345 }
346 
347 #ifdef CONFIG_HAVE_MTD_OTP
348 static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
349 {
350 	struct mtd_info *mtd = mfi->mtd;
351 	int ret = 0;
352 
353 	switch (mode) {
354 	case MTD_OTP_FACTORY:
355 		if (!mtd->read_fact_prot_reg)
356 			ret = -EOPNOTSUPP;
357 		else
358 			mfi->mode = MTD_MODE_OTP_FACTORY;
359 		break;
360 	case MTD_OTP_USER:
361 		if (!mtd->read_user_prot_reg)
362 			ret = -EOPNOTSUPP;
363 		else
364 			mfi->mode = MTD_MODE_OTP_USER;
365 		break;
366 	default:
367 		ret = -EINVAL;
368 	case MTD_OTP_OFF:
369 		break;
370 	}
371 	return ret;
372 }
373 #else
374 # define otp_select_filemode(f,m)	-EOPNOTSUPP
375 #endif
376 
377 static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
378 	uint64_t start, uint32_t length, void __user *ptr,
379 	uint32_t __user *retp)
380 {
381 	struct mtd_oob_ops ops;
382 	uint32_t retlen;
383 	int ret = 0;
384 
385 	if (!(file->f_mode & FMODE_WRITE))
386 		return -EPERM;
387 
388 	if (length > 4096)
389 		return -EINVAL;
390 
391 	if (!mtd->write_oob)
392 		ret = -EOPNOTSUPP;
393 	else
394 		ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
395 
396 	if (ret)
397 		return ret;
398 
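	/* Legacy addressing quirk: the low bits of 'start' (modulo oobsize,
	 * assumed to be a power of two) select the offset inside the OOB
	 * area, and 'start' itself is masked down to an oobsize multiple
	 * below before being passed to mtd->write_oob(). */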
399 	ops.ooblen = length;
400 	ops.ooboffs = start & (mtd->oobsize - 1);
401 	ops.datbuf = NULL;
402 	ops.mode = MTD_OOB_PLACE;
403 
404 	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
405 		return -EINVAL;
406 
407 	ops.oobbuf = kmalloc(length, GFP_KERNEL);
408 	if (!ops.oobbuf)
409 		return -ENOMEM;
410 
411 	if (copy_from_user(ops.oobbuf, ptr, length)) {
412 		kfree(ops.oobbuf);
413 		return -EFAULT;
414 	}
415 
416 	start &= ~((uint64_t)mtd->oobsize - 1);
417 	ret = mtd->write_oob(mtd, start, &ops);
418 
419 	if (ops.oobretlen > 0xFFFFFFFFU)
420 		ret = -EOVERFLOW;
421 	retlen = ops.oobretlen;
422 	if (copy_to_user(retp, &retlen, sizeof(retlen)))
423 		ret = -EFAULT;
424 
425 	kfree(ops.oobbuf);
426 	return ret;
427 }
428 
429 static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
430 	uint32_t length, void __user *ptr, uint32_t __user *retp)
431 {
432 	struct mtd_oob_ops ops;
433 	int ret = 0;
434 
435 	if (length > 4096)
436 		return -EINVAL;
437 
438 	if (!mtd->read_oob)
439 		ret = -EOPNOTSUPP;
440 	else
441 		ret = access_ok(VERIFY_WRITE, ptr,
442 				length) ? 0 : -EFAULT;
443 	if (ret)
444 		return ret;
445 
446 	ops.ooblen = length;
447 	ops.ooboffs = start & (mtd->oobsize - 1);
448 	ops.datbuf = NULL;
449 	ops.mode = MTD_OOB_PLACE;
450 
451 	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
452 		return -EINVAL;
453 
454 	ops.oobbuf = kmalloc(length, GFP_KERNEL);
455 	if (!ops.oobbuf)
456 		return -ENOMEM;
457 
458 	start &= ~((uint64_t)mtd->oobsize - 1);
459 	ret = mtd->read_oob(mtd, start, &ops);
460 
461 	if (put_user(ops.oobretlen, retp))
462 		ret = -EFAULT;
463 	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
464 					    ops.oobretlen))
465 		ret = -EFAULT;
466 
467 	kfree(ops.oobbuf);
468 	return ret;
469 }
470 
471 static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
472 {
473 	struct mtd_file_info *mfi = file->private_data;
474 	struct mtd_info *mtd = mfi->mtd;
475 	void __user *argp = (void __user *)arg;
476 	int ret = 0;
477 	u_long size;
478 	struct mtd_info_user info;
479 
480 	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
481 
482 	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
483 	if (cmd & IOC_IN) {
484 		if (!access_ok(VERIFY_READ, argp, size))
485 			return -EFAULT;
486 	}
487 	if (cmd & IOC_OUT) {
488 		if (!access_ok(VERIFY_WRITE, argp, size))
489 			return -EFAULT;
490 	}
491 
492 	switch (cmd) {
493 	case MEMGETREGIONCOUNT:
494 		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
495 			return -EFAULT;
496 		break;
497 
498 	case MEMGETREGIONINFO:
499 	{
500 		uint32_t ur_idx;
501 		struct mtd_erase_region_info *kr;
502 		struct region_info_user __user *ur = argp;
503 
504 		if (get_user(ur_idx, &(ur->regionindex)))
505 			return -EFAULT;
506 
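		/* Editorial note: this revision does not validate ur_idx, so
		 * a bogus regionindex indexes past eraseregions[]; a minimal
		 * defensive check would be: */
		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;
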
507 		kr = &(mtd->eraseregions[ur_idx]);
508 
509 		if (put_user(kr->offset, &(ur->offset))
510 		    || put_user(kr->erasesize, &(ur->erasesize))
511 		    || put_user(kr->numblocks, &(ur->numblocks)))
512 			return -EFAULT;
513 
514 		break;
515 	}
516 
517 	case MEMGETINFO:
518 		info.type	= mtd->type;
519 		info.flags	= mtd->flags;
520 		info.size	= mtd->size;
521 		info.erasesize	= mtd->erasesize;
522 		info.writesize	= mtd->writesize;
523 		info.oobsize	= mtd->oobsize;
524 		/* The below fields are obsolete */
525 		info.ecctype	= -1;
526 		info.eccsize	= 0;
527 		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
528 			return -EFAULT;
529 		break;
530 
531 	case MEMERASE:
532 	case MEMERASE64:
533 	{
534 		struct erase_info *erase;
535 
536 		if(!(file->f_mode & FMODE_WRITE))
537 			return -EPERM;
538 
539 		erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
540 		if (!erase)
541 			ret = -ENOMEM;
542 		else {
543 			wait_queue_head_t waitq;
544 			DECLARE_WAITQUEUE(wait, current);
545 
546 			init_waitqueue_head(&waitq);
547 
548 			if (cmd == MEMERASE64) {
549 				struct erase_info_user64 einfo64;
550 
551 				if (copy_from_user(&einfo64, argp,
552 					    sizeof(struct erase_info_user64))) {
553 					kfree(erase);
554 					return -EFAULT;
555 				}
556 				erase->addr = einfo64.start;
557 				erase->len = einfo64.length;
558 			} else {
559 				struct erase_info_user einfo32;
560 
561 				if (copy_from_user(&einfo32, argp,
562 					    sizeof(struct erase_info_user))) {
563 					kfree(erase);
564 					return -EFAULT;
565 				}
566 				erase->addr = einfo32.start;
567 				erase->len = einfo32.length;
568 			}
569 			erase->mtd = mtd;
570 			erase->callback = mtdchar_erase_callback;
571 			erase->priv = (unsigned long)&waitq;
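			/* On completion the driver calls
			 * mtdchar_erase_callback(), which wakes this wait
			 * queue through the pointer stashed in erase->priv. */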
572 
573 			/*
574 			  FIXME: Allow INTERRUPTIBLE. Which means
575 			  not having the wait_queue head on the stack.
576 
577 			  If the wq_head is on the stack, and we
578 			  leave because we got interrupted, then the
579 			  wq_head is no longer there when the
580 			  callback routine tries to wake us up.
581 			*/
582 			ret = mtd->erase(mtd, erase);
583 			if (!ret) {
584 				set_current_state(TASK_UNINTERRUPTIBLE);
585 				add_wait_queue(&waitq, &wait);
586 				if (erase->state != MTD_ERASE_DONE &&
587 				    erase->state != MTD_ERASE_FAILED)
588 					schedule();
589 				remove_wait_queue(&waitq, &wait);
590 				set_current_state(TASK_RUNNING);
591 
592 				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
593 			}
594 			kfree(erase);
595 		}
596 		break;
597 	}
598 
599 	case MEMWRITEOOB:
600 	{
601 		struct mtd_oob_buf buf;
602 		struct mtd_oob_buf __user *buf_user = argp;
603 
604 		/* NOTE: writes return length to buf_user->length */
605 		if (copy_from_user(&buf, argp, sizeof(buf)))
606 			ret = -EFAULT;
607 		else
608 			ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
609 				buf.ptr, &buf_user->length);
610 		break;
611 	}
612 
613 	case MEMREADOOB:
614 	{
615 		struct mtd_oob_buf buf;
616 		struct mtd_oob_buf __user *buf_user = argp;
617 
618 		/* NOTE: writes return length to buf_user->start */
619 		if (copy_from_user(&buf, argp, sizeof(buf)))
620 			ret = -EFAULT;
621 		else
622 			ret = mtd_do_readoob(mtd, buf.start, buf.length,
623 				buf.ptr, &buf_user->start);
624 		break;
625 	}
626 
627 	case MEMWRITEOOB64:
628 	{
629 		struct mtd_oob_buf64 buf;
630 		struct mtd_oob_buf64 __user *buf_user = argp;
631 
632 		if (copy_from_user(&buf, argp, sizeof(buf)))
633 			ret = -EFAULT;
634 		else
635 			ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
636 				(void __user *)(uintptr_t)buf.usr_ptr,
637 				&buf_user->length);
638 		break;
639 	}
640 
641 	case MEMREADOOB64:
642 	{
643 		struct mtd_oob_buf64 buf;
644 		struct mtd_oob_buf64 __user *buf_user = argp;
645 
646 		if (copy_from_user(&buf, argp, sizeof(buf)))
647 			ret = -EFAULT;
648 		else
649 			ret = mtd_do_readoob(mtd, buf.start, buf.length,
650 				(void __user *)(uintptr_t)buf.usr_ptr,
651 				&buf_user->length);
652 		break;
653 	}
654 
655 	case MEMLOCK:
656 	{
657 		struct erase_info_user einfo;
658 
659 		if (copy_from_user(&einfo, argp, sizeof(einfo)))
660 			return -EFAULT;
661 
662 		if (!mtd->lock)
663 			ret = -EOPNOTSUPP;
664 		else
665 			ret = mtd->lock(mtd, einfo.start, einfo.length);
666 		break;
667 	}
668 
669 	case MEMUNLOCK:
670 	{
671 		struct erase_info_user einfo;
672 
673 		if (copy_from_user(&einfo, argp, sizeof(einfo)))
674 			return -EFAULT;
675 
676 		if (!mtd->unlock)
677 			ret = -EOPNOTSUPP;
678 		else
679 			ret = mtd->unlock(mtd, einfo.start, einfo.length);
680 		break;
681 	}
682 
683 	/* Legacy interface */
684 	case MEMGETOOBSEL:
685 	{
686 		struct nand_oobinfo oi;
687 
688 		if (!mtd->ecclayout)
689 			return -EOPNOTSUPP;
690 		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
691 			return -EINVAL;
692 
693 		oi.useecc = MTD_NANDECC_AUTOPLACE;
694 		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
695 		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
696 		       sizeof(oi.oobfree));
697 		oi.eccbytes = mtd->ecclayout->eccbytes;
698 
699 		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
700 			return -EFAULT;
701 		break;
702 	}
703 
704 	case MEMGETBADBLOCK:
705 	{
706 		loff_t offs;
707 
708 		if (copy_from_user(&offs, argp, sizeof(loff_t)))
709 			return -EFAULT;
710 		if (!mtd->block_isbad)
711 			ret = -EOPNOTSUPP;
712 		else
713 			return mtd->block_isbad(mtd, offs);
714 		break;
715 	}
716 
717 	case MEMSETBADBLOCK:
718 	{
719 		loff_t offs;
720 
721 		if (copy_from_user(&offs, argp, sizeof(loff_t)))
722 			return -EFAULT;
723 		if (!mtd->block_markbad)
724 			ret = -EOPNOTSUPP;
725 		else
726 			return mtd->block_markbad(mtd, offs);
727 		break;
728 	}
729 
730 #ifdef CONFIG_HAVE_MTD_OTP
731 	case OTPSELECT:
732 	{
733 		int mode;
734 		if (copy_from_user(&mode, argp, sizeof(int)))
735 			return -EFAULT;
736 
737 		mfi->mode = MTD_MODE_NORMAL;
738 
739 		ret = otp_select_filemode(mfi, mode);
740 
741 		file->f_pos = 0;
742 		break;
743 	}
744 
745 	case OTPGETREGIONCOUNT:
746 	case OTPGETREGIONINFO:
747 	{
748 		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
749 		if (!buf)
750 			return -ENOMEM;
751 		ret = -EOPNOTSUPP;
752 		switch (mfi->mode) {
753 		case MTD_MODE_OTP_FACTORY:
754 			if (mtd->get_fact_prot_info)
755 				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
756 			break;
757 		case MTD_MODE_OTP_USER:
758 			if (mtd->get_user_prot_info)
759 				ret = mtd->get_user_prot_info(mtd, buf, 4096);
760 			break;
761 		default:
762 			break;
763 		}
764 		if (ret >= 0) {
765 			if (cmd == OTPGETREGIONCOUNT) {
766 				int nbr = ret / sizeof(struct otp_info);
767 				ret = copy_to_user(argp, &nbr, sizeof(int));
768 			} else
769 				ret = copy_to_user(argp, buf, ret);
770 			if (ret)
771 				ret = -EFAULT;
772 		}
773 		kfree(buf);
774 		break;
775 	}
776 
777 	case OTPLOCK:
778 	{
779 		struct otp_info oinfo;
780 
781 		if (mfi->mode != MTD_MODE_OTP_USER)
782 			return -EINVAL;
783 		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
784 			return -EFAULT;
785 		if (!mtd->lock_user_prot_reg)
786 			return -EOPNOTSUPP;
787 		ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
788 		break;
789 	}
790 #endif
791 
792 	case ECCGETLAYOUT:
793 	{
794 		if (!mtd->ecclayout)
795 			return -EOPNOTSUPP;
796 
797 		if (copy_to_user(argp, mtd->ecclayout,
798 				 sizeof(struct nand_ecclayout)))
799 			return -EFAULT;
800 		break;
801 	}
802 
803 	case ECCGETSTATS:
804 	{
805 		if (copy_to_user(argp, &mtd->ecc_stats,
806 				 sizeof(struct mtd_ecc_stats)))
807 			return -EFAULT;
808 		break;
809 	}
810 
811 	case MTDFILEMODE:
812 	{
813 		mfi->mode = 0;
814 
815 		switch(arg) {
816 		case MTD_MODE_OTP_FACTORY:
817 		case MTD_MODE_OTP_USER:
818 			ret = otp_select_filemode(mfi, arg);
819 			break;
820 
821 		case MTD_MODE_RAW:
822 			if (!mtd->read_oob || !mtd->write_oob)
823 				return -EOPNOTSUPP;
824 			mfi->mode = arg;
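			/* fall through */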
825 
826 		case MTD_MODE_NORMAL:
827 			break;
828 		default:
829 			ret = -EINVAL;
830 		}
831 		file->f_pos = 0;
832 		break;
833 	}
834 
835 	default:
836 		ret = -ENOTTY;
837 	}
838 
839 	return ret;
840 } /* mtd_ioctl */
841 
842 static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
843 {
844 	int ret;
845 
846 	lock_kernel();
847 	ret = mtd_ioctl(file, cmd, arg);
848 	unlock_kernel();
849 
850 	return ret;
851 }
852 
853 #ifdef CONFIG_COMPAT
854 
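/*
 * 32-bit userspace layout of struct mtd_oob_buf: same fields, but the data
 * pointer shrinks to a compat_caddr_t, so the ioctl numbers differ and need
 * the translation handlers below.
 */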
855 struct mtd_oob_buf32 {
856 	u_int32_t start;
857 	u_int32_t length;
858 	compat_caddr_t ptr;	/* unsigned char* */
859 };
860 
861 #define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
862 #define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)
863 
864 static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
865 	unsigned long arg)
866 {
867 	struct mtd_file_info *mfi = file->private_data;
868 	struct mtd_info *mtd = mfi->mtd;
869 	void __user *argp = compat_ptr(arg);
870 	int ret = 0;
871 
872 	lock_kernel();
873 
874 	switch (cmd) {
875 	case MEMWRITEOOB32:
876 	{
877 		struct mtd_oob_buf32 buf;
878 		struct mtd_oob_buf32 __user *buf_user = argp;
879 
880 		if (copy_from_user(&buf, argp, sizeof(buf)))
881 			ret = -EFAULT;
882 		else
883 			ret = mtd_do_writeoob(file, mtd, buf.start,
884 				buf.length, compat_ptr(buf.ptr),
885 				&buf_user->length);
886 		break;
887 	}
888 
889 	case MEMREADOOB32:
890 	{
891 		struct mtd_oob_buf32 buf;
892 		struct mtd_oob_buf32 __user *buf_user = argp;
893 
894 		/* NOTE: writes return length to buf->start */
895 		if (copy_from_user(&buf, argp, sizeof(buf)))
896 			ret = -EFAULT;
897 		else
898 			ret = mtd_do_readoob(mtd, buf.start,
899 				buf.length, compat_ptr(buf.ptr),
900 				&buf_user->start);
901 		break;
902 	}
903 	default:
904 		ret = mtd_ioctl(file, cmd, (unsigned long)argp);
905 	}
906 
907 	unlock_kernel();
908 
909 	return ret;
910 }
911 
912 #endif /* CONFIG_COMPAT */
913 
914 /*
915  * try to determine where a shared mapping can be made
916  * - only supported for NOMMU at the moment (the MMU case can't be
917  *   supported here as private mappings are not copied)
918  */
919 #ifndef CONFIG_MMU
920 static unsigned long mtd_get_unmapped_area(struct file *file,
921 					   unsigned long addr,
922 					   unsigned long len,
923 					   unsigned long pgoff,
924 					   unsigned long flags)
925 {
926 	struct mtd_file_info *mfi = file->private_data;
927 	struct mtd_info *mtd = mfi->mtd;
928 
929 	if (mtd->get_unmapped_area) {
930 		unsigned long offset;
931 
932 		if (addr != 0)
933 			return (unsigned long) -EINVAL;
934 
935 		if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
936 			return (unsigned long) -EINVAL;
937 
938 		offset = pgoff << PAGE_SHIFT;
939 		if (offset > mtd->size - len)
940 			return (unsigned long) -EINVAL;
941 
942 		return mtd->get_unmapped_area(mtd, len, offset, flags);
943 	}
944 
945 	/* can't map directly */
946 	return (unsigned long) -ENOSYS;
947 }
948 #endif
949 
950 /*
951  * set up a mapping for shared memory segments
952  */
953 static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
954 {
955 #ifdef CONFIG_MMU
956 	struct mtd_file_info *mfi = file->private_data;
957 	struct mtd_info *mtd = mfi->mtd;
958 
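	/* With an MMU, only directly mappable memory-like devices (RAM/ROM)
	 * can be mmapped; other device types cannot be handled here. */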
959 	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
960 		return 0;
961 	return -ENOSYS;
962 #else
963 	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
964 #endif
965 }
966 
967 static const struct file_operations mtd_fops = {
968 	.owner		= THIS_MODULE,
969 	.llseek		= mtd_lseek,
970 	.read		= mtd_read,
971 	.write		= mtd_write,
972 	.unlocked_ioctl	= mtd_unlocked_ioctl,
973 #ifdef CONFIG_COMPAT
974 	.compat_ioctl	= mtd_compat_ioctl,
975 #endif
976 	.open		= mtd_open,
977 	.release	= mtd_close,
978 	.mmap		= mtd_mmap,
979 #ifndef CONFIG_MMU
980 	.get_unmapped_area = mtd_get_unmapped_area,
981 #endif
982 };
983 
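/*
 * Internal pseudo filesystem backing the per-device inodes handed out in
 * mtd_open(); it exists only so that every opener of an MTD device can
 * share a single struct address_space.
 */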
984 static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
985 			      const char *dev_name, void *data,
986 			      struct vfsmount *mnt)
987 {
988 	return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
989 			     mnt);
990 }
991 
992 static struct file_system_type mtd_inodefs_type = {
993 	.name = "mtd_inodefs",
994 	.get_sb = mtd_inodefs_get_sb,
995 	.kill_sb = kill_anon_super,
996 };
997 
998 static void mtdchar_notify_add(struct mtd_info *mtd)
999 {
1000 }
1001 
1002 static void mtdchar_notify_remove(struct mtd_info *mtd)
1003 {
1004 	struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
1005 
1006 	if (mtd_ino) {
1007 		/* Destroy the inode if it exists */
1008 		mtd_ino->i_nlink = 0;
1009 		iput(mtd_ino);
1010 	}
1011 }
1012 
1013 static struct mtd_notifier mtdchar_notifier = {
1014 	.add = mtdchar_notify_add,
1015 	.remove = mtdchar_notify_remove,
1016 };
1017 
1018 static int __init init_mtdchar(void)
1019 {
1020 	int ret;
1021 
1022 	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
1023 				   "mtd", &mtd_fops);
1024 	if (ret < 0) {
1025 		pr_notice("Can't allocate major number %d for "
1026 				"Memory Technology Devices.\n", MTD_CHAR_MAJOR);
1027 		return ret;
1028 	}
1029 
1030 	ret = register_filesystem(&mtd_inodefs_type);
1031 	if (ret) {
1032 		pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
1033 		goto err_unregister_chdev;
1034 	}
1035 
1036 	mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
1037 	if (IS_ERR(mtd_inode_mnt)) {
1038 		ret = PTR_ERR(mtd_inode_mnt);
1039 		pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
1040 		goto err_unregister_filesystem;
1041 	}
1042 	register_mtd_user(&mtdchar_notifier);
1043 
1044 	return ret;
1045 
1046 err_unregister_filesystem:
1047 	unregister_filesystem(&mtd_inodefs_type);
1048 err_unregister_chdev:
1049 	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1050 	return ret;
1051 }
1052 
1053 static void __exit cleanup_mtdchar(void)
1054 {
1055 	unregister_mtd_user(&mtdchar_notifier);
1056 	mntput(mtd_inode_mnt);
1057 	unregister_filesystem(&mtd_inodefs_type);
1058 	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1059 }
1060 
1061 module_init(init_mtdchar);
1062 module_exit(cleanup_mtdchar);
1063 
1064 MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
1065 
1066 MODULE_LICENSE("GPL");
1067 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
1068 MODULE_DESCRIPTION("Direct character-device access to MTD devices");
1070