xref: /linux/drivers/char/mem.c (revision 273b281fa22c293963ee3e6eec418f5dda2dbc83)
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
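
/*
 * Illustrative userspace sketch (not part of this driver): on the
 * generic branch above, opening with O_DSYNC requests non-cached
 * access, so a later mmap() of /dev/mem gets pgprot_noncached()
 * applied via phys_mem_access_prot() below.  The physical address
 * is a made-up example.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_DSYNC);
 *	void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0xfee00000);
 */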

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
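
/*
 * With CONFIG_STRICT_DEVMEM, any read(), write() or mmap() of /dev/mem
 * touching a page that devmem_is_allowed() rejects fails with -EPERM
 * and logs the message above.  A hedged userspace sketch (the offset is
 * a hypothetical page of kernel-owned RAM):
 *
 *	char c;
 *	if (pread(fd, &c, 1, 0x1000000) < 0)
 *		perror("pread");	// EPERM expected on a strict kernel
 */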

void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory.  The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, sz)) {
			unxlate_dev_mem_ptr(p, ptr);
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
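
/*
 * Usage sketch (userspace, illustrative): the file offset of /dev/mem
 * is interpreted as a physical address, so one pread() reads physical
 * memory directly.  0xA0000 (legacy VGA memory on PCs) is only an
 * example; the range must pass the checks above.
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	unsigned char vga[16];
 *	ssize_t n = pread(fd, vga, sizeof(vga), 0xA0000);
 */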

static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			unxlate_dev_mem_ptr(p, ptr);
			if (written)
				break;
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
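
/*
 * Usage sketch (userspace, illustrative): for mmap() of /dev/mem the
 * file offset is the physical address (vm_pgoff carries it as a page
 * frame number), so it must be page aligned.  The address below is
 * hypothetical.
 *
 *	off_t phys = 0xfebf0000;		// page aligned
 *	void *va = mmap(NULL, 4096, PROT_READ,
 *			MAP_SHARED, fd, phys);
 */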

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif
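
/*
 * Usage sketch (userspace, illustrative): after a kexec/kdump crash
 * boot, dump tools read the previous kernel's memory through
 * /dev/oldmem much like /dev/mem; reads past saved_max_pfn simply
 * come back short.
 *
 *	int fd = open("/dev/oldmem", O_RDONLY);
 *	char page[4096];
 *	pread(fd, page, sizeof(page), 0);	// first page of the old kernel
 */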

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count)
				tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}
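
/*
 * Usage sketch (userspace, illustrative): the offset of /dev/kmem is a
 * kernel *virtual* address, traditionally taken from System.map.  The
 * address below is entirely hypothetical.
 *
 *	int fd = open("/dev/kmem", O_RDONLY);
 *	long value;
 *	pread(fd, &value, sizeof(value), 0xc0100000);	// hypothetical kernel VA
 */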

static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user *buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void *)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#endif

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
#endif
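
/*
 * Usage sketch (userspace, illustrative): the offset of /dev/port is an
 * I/O port number and every byte transferred is a single inb()/outb().
 * The classic PC CMOS RTC pair (index port 0x70, data port 0x71):
 *
 *	int fd = open("/dev/port", O_RDWR);
 *	unsigned char idx = 0, sec;
 *	pwrite(fd, &idx, 1, 0x70);	// select CMOS register 0 (seconds)
 *	pread(fd, &sec, 1, 0x71);	// read it back
 */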

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
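
/*
 * Usage sketch (userspace, illustrative): mapping /dev/zero was the
 * traditional way to get zero-filled memory before MAP_ANONYMOUS;
 * MAP_SHARED mappings are backed by shmem_zero_setup() above so the
 * pages stay shared across fork().
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 */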

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we
 * cannot check against negative addresses: they are ok.  The return
 * value is weird, though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case SEEK_SET:
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	case SEEK_CUR:
		file->f_pos += offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}
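
/*
 * Usage sketch (userspace, illustrative): lseek() positions /dev/mem or
 * /dev/port at an absolute physical address or port number; SEEK_END is
 * rejected as described above.
 *
 *	lseek(fd, 0x9fc00, SEEK_SET);	// ok: absolute position
 *	lseek(fd, 0, SEEK_END);		// fails with EINVAL
 */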

static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static const struct file_operations kmsg_fops = {
	.write		= kmsg_write,
};
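
/*
 * Usage sketch (userspace, illustrative): anything written to /dev/kmsg
 * is handed to printk() and shows up in the kernel log (dmesg); on
 * kernels of this vintage a leading "<n>" in the message selects the
 * log level.
 *
 *	int fd = open("/dev/kmsg", O_WRONLY);
 *	const char *msg = "<6>hello from userspace\n";
 *	write(fd, msg, strlen(msg));
 */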

static const struct memdev {
	const char *name;
	mode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
	[11] = { "kmsg", 0, &kmsg_fops, NULL },
#ifdef CONFIG_CRASH_DUMP
	[12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
};
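
/*
 * The devlist index doubles as the minor number under MEM_MAJOR (1),
 * which is why the array is sparse.  Creating a node by hand
 * (illustrative; udev normally creates these from the class devices
 * registered in chr_dev_init() below):
 *
 *	mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
 */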

static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open		= memory_open,
};

static char *mem_devnode(struct device *dev, mode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;
		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return 0;
}

fs_initcall(chr_dev_init);