/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top of what the kernel knows about,
         * or through a file pointer that was marked O_DSYNC, will be
         * done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
        if (addr + count > __pa(high_memory))
                return 0;

        return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}
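
/*
 * Illustrative userspace sketch (an assumption for documentation, not
 * part of this driver): /dev/mem is addressed by physical address, so
 * reading sizeof(buf) bytes at some phys_addr looks like
 *
 *      int fd = open("/dev/mem", O_RDONLY);    (open needs CAP_SYS_RAWIO)
 *      lseek(fd, phys_addr, SEEK_SET);
 *      read(fd, buf, sizeof(buf));
 *
 * With CONFIG_STRICT_DEVMEM enabled, such a read is refused with
 * -EPERM unless every page in the range passes devmem_is_allowed().
 */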
/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;

                sz = size_inside_page(p, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, count))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur.
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr)
                        return -EFAULT;

                remaining = copy_to_user(buf, ptr, sz);
                unxlate_dev_mem_ptr(p, ptr);
                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

static ssize_t write_mem(struct file * file, const char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                sz = size_inside_page(p, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, sz))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur.
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr) {
                        if (written)
                                break;
                        return -EFAULT;
                }

                copied = copy_from_user(ptr, buf, sz);
                unxlate_dev_mem_ptr(p, ptr);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        unsigned long offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem   NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};
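
/*
 * Illustrative userspace sketch (an assumption, not part of this
 * driver): for the mmap handler below, the mmap() file offset selects
 * the physical address, so mapping one page of physical memory at
 * phys_addr looks like
 *
 *      void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, phys_addr);
 *
 * The offset must be page-aligned; the VFS converts it into vm_pgoff
 * (a page frame number) before mmap_mem() runs.
 */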
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                          &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif
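
/*
 * Illustrative userspace sketch (an assumption, not part of this
 * driver): /dev/kmem below is addressed by *kernel virtual* address,
 * e.g. a symbol address taken from /proc/kallsyms, rather than by
 * physical address:
 *
 *      lseek(fd, kernel_virtual_addr, SEEK_SET);
 *      read(fd, buf, sizeof(buf));
 *
 * Addresses above high_memory fall through to the vmalloc path and
 * are copied via vread()/vwrite().
 */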
#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}


static inline ssize_t
do_write_kmem(unsigned long p, const char __user *buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((char *)p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        /* GNU "?:" extension: return (virtr + wrote) if non-zero, else err */
        return virtr + wrote ? : err;
}
#endif
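
/*
 * Illustrative userspace sketch (an assumption, not part of this
 * driver): for /dev/port below, the file offset is an I/O port number
 * and each byte is transferred with inb()/outb(), so reading one byte
 * from port 0x70 looks like
 *
 *      lseek(fd, 0x70, SEEK_SET);
 *      read(fd, &c, 1);
 */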
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        size_t written;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        written = 0;
        while (count) {
                unsigned long unwritten;
                size_t chunk = count;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                unwritten = __clear_user(buf, chunk);
                written += chunk - unwritten;
                if (unwritten)
                        break;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                buf += chunk;
                count -= chunk;
                cond_resched();
        }
        return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static ssize_t write_full(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}
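
/*
 * Illustrative userspace sketch (an assumption, not part of this
 * driver): a shared mapping of /dev/zero behaves like anonymous
 * shared memory backed by shmem:
 *
 *      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd_zero, 0);
 *
 * Writes to /dev/full always fail with -ENOSPC, which makes it handy
 * for testing an application's out-of-space error handling.
 */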
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably,
 * you can fopen() both devices with "a" now.  This was previously
 * impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we
 * cannot check against negative addresses: they are ok.  The return
 * value is weird, though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
        switch (orig) {
        case 0:
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        case 1:
                file->f_pos += offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
        return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
        .get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .splice_write   = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
        .name           = "char/mem",
        .capabilities   = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
        .read   = read_oldmem,
        .open   = open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
                if (ret > count)
                        /* printk can add a prefix */
                        ret = count;
        }
        kfree(tmp);
        return ret;
}

static const struct file_operations kmsg_fops = {
        .write = kmsg_write,
};
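
/*
 * The array index below is the *minor* number under char major
 * MEM_MAJOR (1), matching the conventional device nodes: /dev/mem is
 * 1:1, /dev/null 1:3, /dev/zero 1:5, /dev/full 1:7, /dev/random 1:8,
 * /dev/urandom 1:9, /dev/kmsg 1:11.  Index 0 and the unused holes stay
 * NULL, so memory_open() rejects them with -ENXIO.
 */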
= { "full", 0666, &full_fops, NULL }, 847 [8] = { "random", 0666, &random_fops, NULL }, 848 [9] = { "urandom", 0666, &urandom_fops, NULL }, 849 [11] = { "kmsg", 0, &kmsg_fops, NULL }, 850 #ifdef CONFIG_CRASH_DUMP 851 [12] = { "oldmem", 0, &oldmem_fops, NULL }, 852 #endif 853 }; 854 855 static int memory_open(struct inode *inode, struct file *filp) 856 { 857 int minor; 858 const struct memdev *dev; 859 860 minor = iminor(inode); 861 if (minor >= ARRAY_SIZE(devlist)) 862 return -ENXIO; 863 864 dev = &devlist[minor]; 865 if (!dev->fops) 866 return -ENXIO; 867 868 filp->f_op = dev->fops; 869 if (dev->dev_info) 870 filp->f_mapping->backing_dev_info = dev->dev_info; 871 872 if (dev->fops->open) 873 return dev->fops->open(inode, filp); 874 875 return 0; 876 } 877 878 static const struct file_operations memory_fops = { 879 .open = memory_open, 880 }; 881 882 static char *mem_devnode(struct device *dev, mode_t *mode) 883 { 884 if (mode && devlist[MINOR(dev->devt)].mode) 885 *mode = devlist[MINOR(dev->devt)].mode; 886 return NULL; 887 } 888 889 static struct class *mem_class; 890 891 static int __init chr_dev_init(void) 892 { 893 int minor; 894 int err; 895 896 err = bdi_init(&zero_bdi); 897 if (err) 898 return err; 899 900 if (register_chrdev(MEM_MAJOR,"mem",&memory_fops)) 901 printk("unable to get major %d for memory devs\n", MEM_MAJOR); 902 903 mem_class = class_create(THIS_MODULE, "mem"); 904 mem_class->devnode = mem_devnode; 905 for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) { 906 if (!devlist[minor].name) 907 continue; 908 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor), 909 NULL, devlist[minor].name); 910 } 911 912 return 0; 913 } 914 915 fs_initcall(chr_dev_init); 916