/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
		&& addr >= __pa(high_memory);
#elif defined(__x86_64__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruption.
	 * But it is only available for root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/*
	 * Same behaviour as i386: PAT is always set to cached and the
	 * MTRRs control the caching behaviour.
	 * Hopefully a full PAT implementation will fix that soon.
	 */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
{
	unsigned long end_mem;

	end_mem = __pa(high_memory);
	if (addr >= end_mem)
		return 0;

	if (*count > end_mem - addr)
		*count = end_mem - addr;

	return 1;
}
#endif
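/*
 * Note on the chunking idiom used by the read/write helpers below:
 * "-p & (PAGE_SIZE - 1)" is the number of bytes from p up to the next
 * page boundary. Worked example (for illustration, PAGE_SIZE == 4096):
 *
 *	p = 0x12345678
 *	 p & (PAGE_SIZE - 1) == 0x678	(offset into the current page)
 *	-p & (PAGE_SIZE - 1) == 0x988	(== 4096 - 0x678, bytes left in page)
 *
 * so the first loop iteration copies just enough to reach a page
 * boundary, and every later iteration transfers whole pages (capped by
 * the remaining count).
 */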
/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;
		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	vma->vm_page_prot = phys_mem_access_prot(file, offset,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);
#elif defined(pgprot_noncached)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int uncached;

	uncached = uncached_access(file, offset);
	if (uncached)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    vma->vm_end - vma->vm_start,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
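/*
 * Illustrative userspace sketch (not part of this driver): for /dev/mem,
 * the mmap() file offset is the physical address to map. The address
 * below is an example only (the conventional x86 local APIC base); a
 * real caller must pick a range that is legal on its machine:
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0xfee00000);
 *
 * Whether O_SYNC actually yields an uncached mapping is decided by
 * uncached_access() above, per architecture.
 */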
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
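/*
 * vread()/vwrite() live in mm/vmalloc.c and copy between a kernel buffer
 * and the vmalloc address range while holding vmlist_lock. That is why
 * the callers below stage data through a kernel page rather than touching
 * userspace under the lock: a userspace access could fault and sleep.
 * A zero return from vread() means no vmalloc area covers the address,
 * which read_kmem() below treats as end of data.
 */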
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count)
				tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}


static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
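/*
 * Partial-write semantics of do_write_kmem(), for illustration: if
 * copy_from_user() faults partway through a chunk, the short byte count
 * already transferred is reported back to the caller. E.g. with 3000
 * bytes already written and a fault after 100 bytes of the next page,
 * the function returns 3100; only a fault before any byte has been
 * transferred yields -EFAULT. write_mem() above follows the same
 * convention.
 */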
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					ssize_t ret;

					free_page((unsigned long)kbuf);
					ret = wrote + virtr + (len - written);
					return ret ? ret : -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp))
			return -EFAULT;
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}
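/*
 * Illustrative userspace sketch (not part of this driver): /dev/port
 * uses the file position as the I/O port number, so a one-byte read at
 * offset 0x64 is equivalent to inb(0x64). The port number here is an
 * example only:
 *
 *	int fd = open("/dev/port", O_RDONLY);
 *	unsigned char status;
 *	lseek(fd, 0x64, SEEK_SET);
 *	read(fd, &status, 1);		(performs inb(0x64))
 *
 * open_port() below restricts this to tasks with CAP_SYS_RAWIO.
 */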
#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long addr = (unsigned long)buf;

	mm = current->mm;
	/* Oops, this was forgotten before. -ben */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
			break;
		count = vma->vm_end - addr;
		if (count > size)
			count = size;

		zap_page_range(vma, addr, count, NULL);
		zeromap_page_range(vma, addr, count, PAGE_COPY);

		size -= count;
		buf += count;
		addr += count;
		if (size == 0)
			goto out_up;
	}

	up_read(&mm->mmap_sem);

	/* The shared case is hard. Let's do the conventional zeroing. */
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		cond_resched();
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);

	return size;
out_up:
	up_read(&mm->mmap_sem);
	return size;
}

static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE*4) {
		unsigned long partial;

		/* How much left of the page? */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	if (zeromap_page_range(vma, vma->vm_start,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	size_t todo = count;

	while (todo) {
		size_t chunk = todo;

		if (chunk > 4096)
			chunk = 4096;	/* Just for latency reasons */
		if (clear_user(buf, chunk))
			return -EFAULT;
		buf += chunk;
		todo -= chunk;
		cond_resched();
	}
	return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	down(&file->f_dentry->d_inode->i_sem);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	up(&file->f_dentry->d_inode->i_sem);
	return ret;
}
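/*
 * Why force_successful_syscall_return() above: /dev/mem offsets are raw
 * addresses, so a successfully-set file position can have a bit pattern
 * that falls in the -errno range. Architectures that flag syscall errors
 * through the return-value register (e.g. ia64) would otherwise report
 * such a seek as a failure; the call tells them the value is a genuine
 * result. On most architectures it is a no-op (see asm/ptrace.h).
 */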
static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
};

static struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
};

static struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
};

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	int ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
	}
	kfree(tmp);
	return ret;
}

static struct file_operations kmsg_fops = {
	.write		= kmsg_write,
};

static int memory_open(struct inode * inode, struct file * filp)
{
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			break;
		case 2:
			filp->f_op = &kmem_fops;
			break;
		case 3:
			filp->f_op = &null_fops;
			break;
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
#ifdef CONFIG_CRASH_DUMP
		case 12:
			filp->f_op = &oldmem_fops;
			break;
#endif
		default:
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode, filp);
	return 0;
}

static struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};
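/*
 * Open dispatch, for illustration: opening /dev/zero (minor 5) goes
 * through memory_open(), which swaps zero_fops into filp->f_op and points
 * the file's mapping at zero_bdi, then chains to the new fops' open
 * routine if one exists (zero_fops has none, so the open just succeeds).
 * Minors not handled in the switch, such as 6 and 10, yield -ENXIO.
 */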
static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++) {
		class_device_create(mem_class,
				    MKDEV(MEM_MAJOR, devlist[i].minor),
				    NULL, devlist[i].name);
		devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
			      S_IFCHR | devlist[i].mode, devlist[i].name);
	}

	return 0;
}

fs_initcall(chr_dev_init);
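/*
 * Illustrative usage: all of these devices share MEM_MAJOR (1) with the
 * minor numbers from devlist[] above, so on a system without devfs or a
 * udev-style daemon the nodes can be created by hand, e.g.:
 *
 *	mknod /dev/null c 1 3
 *	mknod /dev/zero c 1 5
 *	mknod /dev/urandom c 1 9
 *
 * The device names and modes registered here match those entries.
 */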