/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/tracehook.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

static inline __attribute__((format(printf, 1, 2)))
void no_printk(const char *fmt, ...)
{
}

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif
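
/*
 * A minimal sketch (not built) of what the no_printk() indirection above
 * buys us: in the disabled branch, kenter()/kleave()/kdebug() still route
 * their arguments through a printf-format-checked call, so mismatched
 * format strings are caught at compile time even though no code is emitted.
 */
#if 0
static void kdebug_format_check_example(void)
{
	kdebug("mapping %lx+%zx", 0x1000UL, (size_t)PAGE_SIZE); /* OK */
	kdebug("mapping %lx", "not a long"); /* would warn at compile time */
}
#endif
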
void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	i_size_write(inode, offset);

	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}

EXPORT_SYMBOL(vmtruncate);

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}
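
/*
 * An illustrative sketch (not built) of the kobjsize() decision ladder
 * above: a slab-backed pointer reports its kmalloc'd size, a mapped region
 * reports the VMA size, and anything else falls back to the compound page
 * size.
 */
#if 0
static void kobjsize_example(void)
{
	void *p = kmalloc(10, GFP_KERNEL);

	/* slab-backed pointer: ksize() path, at least the requested size */
	if (p) {
		BUG_ON(kobjsize(p) < 10);
		kfree(p);
	}
}
#endif
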
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int nr_pages, int foll_flags,
		     struct page **pages, struct vm_area_struct **vmas)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start += PAGE_SIZE;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count
 *   of a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int nr_pages, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);
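
/*
 * A hedged usage sketch (not built): pinning one page of another task's
 * address space with get_user_pages() and releasing it afterwards. "tsk",
 * "mm" and "uaddr" are placeholders supplied by the caller.
 */
#if 0
static int pin_one_page_example(struct task_struct *tsk, struct mm_struct *mm,
				unsigned long uaddr)
{
	struct page *page;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, uaddr, 1, 0 /* read */, 0, &page, NULL);
	up_read(&mm->mmap_sem);
	if (ret < 1)
		return -EFAULT;

	/* ... use the page ... */
	page_cache_release(page);
	return 0;
}
#endif
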
/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
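
/*
 * A brief sketch (not built) of what the above means in practice: with no
 * MMU there is no separate vmalloc address space, so vmalloc() memory is
 * physically contiguous, comes straight from kmalloc(), and may simply be
 * handed back to vfree()/kfree().
 */
#if 0
static void vmalloc_is_kmalloc_example(void)
{
	void *p = vmalloc(2 * PAGE_SIZE);

	if (p) {
		/* the same object is visible to virt_to_page() directly */
		BUG_ON(vmalloc_to_page(p) != virt_to_page(p));
		vfree(p);
	}
}
#endif
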
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}
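
/*
 * A hedged sketch (not built) of the nommu brk semantics above: shrinking
 * is always honoured, growing is only honoured within the slack reserved
 * up to mm->context.end_brk, and nothing is remapped either way.
 */
#if 0
static void brk_example(void)
{
	unsigned long cur = sys_brk(0);	/* below start_brk: returns mm->brk */

	sys_brk(cur + PAGE_SIZE);	/* grows only if within end_brk */
	sys_brk(cur);			/* shrinking back always succeeds */
}
#endif
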
/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this
 *   releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, atomic_read(&region->vm_usage));

	BUG_ON(!nommu_region_tree.rb_node);

	if (atomic_dec_and_test(&region->vm_usage)) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}
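
/*
 * A small sketch (not built) of the locking contract noted above:
 * __put_nommu_region() consumes the write-held region semaphore, so a
 * caller that already holds it drops its reference like this, whereas
 * put_nommu_region() does the down_write() itself.
 */
#if 0
static void put_region_example(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	/* ... manipulate the region tree ... */
	__put_nommu_region(region);	/* also releases nommu_region_sem */
}
#endif
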
/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an
 * anonymous page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, **pp;
	struct address_space *mapping;
	struct rb_node **p, *parent;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* add the VMA to the tree */
	parent = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start)
			p = &(*p)->rb_right;
		else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end)
			p = &(*p)->rb_right;
		else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
		if (pvma->vm_start > vma->vm_start)
			break;
		if (pvma->vm_start < vma->vm_start)
			continue;
		if (pvma->vm_end < vma->vm_end)
			break;
	}

	vma->vm_next = *pp;
	*pp = vma;
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	struct vm_area_struct **pp;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	if (mm->mmap_cache == vma)
		mm->mmap_cache = NULL;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);
	for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
		if (*pp == vma) {
			*pp = vma->vm_next;
			break;
		}
	}

	vma->vm_mm = NULL;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file) {
		fput(vma->vm_file);
		if (vma->vm_flags & VM_EXECUTABLE)
			removed_exe_file_vma(mm);
	}
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	struct rb_node *n = mm->mm_rb.rb_node;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
		return vma;

	/* trawl the tree (there may be multiple mappings in which addr
	 * resides) */
	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	struct rb_node *n = mm->mm_rb.rb_node;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start == addr && vma->vm_end == end)
		return vma;

	/* trawl the tree (there may be multiple mappings in which addr
	 * resides) */
	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}
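
/*
 * A hedged usage sketch (not built): both lookups above expect
 * mm->mmap_sem to be held at least for reading, e.g.:
 */
#if 0
static unsigned long vma_size_at_example(struct mm_struct *mm,
					 unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long size = 0;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma)
		size = vma->vm_end - vma->vm_start;
	up_read(&mm->mmap_sem);

	return size;
}
#endif
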
/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	unsigned long reqprot = prot;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED || addr) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file->f_path.dentry->d_inode->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_path.dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_path.dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))) {
				printk("MAP_SHARED not completely supported on !MMU\n");
				return -EINVAL;
			}

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		if (flags & MAP_SHARED)
			vm_flags |= VM_MAYSHARE | VM_SHARED;
		else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
			vm_flags |= VM_MAYSHARE;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}
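
/*
 * A worked example (not built) of the translation above, under assumed
 * inputs: a PROT_READ, MAP_PRIVATE mapping of a file whose backing device
 * only advertises BDI_CAP_MAP_COPY. Because the mapping is read-only, the
 * private copy may still be flagged VM_MAYSHARE and shared between
 * processes (assuming the caller is not being traced).
 */
#if 0
static void determine_vm_flags_example(struct file *file)
{
	unsigned long vm_flags;

	vm_flags = determine_vm_flags(file, PROT_READ, MAP_PRIVATE,
				      BDI_CAP_MAP_COPY);
	BUG_ON(!(vm_flags & VM_MAYSHARE));
}
#endif
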
/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return ret;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting an ENOSYS error indicates that direct mmap isn't
	 * possible (as opposed to tried but failed) so we'll fall
	 * through to making a private copy of the data and mapping
	 * that if we can */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len)
{
	struct page *pages;
	unsigned long total, point, n, rlen;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (vma->vm_file) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return ret;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	rlen = PAGE_ALIGN(len);

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(rlen);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
	atomic_long_add(total, &mmap_pages_allocated);

	point = rlen >> PAGE_SHIFT;

	/* we allocated a power-of-2 sized page set, so we may want to trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_long_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}

	for (point = 1; point < total; point++)
		set_page_refcounted(&pages[point]);

	base = page_address(pages);
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end = region->vm_start + rlen;
	region->vm_top = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < rlen)
			memset(base + ret, 0, rlen - ret);

	} else {
		/* if it's an anonymous mapping, then just clear it */
		memset(base, 0, rlen);
	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_end);
	region->vm_start = vma->vm_start = 0;
	region->vm_end = vma->vm_end = 0;
	region->vm_top = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas();
	return -ENOMEM;
}
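
/*
 * A worked example (not built) of the power-of-2 trimming above, assuming
 * 4KiB pages and a sysctl_nr_trim_pages setting that permits trimming: a
 * 3-page (12KiB) request gets an order-2 (4-page) allocation, and the
 * shaving loop then hands the one excess page back to the page allocator.
 */
#if 0
static void trim_arithmetic_example(void)
{
	unsigned long rlen = 3 * PAGE_SIZE;
	unsigned long total = 1UL << get_order(rlen);	/* 4 pages */
	unsigned long point = rlen >> PAGE_SHIFT;	/* 3 pages wanted */

	/* one iteration: ilog2(4 - 3) = 0, so a single page is freed */
	BUG_ON(total - point != 1);
}
#endif
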
/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags, result;
	int ret;

	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	atomic_set(&region->vm_usage, 1);
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_node);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = file;
		get_file(file);
		vma->vm_file = file;
		get_file(file);
		if (vm_flags & VM_EXECUTABLE) {
			added_exe_file_vma(current->mm);
			vma->vm_mm = current->mm;
		}
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned, for in
	 *   these cases sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (pregion->vm_file->f_path.dentry->d_inode !=
			    file->f_path.dentry->d_inode)
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			atomic_inc(&pregion->vm_usage);
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					atomic_dec(&pregion->vm_usage);
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (file && file->f_op->get_unmapped_area) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR((void *) addr)) {
				ret = addr;
				if (ret != (unsigned long) -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = (unsigned long) -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;
	add_nommu_region(region);

	/* set up the mapping */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len);
	if (ret < 0)
		goto error_put_region;

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	up_write(&nommu_region_sem);

	if (prot & PROT_EXEC)
		flush_icache_range(result, result + len);

	kleave(" = %lx", result);
	return result;

error_put_region:
	__put_nommu_region(region);
	if (vma) {
		if (vma->vm_file) {
			fput(vma->vm_file);
			if (vma->vm_flags & VM_EXECUTABLE)
				removed_exe_file_vma(vma->vm_mm);
		}
		kmem_cache_free(vm_area_cachep, vma);
	}
	kleave(" = %d [pr]", ret);
	return ret;

error_just_free:
	up_write(&nommu_region_sem);
error:
	fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	fput(vma->vm_file);
	if (vma->vm_flags & VM_EXECUTABLE)
		removed_exe_file_vma(vma->vm_mm);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
EXPORT_SYMBOL(do_mmap_pgoff);
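
/*
 * A hedged usage sketch (not built) of the entry point above, as an
 * in-kernel caller would drive it: do_mmap_pgoff() expects mmap_sem to be
 * held for writing and returns either a usable address or a negative
 * errno. "file" is a placeholder supplied by the caller.
 */
#if 0
static unsigned long mmap_file_copy_example(struct file *file, size_t len)
{
	unsigned long addr;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap_pgoff(file, 0, len, PROT_READ, MAP_PRIVATE, 0);
	up_write(&current->mm->mmap_sem);

	return addr;	/* IS_ERR_VALUE(addr) on failure */
}
#endif
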
/*
 * split a vma into two pieces at address 'addr'; a new vma is allocated
 * either for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions that have a single
	 * owner */
	if (vma->vm_file ||
	    atomic_read(&vma->vm_region->vm_usage) != 1)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(atomic_read(&region->vm_usage) != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a
 *   single VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	struct rb_node *rb;
	unsigned long end = start + len;
	int ret;

	kenter(",%lx,%zx", start, len);

	if (len == 0)
		return -EINVAL;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			rb = rb_next(&vma->vm_rb);
			vma = rb_entry(rb, struct vm_area_struct, vm_rb);
		} while (rb);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);
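
/*
 * A hedged sketch (not built) of the rules enforced above: an anonymous
 * mapping may be truncated or split by a partial munmap(), while a
 * file-backed mapping must be unmapped exactly as it was mapped. The base
 * addresses and length here are placeholders supplied by the caller.
 */
#if 0
static void munmap_rules_example(struct mm_struct *mm,
				 unsigned long anon_base,
				 unsigned long file_base, size_t file_len)
{
	/* anonymous mapping of 4 pages at anon_base: freeing just the
	 * final page shrinks the VMA and succeeds */
	do_munmap(mm, anon_base + 3 * PAGE_SIZE, PAGE_SIZE);

	/* file-backed mapping at file_base: anything other than the
	 * exact original span is rejected with -EINVAL */
	do_munmap(mm, file_base + PAGE_SIZE, PAGE_SIZE);	/* fails */
}
#endif
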
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
	}

	kleave("");
}

unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and
 * the block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}
EXPORT_SYMBOL(do_mremap);

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
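
/*
 * A worked sketch (not built) of the size constraint above: a mapping can
 * only grow in place, up to the page-rounded size of the region that
 * do_mmap_private() recorded for it; anything beyond that is -ENOMEM.
 */
#if 0
static void mremap_in_place_example(unsigned long addr)
{
	/* assume addr was returned by an anonymous private mmap() of
	 * 2*PAGE_SIZE + 100 bytes, so its region spans 3 whole pages */
	do_mremap(addr, 2 * PAGE_SIZE + 100, 3 * PAGE_SIZE, 0, 0); /* ok */
	do_mremap(addr, 3 * PAGE_SIZE, 4 * PAGE_SIZE, 0, 0); /* -ENOMEM */
}
#endif
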
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		    unsigned long to, unsigned long size, pgprot_t prot)
{
	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * ask for an unmapped area at which to create a mapping on a file
 */
unsigned long get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
				  unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;
	if (file && file->f_op && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;

	if (!get_area)
		return -ENOSYS;

	return get_area(file, addr, len, pgoff, flags);
}
EXPORT_SYMBOL(get_unmapped_area);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		unsigned long n;

		free = global_page_state(NR_FILE_PAGES);
		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		/*
		 * nr_free_pages() is very expensive on large systems,
		 * only call if we're about to fail.
		 */
		n = nr_free_pages();

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (n <= totalreserve_pages)
			goto error;
		else
			n -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			n -= n / 32;
		free += n;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	if (mm)
		allowed -= mm->total_vm / 32;

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
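
/*
 * A worked example (not built) of the OVERCOMMIT_NEVER ratio arithmetic
 * above, under assumed numbers: with 100000 pages of RAM, the default 50%
 * overcommit ratio and no swap, an unprivileged process may commit up to
 * 100000 * 50/100 minus the root reserve (the "3%" is really 1/32).
 */
#if 0
static void overcommit_ratio_example(void)
{
	unsigned long allowed = 100000 * 50 / 100;	/* 50000 pages */

	allowed -= allowed / 32;	/* minus 1562 for !cap_sys_admin */
	BUG_ON(allowed != 48438);
}
#endif
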
int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			len -= copy_to_user((void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			len -= copy_from_user(buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
	return len;
}
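
/*
 * A hedged usage sketch (not built): reading a few bytes out of another
 * task's address space, as a ptrace-style caller might. The return value
 * of access_process_vm() is the number of bytes actually copied.
 */
#if 0
static int peek_remote_example(struct task_struct *tsk, unsigned long uaddr)
{
	char buf[16];
	int copied;

	copied = access_process_vm(tsk, uaddr, buf, sizeof(buf), 0);
	return copied == sizeof(buf) ? 0 : -EFAULT;
}
#endif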