// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/nommu.c
 *
 * Replacement code for mm functions to support CPUs that don't
 * have any form of memory management unit (thus no virtual memory).
 *
 * See Documentation/admin-guide/mm/nommu-mmap.rst
 *
 * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
 * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/printk.h>

#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

unsigned long highest_memmap_pfn;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct folio *folio;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	folio = virt_to_folio(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (folio_test_slab(folio))
		return ksize(objp);

	/*
	 * If it's not a large folio, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order folios.
	 */
	if (!folio_test_large(folio)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return folio_size(folio);
}

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
117 */ 118 return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM); 119 } 120 EXPORT_SYMBOL(__vmalloc_noprof); 121 122 void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align, 123 gfp_t flags, int node) 124 { 125 return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM); 126 } 127 128 void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, 129 unsigned long start, unsigned long end, gfp_t gfp_mask, 130 pgprot_t prot, unsigned long vm_flags, int node, 131 const void *caller) 132 { 133 return __vmalloc_noprof(size, gfp_mask); 134 } 135 136 void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask, 137 int node, const void *caller) 138 { 139 return __vmalloc_noprof(size, gfp_mask); 140 } 141 142 static void *__vmalloc_user_flags(unsigned long size, gfp_t flags) 143 { 144 void *ret; 145 146 ret = __vmalloc(size, flags); 147 if (ret) { 148 struct vm_area_struct *vma; 149 150 mmap_write_lock(current->mm); 151 vma = find_vma(current->mm, (unsigned long)ret); 152 if (vma) 153 vm_flags_set(vma, VM_USERMAP); 154 mmap_write_unlock(current->mm); 155 } 156 157 return ret; 158 } 159 160 void *vmalloc_user_noprof(unsigned long size) 161 { 162 return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO); 163 } 164 EXPORT_SYMBOL(vmalloc_user_noprof); 165 166 struct page *vmalloc_to_page(const void *addr) 167 { 168 return virt_to_page(addr); 169 } 170 EXPORT_SYMBOL(vmalloc_to_page); 171 172 unsigned long vmalloc_to_pfn(const void *addr) 173 { 174 return page_to_pfn(virt_to_page(addr)); 175 } 176 EXPORT_SYMBOL(vmalloc_to_pfn); 177 178 long vread_iter(struct iov_iter *iter, const char *addr, size_t count) 179 { 180 /* Don't allow overflow */ 181 if ((unsigned long) addr + count < count) 182 count = -(unsigned long) addr; 183 184 return copy_to_iter(addr, count, iter); 185 } 186 187 /* 188 * vmalloc - allocate virtually contiguous memory 189 * 190 * @size: allocation size 191 * 192 * Allocate enough pages to cover @size from the page level 193 * allocator and map them into contiguous kernel virtual space. 194 * 195 * For tight control over page level allocator and protection flags 196 * use __vmalloc() instead. 197 */ 198 void *vmalloc_noprof(unsigned long size) 199 { 200 return __vmalloc_noprof(size, GFP_KERNEL); 201 } 202 EXPORT_SYMBOL(vmalloc_noprof); 203 204 /* 205 * vmalloc_huge_node - allocate virtually contiguous memory, on a node 206 * 207 * @size: allocation size 208 * @gfp_mask: flags for the page level allocator 209 * @node: node to use for allocation or NUMA_NO_NODE 210 * 211 * Allocate enough pages to cover @size from the page level 212 * allocator and map them into contiguous kernel virtual space. 213 * 214 * Due to NOMMU implications the node argument and HUGE page attribute is 215 * ignored. 216 */ 217 void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) 218 { 219 return __vmalloc_noprof(size, gfp_mask); 220 } 221 222 /* 223 * vzalloc - allocate virtually contiguous memory with zero fill 224 * 225 * @size: allocation size 226 * 227 * Allocate enough pages to cover @size from the page level 228 * allocator and map them into contiguous kernel virtual space. 229 * The memory allocated is set to zero. 230 * 231 * For tight control over page level allocator and protection flags 232 * use __vmalloc() instead. 
233 */ 234 void *vzalloc_noprof(unsigned long size) 235 { 236 return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO); 237 } 238 EXPORT_SYMBOL(vzalloc_noprof); 239 240 /** 241 * vmalloc_node - allocate memory on a specific node 242 * @size: allocation size 243 * @node: numa node 244 * 245 * Allocate enough pages to cover @size from the page level 246 * allocator and map them into contiguous kernel virtual space. 247 * 248 * For tight control over page level allocator and protection flags 249 * use __vmalloc() instead. 250 */ 251 void *vmalloc_node_noprof(unsigned long size, int node) 252 { 253 return vmalloc_noprof(size); 254 } 255 EXPORT_SYMBOL(vmalloc_node_noprof); 256 257 /** 258 * vzalloc_node - allocate memory on a specific node with zero fill 259 * @size: allocation size 260 * @node: numa node 261 * 262 * Allocate enough pages to cover @size from the page level 263 * allocator and map them into contiguous kernel virtual space. 264 * The memory allocated is set to zero. 265 * 266 * For tight control over page level allocator and protection flags 267 * use __vmalloc() instead. 268 */ 269 void *vzalloc_node_noprof(unsigned long size, int node) 270 { 271 return vzalloc_noprof(size); 272 } 273 EXPORT_SYMBOL(vzalloc_node_noprof); 274 275 /** 276 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 277 * @size: allocation size 278 * 279 * Allocate enough 32bit PA addressable pages to cover @size from the 280 * page level allocator and map them into contiguous kernel virtual space. 281 */ 282 void *vmalloc_32_noprof(unsigned long size) 283 { 284 return __vmalloc_noprof(size, GFP_KERNEL); 285 } 286 EXPORT_SYMBOL(vmalloc_32_noprof); 287 288 /** 289 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 290 * @size: allocation size 291 * 292 * The resulting memory area is 32bit addressable and zeroed so it can be 293 * mapped to userspace without leaking data. 294 * 295 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to 296 * remap_vmalloc_range() are permissible. 297 */ 298 void *vmalloc_32_user_noprof(unsigned long size) 299 { 300 /* 301 * We'll have to sort out the ZONE_DMA bits for 64-bit, 302 * but for now this can simply use vmalloc_user() directly. 
303 */ 304 return vmalloc_user_noprof(size); 305 } 306 EXPORT_SYMBOL(vmalloc_32_user_noprof); 307 308 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) 309 { 310 BUG(); 311 return NULL; 312 } 313 EXPORT_SYMBOL(vmap); 314 315 void vunmap(const void *addr) 316 { 317 BUG(); 318 } 319 EXPORT_SYMBOL(vunmap); 320 321 void *vm_map_ram(struct page **pages, unsigned int count, int node) 322 { 323 BUG(); 324 return NULL; 325 } 326 EXPORT_SYMBOL(vm_map_ram); 327 328 void vm_unmap_ram(const void *mem, unsigned int count) 329 { 330 BUG(); 331 } 332 EXPORT_SYMBOL(vm_unmap_ram); 333 334 void vm_unmap_aliases(void) 335 { 336 } 337 EXPORT_SYMBOL_GPL(vm_unmap_aliases); 338 339 void free_vm_area(struct vm_struct *area) 340 { 341 BUG(); 342 } 343 EXPORT_SYMBOL_GPL(free_vm_area); 344 345 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, 346 struct page *page) 347 { 348 return -EINVAL; 349 } 350 EXPORT_SYMBOL(vm_insert_page); 351 352 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, 353 struct page **pages, unsigned long *num) 354 { 355 return -EINVAL; 356 } 357 EXPORT_SYMBOL(vm_insert_pages); 358 359 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, 360 unsigned long num) 361 { 362 return -EINVAL; 363 } 364 EXPORT_SYMBOL(vm_map_pages); 365 366 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, 367 unsigned long num) 368 { 369 return -EINVAL; 370 } 371 EXPORT_SYMBOL(vm_map_pages_zero); 372 373 /* 374 * sys_brk() for the most part doesn't need the global kernel 375 * lock, except when an application is doing something nasty 376 * like trying to un-brk an area that has already been mapped 377 * to a regular file. in this case, the unmapping will need 378 * to invoke file system routines that need the global lock. 379 */ 380 SYSCALL_DEFINE1(brk, unsigned long, brk) 381 { 382 struct mm_struct *mm = current->mm; 383 384 if (brk < mm->start_brk || brk > mm->context.end_brk) 385 return mm->brk; 386 387 if (mm->brk == brk) 388 return mm->brk; 389 390 /* 391 * Always allow shrinking brk 392 */ 393 if (brk <= mm->brk) { 394 mm->brk = brk; 395 return brk; 396 } 397 398 /* 399 * Ok, looks good - let it rip. 400 */ 401 flush_icache_user_range(mm->brk, brk); 402 return mm->brk = brk; 403 } 404 405 static int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; 406 407 static const struct ctl_table nommu_table[] = { 408 { 409 .procname = "nr_trim_pages", 410 .data = &sysctl_nr_trim_pages, 411 .maxlen = sizeof(sysctl_nr_trim_pages), 412 .mode = 0644, 413 .proc_handler = proc_dointvec_minmax, 414 .extra1 = SYSCTL_ZERO, 415 }, 416 }; 417 418 /* 419 * initialise the percpu counter for VM and region record slabs, initialise VMA 420 * state. 
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
	register_sysctl_init("vm", nommu_table);
	vma_state_init();
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(last->vm_end <= last->vm_start);
	BUG_ON(last->vm_top < last->vm_end);

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(region->vm_end <= region->vm_start);
		BUG_ON(region->vm_top < region->vm_end);
		BUG_ON(region->vm_start < last->vm_top);

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page((void *)from);

		atomic_long_dec(&mmap_pages_allocated);
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY)
			free_page_series(region->vm_start, region->vm_top);
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
{
	vma->vm_mm = mm;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		struct address_space *mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}
}

static void cleanup_vma_from_mm(struct vm_area_struct *vma)
{
	vma->vm_mm->map_count--;
	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		struct address_space *mapping;
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static int delete_vma_from_mm(struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, NULL)) {
		pr_warn("Allocation of vma tree for process %d failed\n",
			current->pid);
		return -ENOMEM;
	}
	cleanup_vma_from_mm(vma);

	/* remove from the MM's tree and list */
	vma_iter_clear(&vmi);
	return 0;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	vma_close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	vm_area_free(vma);
}

struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
					     unsigned long start_addr,
					     unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
EXPORT_SYMBOL(find_vma_intersection);

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_lock at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	VMA_ITERATOR(vmi, mm, addr);

	return vma_iter_load(&vmi);
}
EXPORT_SYMBOL(find_vma);

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
{
	return -ENOMEM;
}

struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_unlock(mm);
	return NULL;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_lock at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	if (!vma)
		return NULL;
	if (vma->vm_start != addr)
		return NULL;
	if (vma->vm_end != end)
		return NULL;

	return vma;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
694 unsigned long flags, 695 unsigned long pgoff, 696 unsigned long *_capabilities) 697 { 698 unsigned long capabilities, rlen; 699 int ret; 700 701 /* do the simple checks first */ 702 if (flags & MAP_FIXED) 703 return -EINVAL; 704 705 if ((flags & MAP_TYPE) != MAP_PRIVATE && 706 (flags & MAP_TYPE) != MAP_SHARED) 707 return -EINVAL; 708 709 if (!len) 710 return -EINVAL; 711 712 /* Careful about overflows.. */ 713 rlen = PAGE_ALIGN(len); 714 if (!rlen || rlen > TASK_SIZE) 715 return -ENOMEM; 716 717 /* offset overflow? */ 718 if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff) 719 return -EOVERFLOW; 720 721 if (file) { 722 /* files must support mmap */ 723 if (!can_mmap_file(file)) 724 return -ENODEV; 725 726 /* work out if what we've got could possibly be shared 727 * - we support chardevs that provide their own "memory" 728 * - we support files/blockdevs that are memory backed 729 */ 730 if (file->f_op->mmap_capabilities) { 731 capabilities = file->f_op->mmap_capabilities(file); 732 } else { 733 /* no explicit capabilities set, so assume some 734 * defaults */ 735 switch (file_inode(file)->i_mode & S_IFMT) { 736 case S_IFREG: 737 case S_IFBLK: 738 capabilities = NOMMU_MAP_COPY; 739 break; 740 741 case S_IFCHR: 742 capabilities = 743 NOMMU_MAP_DIRECT | 744 NOMMU_MAP_READ | 745 NOMMU_MAP_WRITE; 746 break; 747 748 default: 749 return -EINVAL; 750 } 751 } 752 753 /* eliminate any capabilities that we can't support on this 754 * device */ 755 if (!file->f_op->get_unmapped_area) 756 capabilities &= ~NOMMU_MAP_DIRECT; 757 if (!(file->f_mode & FMODE_CAN_READ)) 758 capabilities &= ~NOMMU_MAP_COPY; 759 760 /* The file shall have been opened with read permission. */ 761 if (!(file->f_mode & FMODE_READ)) 762 return -EACCES; 763 764 if (flags & MAP_SHARED) { 765 /* do checks for writing, appending and locking */ 766 if ((prot & PROT_WRITE) && 767 !(file->f_mode & FMODE_WRITE)) 768 return -EACCES; 769 770 if (IS_APPEND(file_inode(file)) && 771 (file->f_mode & FMODE_WRITE)) 772 return -EACCES; 773 774 if (!(capabilities & NOMMU_MAP_DIRECT)) 775 return -ENODEV; 776 777 /* we mustn't privatise shared mappings */ 778 capabilities &= ~NOMMU_MAP_COPY; 779 } else { 780 /* we're going to read the file into private memory we 781 * allocate */ 782 if (!(capabilities & NOMMU_MAP_COPY)) 783 return -ENODEV; 784 785 /* we don't permit a private writable mapping to be 786 * shared with the backing device */ 787 if (prot & PROT_WRITE) 788 capabilities &= ~NOMMU_MAP_DIRECT; 789 } 790 791 if (capabilities & NOMMU_MAP_DIRECT) { 792 if (((prot & PROT_READ) && !(capabilities & NOMMU_MAP_READ)) || 793 ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) || 794 ((prot & PROT_EXEC) && !(capabilities & NOMMU_MAP_EXEC)) 795 ) { 796 capabilities &= ~NOMMU_MAP_DIRECT; 797 if (flags & MAP_SHARED) { 798 pr_warn("MAP_SHARED not completely supported on !MMU\n"); 799 return -EINVAL; 800 } 801 } 802 } 803 804 /* handle executable mappings and implied executable 805 * mappings */ 806 if (path_noexec(&file->f_path)) { 807 if (prot & PROT_EXEC) 808 return -EPERM; 809 } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) { 810 /* handle implication of PROT_EXEC by PROT_READ */ 811 if (current->personality & READ_IMPLIES_EXEC) { 812 if (capabilities & NOMMU_MAP_EXEC) 813 prot |= PROT_EXEC; 814 } 815 } else if ((prot & PROT_READ) && 816 (prot & PROT_EXEC) && 817 !(capabilities & NOMMU_MAP_EXEC) 818 ) { 819 /* backing file is not executable, try to copy */ 820 capabilities &= ~NOMMU_MAP_DIRECT; 821 } 822 } else { 823 /* anonymous mappings are 
always memory backed and can be
		 * privately mapped
		 */
		capabilities = NOMMU_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static vm_flags_t determine_vm_flags(struct file *file,
				     unsigned long prot,
				     unsigned long flags,
				     unsigned long capabilities)
{
	vm_flags_t vm_flags;

	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(file, flags);

	if (!file) {
		/*
		 * MAP_ANONYMOUS. MAP_SHARED is mapped to MAP_PRIVATE, because
		 * there is no fork().
		 */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	} else if (flags & MAP_PRIVATE) {
		/* MAP_PRIVATE file mapping */
		if (capabilities & NOMMU_MAP_DIRECT)
			vm_flags |= (capabilities & NOMMU_VMFLAGS);
		else
			vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

		if (!(prot & PROT_WRITE) && !current->ptrace)
			/*
			 * R/O private file mapping which cannot be used to
			 * modify memory, especially also not via active ptrace
			 * (e.g., set breakpoints) or later by upgrading
			 * permissions (no mprotect()). We can try overlaying
			 * the file mapping, which will work e.g., on chardevs,
			 * ramfs/tmpfs/shmfs and romfs/cramfs.
			 */
			vm_flags |= VM_MAYOVERLAY;
	} else {
		/* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */
		vm_flags |= VM_SHARED | VM_MAYSHARE |
			    (capabilities & NOMMU_VMFLAGS);
	}

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = mmap_file(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	unsigned long total, point;
	void *base;
	int ret, order;

	/*
	 * Invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory. VM_MAYOVERLAY will be set if
	 * it may attempt to share, which will make is_nommu_shared_mapping()
	 * happy.
928 */ 929 if (capabilities & NOMMU_MAP_DIRECT) { 930 ret = mmap_file(vma->vm_file, vma); 931 /* shouldn't return success if we're not sharing */ 932 if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags))) 933 ret = -ENOSYS; 934 if (ret == 0) { 935 vma->vm_region->vm_top = vma->vm_region->vm_end; 936 return 0; 937 } 938 if (ret != -ENOSYS) 939 return ret; 940 941 /* getting an ENOSYS error indicates that direct mmap isn't 942 * possible (as opposed to tried but failed) so we'll try to 943 * make a private copy of the data and map that instead */ 944 } 945 946 947 /* allocate some memory to hold the mapping 948 * - note that this may not return a page-aligned address if the object 949 * we're allocating is smaller than a page 950 */ 951 order = get_order(len); 952 total = 1 << order; 953 point = len >> PAGE_SHIFT; 954 955 /* we don't want to allocate a power-of-2 sized page set */ 956 if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) 957 total = point; 958 959 base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL); 960 if (!base) 961 goto enomem; 962 963 atomic_long_add(total, &mmap_pages_allocated); 964 965 vm_flags_set(vma, VM_MAPPED_COPY); 966 region->vm_flags = vma->vm_flags; 967 region->vm_start = (unsigned long) base; 968 region->vm_end = region->vm_start + len; 969 region->vm_top = region->vm_start + (total << PAGE_SHIFT); 970 971 vma->vm_start = region->vm_start; 972 vma->vm_end = region->vm_start + len; 973 974 if (vma->vm_file) { 975 /* read the contents of a file into the copy */ 976 loff_t fpos; 977 978 fpos = vma->vm_pgoff; 979 fpos <<= PAGE_SHIFT; 980 981 ret = kernel_read(vma->vm_file, base, len, &fpos); 982 if (ret < 0) 983 goto error_free; 984 985 /* clear the last little bit */ 986 if (ret < len) 987 memset(base + ret, 0, len - ret); 988 989 } else { 990 vma_set_anonymous(vma); 991 } 992 993 return 0; 994 995 error_free: 996 free_page_series(region->vm_start, region->vm_top); 997 region->vm_start = vma->vm_start = 0; 998 region->vm_end = vma->vm_end = 0; 999 region->vm_top = 0; 1000 return ret; 1001 1002 enomem: 1003 pr_err("Allocation of length %lu from process %d (%s) failed\n", 1004 len, current->pid, current->comm); 1005 show_mem(); 1006 return -ENOMEM; 1007 } 1008 1009 /* 1010 * handle mapping creation for uClinux 1011 */ 1012 unsigned long do_mmap(struct file *file, 1013 unsigned long addr, 1014 unsigned long len, 1015 unsigned long prot, 1016 unsigned long flags, 1017 vm_flags_t vm_flags, 1018 unsigned long pgoff, 1019 unsigned long *populate, 1020 struct list_head *uf) 1021 { 1022 struct vm_area_struct *vma; 1023 struct vm_region *region; 1024 struct rb_node *rb; 1025 unsigned long capabilities, result; 1026 int ret; 1027 VMA_ITERATOR(vmi, current->mm, 0); 1028 1029 *populate = 0; 1030 1031 /* decide whether we should attempt the mapping, and if so what sort of 1032 * mapping */ 1033 ret = validate_mmap_request(file, addr, len, prot, flags, pgoff, 1034 &capabilities); 1035 if (ret < 0) 1036 return ret; 1037 1038 /* we ignore the address hint */ 1039 addr = 0; 1040 len = PAGE_ALIGN(len); 1041 1042 /* we've determined that we can make the mapping, now translate what we 1043 * now know into VMA flags */ 1044 vm_flags |= determine_vm_flags(file, prot, flags, capabilities); 1045 1046 1047 /* we're going to need to record the mapping */ 1048 region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); 1049 if (!region) 1050 goto error_getting_region; 1051 1052 vma = vm_area_alloc(current->mm); 1053 if (!vma) 1054 goto error_getting_vma; 1055 1056 
region->vm_usage = 1; 1057 region->vm_flags = vm_flags; 1058 region->vm_pgoff = pgoff; 1059 1060 vm_flags_init(vma, vm_flags); 1061 vma->vm_pgoff = pgoff; 1062 1063 if (file) { 1064 region->vm_file = get_file(file); 1065 vma->vm_file = get_file(file); 1066 } 1067 1068 down_write(&nommu_region_sem); 1069 1070 /* if we want to share, we need to check for regions created by other 1071 * mmap() calls that overlap with our proposed mapping 1072 * - we can only share with a superset match on most regular files 1073 * - shared mappings on character devices and memory backed files are 1074 * permitted to overlap inexactly as far as we are concerned for in 1075 * these cases, sharing is handled in the driver or filesystem rather 1076 * than here 1077 */ 1078 if (is_nommu_shared_mapping(vm_flags)) { 1079 struct vm_region *pregion; 1080 unsigned long pglen, rpglen, pgend, rpgend, start; 1081 1082 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1083 pgend = pgoff + pglen; 1084 1085 for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) { 1086 pregion = rb_entry(rb, struct vm_region, vm_rb); 1087 1088 if (!is_nommu_shared_mapping(pregion->vm_flags)) 1089 continue; 1090 1091 /* search for overlapping mappings on the same file */ 1092 if (file_inode(pregion->vm_file) != 1093 file_inode(file)) 1094 continue; 1095 1096 if (pregion->vm_pgoff >= pgend) 1097 continue; 1098 1099 rpglen = pregion->vm_end - pregion->vm_start; 1100 rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; 1101 rpgend = pregion->vm_pgoff + rpglen; 1102 if (pgoff >= rpgend) 1103 continue; 1104 1105 /* handle inexactly overlapping matches between 1106 * mappings */ 1107 if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && 1108 !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { 1109 /* new mapping is not a subset of the region */ 1110 if (!(capabilities & NOMMU_MAP_DIRECT)) 1111 goto sharing_violation; 1112 continue; 1113 } 1114 1115 /* we've found a region we can share */ 1116 pregion->vm_usage++; 1117 vma->vm_region = pregion; 1118 start = pregion->vm_start; 1119 start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; 1120 vma->vm_start = start; 1121 vma->vm_end = start + len; 1122 1123 if (pregion->vm_flags & VM_MAPPED_COPY) 1124 vm_flags_set(vma, VM_MAPPED_COPY); 1125 else { 1126 ret = do_mmap_shared_file(vma); 1127 if (ret < 0) { 1128 vma->vm_region = NULL; 1129 vma->vm_start = 0; 1130 vma->vm_end = 0; 1131 pregion->vm_usage--; 1132 pregion = NULL; 1133 goto error_just_free; 1134 } 1135 } 1136 fput(region->vm_file); 1137 kmem_cache_free(vm_region_jar, region); 1138 region = pregion; 1139 result = start; 1140 goto share; 1141 } 1142 1143 /* obtain the address at which to make a shared mapping 1144 * - this is the hook for quasi-memory character devices to 1145 * tell us the location of a shared mapping 1146 */ 1147 if (capabilities & NOMMU_MAP_DIRECT) { 1148 addr = file->f_op->get_unmapped_area(file, addr, len, 1149 pgoff, flags); 1150 if (IS_ERR_VALUE(addr)) { 1151 ret = addr; 1152 if (ret != -ENOSYS) 1153 goto error_just_free; 1154 1155 /* the driver refused to tell us where to site 1156 * the mapping so we'll have to attempt to copy 1157 * it */ 1158 ret = -ENODEV; 1159 if (!(capabilities & NOMMU_MAP_COPY)) 1160 goto error_just_free; 1161 1162 capabilities &= ~NOMMU_MAP_DIRECT; 1163 } else { 1164 vma->vm_start = region->vm_start = addr; 1165 vma->vm_end = region->vm_end = addr + len; 1166 } 1167 } 1168 } 1169 1170 vma->vm_region = region; 1171 1172 /* set up the mapping 1173 * - the region is filled in if NOMMU_MAP_DIRECT is still 
set 1174 */ 1175 if (file && vma->vm_flags & VM_SHARED) 1176 ret = do_mmap_shared_file(vma); 1177 else 1178 ret = do_mmap_private(vma, region, len, capabilities); 1179 if (ret < 0) 1180 goto error_just_free; 1181 add_nommu_region(region); 1182 1183 /* clear anonymous mappings that don't ask for uninitialized data */ 1184 if (!vma->vm_file && 1185 (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) || 1186 !(flags & MAP_UNINITIALIZED))) 1187 memset((void *)region->vm_start, 0, 1188 region->vm_end - region->vm_start); 1189 1190 /* okay... we have a mapping; now we have to register it */ 1191 result = vma->vm_start; 1192 1193 current->mm->total_vm += len >> PAGE_SHIFT; 1194 1195 share: 1196 BUG_ON(!vma->vm_region); 1197 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); 1198 if (vma_iter_prealloc(&vmi, vma)) 1199 goto error_just_free; 1200 1201 setup_vma_to_mm(vma, current->mm); 1202 current->mm->map_count++; 1203 /* add the VMA to the tree */ 1204 vma_iter_store_new(&vmi, vma); 1205 1206 /* we flush the region from the icache only when the first executable 1207 * mapping of it is made */ 1208 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { 1209 flush_icache_user_range(region->vm_start, region->vm_end); 1210 region->vm_icache_flushed = true; 1211 } 1212 1213 up_write(&nommu_region_sem); 1214 1215 return result; 1216 1217 error_just_free: 1218 up_write(&nommu_region_sem); 1219 error: 1220 vma_iter_free(&vmi); 1221 if (region->vm_file) 1222 fput(region->vm_file); 1223 kmem_cache_free(vm_region_jar, region); 1224 if (vma->vm_file) 1225 fput(vma->vm_file); 1226 vm_area_free(vma); 1227 return ret; 1228 1229 sharing_violation: 1230 up_write(&nommu_region_sem); 1231 pr_warn("Attempt to share mismatched mappings\n"); 1232 ret = -EINVAL; 1233 goto error; 1234 1235 error_getting_vma: 1236 kmem_cache_free(vm_region_jar, region); 1237 pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n", 1238 len, current->pid); 1239 show_mem(); 1240 return -ENOMEM; 1241 1242 error_getting_region: 1243 pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n", 1244 len, current->pid); 1245 show_mem(); 1246 return -ENOMEM; 1247 } 1248 1249 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, 1250 unsigned long prot, unsigned long flags, 1251 unsigned long fd, unsigned long pgoff) 1252 { 1253 struct file *file = NULL; 1254 unsigned long retval = -EBADF; 1255 1256 audit_mmap_fd(fd, flags); 1257 if (!(flags & MAP_ANONYMOUS)) { 1258 file = fget(fd); 1259 if (!file) 1260 goto out; 1261 } 1262 1263 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); 1264 1265 if (file) 1266 fput(file); 1267 out: 1268 return retval; 1269 } 1270 1271 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, 1272 unsigned long, prot, unsigned long, flags, 1273 unsigned long, fd, unsigned long, pgoff) 1274 { 1275 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); 1276 } 1277 1278 #ifdef __ARCH_WANT_SYS_OLD_MMAP 1279 struct mmap_arg_struct { 1280 unsigned long addr; 1281 unsigned long len; 1282 unsigned long prot; 1283 unsigned long flags; 1284 unsigned long fd; 1285 unsigned long offset; 1286 }; 1287 1288 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) 1289 { 1290 struct mmap_arg_struct a; 1291 1292 if (copy_from_user(&a, arg, sizeof(a))) 1293 return -EFAULT; 1294 if (offset_in_page(a.offset)) 1295 return -EINVAL; 1296 1297 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, 1298 a.offset >> PAGE_SHIFT); 1299 } 1300 #endif /* 
__ARCH_WANT_SYS_OLD_MMAP */ 1301 1302 /* 1303 * split a vma into two pieces at address 'addr', a new vma is allocated either 1304 * for the first part or the tail. 1305 */ 1306 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, 1307 unsigned long addr, int new_below) 1308 { 1309 struct vm_area_struct *new; 1310 struct vm_region *region; 1311 unsigned long npages; 1312 struct mm_struct *mm; 1313 1314 /* we're only permitted to split anonymous regions (these should have 1315 * only a single usage on the region) */ 1316 if (vma->vm_file) 1317 return -ENOMEM; 1318 1319 mm = vma->vm_mm; 1320 if (mm->map_count >= sysctl_max_map_count) 1321 return -ENOMEM; 1322 1323 region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); 1324 if (!region) 1325 return -ENOMEM; 1326 1327 new = vm_area_dup(vma); 1328 if (!new) 1329 goto err_vma_dup; 1330 1331 /* most fields are the same, copy all, and then fixup */ 1332 *region = *vma->vm_region; 1333 new->vm_region = region; 1334 1335 npages = (addr - vma->vm_start) >> PAGE_SHIFT; 1336 1337 if (new_below) { 1338 region->vm_top = region->vm_end = new->vm_end = addr; 1339 } else { 1340 region->vm_start = new->vm_start = addr; 1341 region->vm_pgoff = new->vm_pgoff += npages; 1342 } 1343 1344 vma_iter_config(vmi, new->vm_start, new->vm_end); 1345 if (vma_iter_prealloc(vmi, vma)) { 1346 pr_warn("Allocation of vma tree for process %d failed\n", 1347 current->pid); 1348 goto err_vmi_preallocate; 1349 } 1350 1351 if (new->vm_ops && new->vm_ops->open) 1352 new->vm_ops->open(new); 1353 1354 down_write(&nommu_region_sem); 1355 delete_nommu_region(vma->vm_region); 1356 if (new_below) { 1357 vma->vm_region->vm_start = vma->vm_start = addr; 1358 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; 1359 } else { 1360 vma->vm_region->vm_end = vma->vm_end = addr; 1361 vma->vm_region->vm_top = addr; 1362 } 1363 add_nommu_region(vma->vm_region); 1364 add_nommu_region(new->vm_region); 1365 up_write(&nommu_region_sem); 1366 1367 setup_vma_to_mm(vma, mm); 1368 setup_vma_to_mm(new, mm); 1369 vma_iter_store_new(vmi, new); 1370 mm->map_count++; 1371 return 0; 1372 1373 err_vmi_preallocate: 1374 vm_area_free(new); 1375 err_vma_dup: 1376 kmem_cache_free(vm_region_jar, region); 1377 return -ENOMEM; 1378 } 1379 1380 /* 1381 * shrink a VMA by removing the specified chunk from either the beginning or 1382 * the end 1383 */ 1384 static int vmi_shrink_vma(struct vma_iterator *vmi, 1385 struct vm_area_struct *vma, 1386 unsigned long from, unsigned long to) 1387 { 1388 struct vm_region *region; 1389 1390 /* adjust the VMA's pointers, which may reposition it in the MM's tree 1391 * and list */ 1392 if (from > vma->vm_start) { 1393 if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL)) 1394 return -ENOMEM; 1395 vma->vm_end = from; 1396 } else { 1397 if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL)) 1398 return -ENOMEM; 1399 vma->vm_start = to; 1400 } 1401 1402 /* cut the backing region down to size */ 1403 region = vma->vm_region; 1404 BUG_ON(region->vm_usage != 1); 1405 1406 down_write(&nommu_region_sem); 1407 delete_nommu_region(region); 1408 if (from > region->vm_start) { 1409 to = region->vm_top; 1410 region->vm_top = region->vm_end = from; 1411 } else { 1412 region->vm_start = to; 1413 } 1414 add_nommu_region(region); 1415 up_write(&nommu_region_sem); 1416 1417 free_page_series(from, to); 1418 return 0; 1419 } 1420 1421 /* 1422 * release a mapping 1423 * - under NOMMU conditions the chunk to be unmapped must be backed by a single 1424 * VMA, though it need not 
cover the whole VMA 1425 */ 1426 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) 1427 { 1428 VMA_ITERATOR(vmi, mm, start); 1429 struct vm_area_struct *vma; 1430 unsigned long end; 1431 int ret = 0; 1432 1433 len = PAGE_ALIGN(len); 1434 if (len == 0) 1435 return -EINVAL; 1436 1437 end = start + len; 1438 1439 /* find the first potentially overlapping VMA */ 1440 vma = vma_find(&vmi, end); 1441 if (!vma) { 1442 static int limit; 1443 if (limit < 5) { 1444 pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n", 1445 current->pid, current->comm, 1446 start, start + len - 1); 1447 limit++; 1448 } 1449 return -EINVAL; 1450 } 1451 1452 /* we're allowed to split an anonymous VMA but not a file-backed one */ 1453 if (vma->vm_file) { 1454 do { 1455 if (start > vma->vm_start) 1456 return -EINVAL; 1457 if (end == vma->vm_end) 1458 goto erase_whole_vma; 1459 vma = vma_find(&vmi, end); 1460 } while (vma); 1461 return -EINVAL; 1462 } else { 1463 /* the chunk must be a subset of the VMA found */ 1464 if (start == vma->vm_start && end == vma->vm_end) 1465 goto erase_whole_vma; 1466 if (start < vma->vm_start || end > vma->vm_end) 1467 return -EINVAL; 1468 if (offset_in_page(start)) 1469 return -EINVAL; 1470 if (end != vma->vm_end && offset_in_page(end)) 1471 return -EINVAL; 1472 if (start != vma->vm_start && end != vma->vm_end) { 1473 ret = split_vma(&vmi, vma, start, 1); 1474 if (ret < 0) 1475 return ret; 1476 } 1477 return vmi_shrink_vma(&vmi, vma, start, end); 1478 } 1479 1480 erase_whole_vma: 1481 if (delete_vma_from_mm(vma)) 1482 ret = -ENOMEM; 1483 else 1484 delete_vma(mm, vma); 1485 return ret; 1486 } 1487 1488 int vm_munmap(unsigned long addr, size_t len) 1489 { 1490 struct mm_struct *mm = current->mm; 1491 int ret; 1492 1493 mmap_write_lock(mm); 1494 ret = do_munmap(mm, addr, len, NULL); 1495 mmap_write_unlock(mm); 1496 return ret; 1497 } 1498 EXPORT_SYMBOL(vm_munmap); 1499 1500 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 1501 { 1502 return vm_munmap(addr, len); 1503 } 1504 1505 /* 1506 * release all the mappings made in a process's VM space 1507 */ 1508 void exit_mmap(struct mm_struct *mm) 1509 { 1510 VMA_ITERATOR(vmi, mm, 0); 1511 struct vm_area_struct *vma; 1512 1513 if (!mm) 1514 return; 1515 1516 mm->total_vm = 0; 1517 1518 /* 1519 * Lock the mm to avoid assert complaining even though this is the only 1520 * user of the mm 1521 */ 1522 mmap_write_lock(mm); 1523 for_each_vma(vmi, vma) { 1524 cleanup_vma_from_mm(vma); 1525 delete_vma(mm, vma); 1526 cond_resched(); 1527 } 1528 __mt_destroy(&mm->mm_mt); 1529 mmap_write_unlock(mm); 1530 } 1531 1532 /* 1533 * expand (or shrink) an existing mapping, potentially moving it at the same 1534 * time (controlled by the MREMAP_MAYMOVE flag and available VM space) 1535 * 1536 * under NOMMU conditions, we only permit changing a mapping's size, and only 1537 * as long as it stays within the region allocated by do_mmap_private() and the 1538 * block is not shareable 1539 * 1540 * MREMAP_FIXED is not supported under NOMMU conditions 1541 */ 1542 static unsigned long do_mremap(unsigned long addr, 1543 unsigned long old_len, unsigned long new_len, 1544 unsigned long flags, unsigned long new_addr) 1545 { 1546 struct vm_area_struct *vma; 1547 1548 /* insanity checks first */ 1549 old_len = PAGE_ALIGN(old_len); 1550 new_len = PAGE_ALIGN(new_len); 1551 if (old_len == 0 || new_len == 0) 1552 return (unsigned long) -EINVAL; 1553 1554 if (offset_in_page(addr)) 1555 return -EINVAL; 1556 1557 if 
(flags & MREMAP_FIXED && new_addr != addr) 1558 return (unsigned long) -EINVAL; 1559 1560 vma = find_vma_exact(current->mm, addr, old_len); 1561 if (!vma) 1562 return (unsigned long) -EINVAL; 1563 1564 if (vma->vm_end != vma->vm_start + old_len) 1565 return (unsigned long) -EFAULT; 1566 1567 if (is_nommu_shared_mapping(vma->vm_flags)) 1568 return (unsigned long) -EPERM; 1569 1570 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) 1571 return (unsigned long) -ENOMEM; 1572 1573 /* all checks complete - do it */ 1574 vma->vm_end = vma->vm_start + new_len; 1575 return vma->vm_start; 1576 } 1577 1578 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, 1579 unsigned long, new_len, unsigned long, flags, 1580 unsigned long, new_addr) 1581 { 1582 unsigned long ret; 1583 1584 mmap_write_lock(current->mm); 1585 ret = do_mremap(addr, old_len, new_len, flags, new_addr); 1586 mmap_write_unlock(current->mm); 1587 return ret; 1588 } 1589 1590 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 1591 unsigned long pfn, unsigned long size, pgprot_t prot) 1592 { 1593 if (addr != (pfn << PAGE_SHIFT)) 1594 return -EINVAL; 1595 1596 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); 1597 return 0; 1598 } 1599 EXPORT_SYMBOL(remap_pfn_range); 1600 1601 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) 1602 { 1603 unsigned long pfn = start >> PAGE_SHIFT; 1604 unsigned long vm_len = vma->vm_end - vma->vm_start; 1605 1606 pfn += vma->vm_pgoff; 1607 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); 1608 } 1609 EXPORT_SYMBOL(vm_iomap_memory); 1610 1611 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 1612 unsigned long pgoff) 1613 { 1614 unsigned int size = vma->vm_end - vma->vm_start; 1615 1616 if (!(vma->vm_flags & VM_USERMAP)) 1617 return -EINVAL; 1618 1619 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); 1620 vma->vm_end = vma->vm_start + size; 1621 1622 return 0; 1623 } 1624 EXPORT_SYMBOL(remap_vmalloc_range); 1625 1626 vm_fault_t filemap_fault(struct vm_fault *vmf) 1627 { 1628 BUG(); 1629 return 0; 1630 } 1631 EXPORT_SYMBOL(filemap_fault); 1632 1633 vm_fault_t filemap_map_pages(struct vm_fault *vmf, 1634 pgoff_t start_pgoff, pgoff_t end_pgoff) 1635 { 1636 BUG(); 1637 return 0; 1638 } 1639 EXPORT_SYMBOL(filemap_map_pages); 1640 1641 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr, 1642 void *buf, int len, unsigned int gup_flags) 1643 { 1644 struct vm_area_struct *vma; 1645 int write = gup_flags & FOLL_WRITE; 1646 1647 if (mmap_read_lock_killable(mm)) 1648 return 0; 1649 1650 /* the access must start within one of the target process's mappings */ 1651 vma = find_vma(mm, addr); 1652 if (vma) { 1653 /* don't overrun this mapping */ 1654 if (addr + len >= vma->vm_end) 1655 len = vma->vm_end - addr; 1656 1657 /* only read or write mappings where it is permitted */ 1658 if (write && vma->vm_flags & VM_MAYWRITE) 1659 copy_to_user_page(vma, NULL, addr, 1660 (void *) addr, buf, len); 1661 else if (!write && vma->vm_flags & VM_MAYREAD) 1662 copy_from_user_page(vma, NULL, addr, 1663 buf, (void *) addr, len); 1664 else 1665 len = 0; 1666 } else { 1667 len = 0; 1668 } 1669 1670 mmap_read_unlock(mm); 1671 1672 return len; 1673 } 1674 1675 /** 1676 * access_remote_vm - access another process' address space 1677 * @mm: the mm_struct of the target address space 1678 * @addr: start address to access 1679 * @buf: source or destination buffer 1680 * @len: 
number of bytes to transfer 1681 * @gup_flags: flags modifying lookup behaviour 1682 * 1683 * The caller must hold a reference on @mm. 1684 */ 1685 int access_remote_vm(struct mm_struct *mm, unsigned long addr, 1686 void *buf, int len, unsigned int gup_flags) 1687 { 1688 return __access_remote_vm(mm, addr, buf, len, gup_flags); 1689 } 1690 1691 /* 1692 * Access another process' address space. 1693 * - source/target buffer must be kernel space 1694 */ 1695 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, 1696 unsigned int gup_flags) 1697 { 1698 struct mm_struct *mm; 1699 1700 if (addr + len < addr) 1701 return 0; 1702 1703 mm = get_task_mm(tsk); 1704 if (!mm) 1705 return 0; 1706 1707 len = __access_remote_vm(mm, addr, buf, len, gup_flags); 1708 1709 mmput(mm); 1710 return len; 1711 } 1712 EXPORT_SYMBOL_GPL(access_process_vm); 1713 1714 #ifdef CONFIG_BPF_SYSCALL 1715 /* 1716 * Copy a string from another process's address space as given in mm. 1717 * If there is any error return -EFAULT. 1718 */ 1719 static int __copy_remote_vm_str(struct mm_struct *mm, unsigned long addr, 1720 void *buf, int len) 1721 { 1722 unsigned long addr_end; 1723 struct vm_area_struct *vma; 1724 int ret = -EFAULT; 1725 1726 *(char *)buf = '\0'; 1727 1728 if (mmap_read_lock_killable(mm)) 1729 return ret; 1730 1731 /* the access must start within one of the target process's mappings */ 1732 vma = find_vma(mm, addr); 1733 if (!vma) 1734 goto out; 1735 1736 if (check_add_overflow(addr, len, &addr_end)) 1737 goto out; 1738 1739 /* don't overrun this mapping */ 1740 if (addr_end > vma->vm_end) 1741 len = vma->vm_end - addr; 1742 1743 /* only read mappings where it is permitted */ 1744 if (vma->vm_flags & VM_MAYREAD) { 1745 ret = strscpy(buf, (char *)addr, len); 1746 if (ret < 0) 1747 ret = len - 1; 1748 } 1749 1750 out: 1751 mmap_read_unlock(mm); 1752 return ret; 1753 } 1754 1755 /** 1756 * copy_remote_vm_str - copy a string from another process's address space. 1757 * @tsk: the task of the target address space 1758 * @addr: start address to read from 1759 * @buf: destination buffer 1760 * @len: number of bytes to copy 1761 * @gup_flags: flags modifying lookup behaviour (unused) 1762 * 1763 * The caller must hold a reference on @mm. 1764 * 1765 * Return: number of bytes copied from @addr (source) to @buf (destination); 1766 * not including the trailing NUL. Always guaranteed to leave NUL-terminated 1767 * buffer. On any error, return -EFAULT. 1768 */ 1769 int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr, 1770 void *buf, int len, unsigned int gup_flags) 1771 { 1772 struct mm_struct *mm; 1773 int ret; 1774 1775 if (unlikely(len == 0)) 1776 return 0; 1777 1778 mm = get_task_mm(tsk); 1779 if (!mm) { 1780 *(char *)buf = '\0'; 1781 return -EFAULT; 1782 } 1783 1784 ret = __copy_remote_vm_str(mm, addr, buf, len); 1785 1786 mmput(mm); 1787 1788 return ret; 1789 } 1790 EXPORT_SYMBOL_GPL(copy_remote_vm_str); 1791 #endif /* CONFIG_BPF_SYSCALL */ 1792 1793 /** 1794 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode 1795 * @inode: The inode to check 1796 * @size: The current filesize of the inode 1797 * @newsize: The proposed filesize of the inode 1798 * 1799 * Check the shared mappings on an inode on behalf of a shrinking truncate to 1800 * make sure that any outstanding VMAs aren't broken and then shrink the 1801 * vm_regions that extend beyond so that do_mmap() doesn't 1802 * automatically grant mappings that are too large. 
1803 */ 1804 int nommu_shrink_inode_mappings(struct inode *inode, size_t size, 1805 size_t newsize) 1806 { 1807 struct vm_area_struct *vma; 1808 struct vm_region *region; 1809 pgoff_t low, high; 1810 size_t r_size, r_top; 1811 1812 low = newsize >> PAGE_SHIFT; 1813 high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1814 1815 down_write(&nommu_region_sem); 1816 i_mmap_lock_read(inode->i_mapping); 1817 1818 /* search for VMAs that fall within the dead zone */ 1819 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { 1820 /* found one - only interested if it's shared out of the page 1821 * cache */ 1822 if (vma->vm_flags & VM_SHARED) { 1823 i_mmap_unlock_read(inode->i_mapping); 1824 up_write(&nommu_region_sem); 1825 return -ETXTBSY; /* not quite true, but near enough */ 1826 } 1827 } 1828 1829 /* reduce any regions that overlap the dead zone - if in existence, 1830 * these will be pointed to by VMAs that don't overlap the dead zone 1831 * 1832 * we don't check for any regions that start beyond the EOF as there 1833 * shouldn't be any 1834 */ 1835 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { 1836 if (!(vma->vm_flags & VM_SHARED)) 1837 continue; 1838 1839 region = vma->vm_region; 1840 r_size = region->vm_top - region->vm_start; 1841 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; 1842 1843 if (r_top > newsize) { 1844 region->vm_top -= r_top - newsize; 1845 if (region->vm_end > region->vm_top) 1846 region->vm_end = region->vm_top; 1847 } 1848 } 1849 1850 i_mmap_unlock_read(inode->i_mapping); 1851 up_write(&nommu_region_sem); 1852 return 0; 1853 } 1854 1855 /* 1856 * Initialise sysctl_user_reserve_kbytes. 1857 * 1858 * This is intended to prevent a user from starting a single memory hogging 1859 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER 1860 * mode. 1861 * 1862 * The default value is min(3% of free memory, 128MB) 1863 * 128MB is enough to recover with sshd/login, bash, and top/kill. 1864 */ 1865 static int __meminit init_user_reserve(void) 1866 { 1867 unsigned long free_kbytes; 1868 1869 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); 1870 1871 sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); 1872 return 0; 1873 } 1874 subsys_initcall(init_user_reserve); 1875 1876 /* 1877 * Initialise sysctl_admin_reserve_kbytes. 1878 * 1879 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin 1880 * to log in and kill a memory hogging process. 1881 * 1882 * Systems with more than 256MB will reserve 8MB, enough to recover 1883 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will 1884 * only reserve 3% of free pages by default. 1885 */ 1886 static int __meminit init_admin_reserve(void) 1887 { 1888 unsigned long free_kbytes; 1889 1890 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); 1891 1892 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); 1893 return 0; 1894 } 1895 subsys_initcall(init_admin_reserve); 1896 1897 int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) 1898 { 1899 mmap_write_lock(oldmm); 1900 dup_mm_exe_file(mm, oldmm); 1901 mmap_write_unlock(oldmm); 1902 return 0; 1903 } 1904