1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * mm/mmap.c 4 * 5 * Written by obz. 6 * 7 * Address space accounting code <alan@lxorguk.ukuu.org.uk> 8 */ 9 10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 12 #include <linux/kernel.h> 13 #include <linux/slab.h> 14 #include <linux/backing-dev.h> 15 #include <linux/mm.h> 16 #include <linux/mm_inline.h> 17 #include <linux/shm.h> 18 #include <linux/mman.h> 19 #include <linux/pagemap.h> 20 #include <linux/swap.h> 21 #include <linux/syscalls.h> 22 #include <linux/capability.h> 23 #include <linux/init.h> 24 #include <linux/file.h> 25 #include <linux/fs.h> 26 #include <linux/personality.h> 27 #include <linux/security.h> 28 #include <linux/hugetlb.h> 29 #include <linux/shmem_fs.h> 30 #include <linux/profile.h> 31 #include <linux/export.h> 32 #include <linux/mount.h> 33 #include <linux/mempolicy.h> 34 #include <linux/rmap.h> 35 #include <linux/mmu_notifier.h> 36 #include <linux/mmdebug.h> 37 #include <linux/perf_event.h> 38 #include <linux/audit.h> 39 #include <linux/khugepaged.h> 40 #include <linux/uprobes.h> 41 #include <linux/notifier.h> 42 #include <linux/memory.h> 43 #include <linux/printk.h> 44 #include <linux/userfaultfd_k.h> 45 #include <linux/moduleparam.h> 46 #include <linux/pkeys.h> 47 #include <linux/oom.h> 48 #include <linux/sched/mm.h> 49 #include <linux/ksm.h> 50 51 #include <linux/uaccess.h> 52 #include <asm/cacheflush.h> 53 #include <asm/tlb.h> 54 #include <asm/mmu_context.h> 55 56 #define CREATE_TRACE_POINTS 57 #include <trace/events/mmap.h> 58 59 #include "internal.h" 60 61 #ifndef arch_mmap_check 62 #define arch_mmap_check(addr, len, flags) (0) 63 #endif 64 65 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS 66 const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN; 67 int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX; 68 int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS; 69 #endif 70 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS 71 const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN; 72 const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; 73 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; 74 #endif 75 76 static bool ignore_rlimit_data; 77 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); 78 79 /* Update vma->vm_page_prot to reflect vma->vm_flags. */ 80 void vma_set_page_prot(struct vm_area_struct *vma) 81 { 82 unsigned long vm_flags = vma->vm_flags; 83 pgprot_t vm_page_prot; 84 85 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); 86 if (vma_wants_writenotify(vma, vm_page_prot)) { 87 vm_flags &= ~VM_SHARED; 88 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); 89 } 90 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */ 91 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); 92 } 93 94 /* 95 * check_brk_limits() - Use platform specific check of range & verify mlock 96 * limits. 97 * @addr: The address to check 98 * @len: The size of increase. 99 * 100 * Return: 0 on success. 101 */ 102 static int check_brk_limits(unsigned long addr, unsigned long len) 103 { 104 unsigned long mapped_addr; 105 106 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); 107 if (IS_ERR_VALUE(mapped_addr)) 108 return mapped_addr; 109 110 return mlock_future_ok(current->mm, current->mm->def_flags, len) 111 ? 
0 : -EAGAIN;
}
static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		unsigned long addr, unsigned long request, unsigned long flags);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *brkvma, *next = NULL;
	unsigned long min_brk;
	bool populate = false;
	LIST_HEAD(uf);
	struct vma_iterator vmi;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit in the case where the limit is
	 * not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		/* Search one past newbrk */
		vma_iter_init(&vmi, mm, newbrk);
		brkvma = vma_find(&vmi, oldbrk);
		if (!brkvma || brkvma->vm_start >= oldbrk)
			goto out; /* mapping intersects with an existing non-brk vma. */
		/*
		 * mm->brk must be protected by write mmap_lock.
		 * do_vmi_align_munmap() will drop the lock on success, so
		 * update it before calling do_vmi_align_munmap().
		 */
		mm->brk = brk;
		if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
					/* unlock = */ true))
			goto out;

		goto success_unlocked;
	}

	if (check_brk_limits(oldbrk, newbrk - oldbrk))
		goto out;

	/*
	 * Only check if the next VMA is within the stack_guard_gap of the
	 * expansion area
	 */
	vma_iter_init(&vmi, mm, oldbrk);
	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	brkvma = vma_prev_limit(&vmi, mm->start_brk);
	/* Ok, looks good - let it rip.
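	 *
	 * For illustration (made-up numbers, 4 KiB pages): with mm->brk at
	 * 0x56340000 and a request of brk = 0x56341234, newbrk becomes
	 * 0x56342000 while oldbrk stays 0x56340000, so do_brk_flags() below
	 * is asked to grow the brk VMA by 0x2000 bytes.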
*/ 197 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0) 198 goto out; 199 200 mm->brk = brk; 201 if (mm->def_flags & VM_LOCKED) 202 populate = true; 203 204 success: 205 mmap_write_unlock(mm); 206 success_unlocked: 207 userfaultfd_unmap_complete(mm, &uf); 208 if (populate) 209 mm_populate(oldbrk, newbrk - oldbrk); 210 return brk; 211 212 out: 213 mm->brk = origbrk; 214 mmap_write_unlock(mm); 215 return origbrk; 216 } 217 218 /* 219 * If a hint addr is less than mmap_min_addr change hint to be as 220 * low as possible but still greater than mmap_min_addr 221 */ 222 static inline unsigned long round_hint_to_min(unsigned long hint) 223 { 224 hint &= PAGE_MASK; 225 if (((void *)hint != NULL) && 226 (hint < mmap_min_addr)) 227 return PAGE_ALIGN(mmap_min_addr); 228 return hint; 229 } 230 231 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, 232 unsigned long bytes) 233 { 234 unsigned long locked_pages, limit_pages; 235 236 if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) 237 return true; 238 239 locked_pages = bytes >> PAGE_SHIFT; 240 locked_pages += mm->locked_vm; 241 242 limit_pages = rlimit(RLIMIT_MEMLOCK); 243 limit_pages >>= PAGE_SHIFT; 244 245 return locked_pages <= limit_pages; 246 } 247 248 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) 249 { 250 if (S_ISREG(inode->i_mode)) 251 return MAX_LFS_FILESIZE; 252 253 if (S_ISBLK(inode->i_mode)) 254 return MAX_LFS_FILESIZE; 255 256 if (S_ISSOCK(inode->i_mode)) 257 return MAX_LFS_FILESIZE; 258 259 /* Special "we do even unsigned file positions" case */ 260 if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET) 261 return 0; 262 263 /* Yes, random drivers might want more. But I'm tired of buggy drivers */ 264 return ULONG_MAX; 265 } 266 267 static inline bool file_mmap_ok(struct file *file, struct inode *inode, 268 unsigned long pgoff, unsigned long len) 269 { 270 u64 maxsize = file_mmap_size_max(file, inode); 271 272 if (maxsize && len > maxsize) 273 return false; 274 maxsize -= len; 275 if (pgoff > maxsize >> PAGE_SHIFT) 276 return false; 277 return true; 278 } 279 280 /* 281 * The caller must write-lock current->mm->mmap_lock. 282 */ 283 unsigned long do_mmap(struct file *file, unsigned long addr, 284 unsigned long len, unsigned long prot, 285 unsigned long flags, vm_flags_t vm_flags, 286 unsigned long pgoff, unsigned long *populate, 287 struct list_head *uf) 288 { 289 struct mm_struct *mm = current->mm; 290 int pkey = 0; 291 292 *populate = 0; 293 294 if (!len) 295 return -EINVAL; 296 297 /* 298 * Does the application expect PROT_READ to imply PROT_EXEC? 299 * 300 * (the exception is when the underlying filesystem is noexec 301 * mounted, in which case we don't add PROT_EXEC.) 302 */ 303 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) 304 if (!(file && path_noexec(&file->f_path))) 305 prot |= PROT_EXEC; 306 307 /* force arch specific MAP_FIXED handling in get_unmapped_area */ 308 if (flags & MAP_FIXED_NOREPLACE) 309 flags |= MAP_FIXED; 310 311 if (!(flags & MAP_FIXED)) 312 addr = round_hint_to_min(addr); 313 314 /* Careful about overflows.. */ 315 len = PAGE_ALIGN(len); 316 if (!len) 317 return -ENOMEM; 318 319 /* offset overflow? */ 320 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) 321 return -EOVERFLOW; 322 323 /* Too many mappings? */ 324 if (mm->map_count > sysctl_max_map_count) 325 return -ENOMEM; 326 327 /* 328 * addr is returned from get_unmapped_area, 329 * There are two cases: 330 * 1> MAP_FIXED == false 331 * unallocated memory, no need to check sealing. 
332 * 1> MAP_FIXED == true 333 * sealing is checked inside mmap_region when 334 * do_vmi_munmap is called. 335 */ 336 337 if (prot == PROT_EXEC) { 338 pkey = execute_only_pkey(mm); 339 if (pkey < 0) 340 pkey = 0; 341 } 342 343 /* Do simple checking here so the lower-level routines won't have 344 * to. we assume access permissions have been handled by the open 345 * of the memory object, so we don't do any here. 346 */ 347 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) | 348 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; 349 350 /* Obtain the address to map to. we verify (or select) it and ensure 351 * that it represents a valid section of the address space. 352 */ 353 addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags); 354 if (IS_ERR_VALUE(addr)) 355 return addr; 356 357 if (flags & MAP_FIXED_NOREPLACE) { 358 if (find_vma_intersection(mm, addr, addr + len)) 359 return -EEXIST; 360 } 361 362 if (flags & MAP_LOCKED) 363 if (!can_do_mlock()) 364 return -EPERM; 365 366 if (!mlock_future_ok(mm, vm_flags, len)) 367 return -EAGAIN; 368 369 if (file) { 370 struct inode *inode = file_inode(file); 371 unsigned long flags_mask; 372 373 if (!file_mmap_ok(file, inode, pgoff, len)) 374 return -EOVERFLOW; 375 376 flags_mask = LEGACY_MAP_MASK; 377 if (file->f_op->fop_flags & FOP_MMAP_SYNC) 378 flags_mask |= MAP_SYNC; 379 380 switch (flags & MAP_TYPE) { 381 case MAP_SHARED: 382 /* 383 * Force use of MAP_SHARED_VALIDATE with non-legacy 384 * flags. E.g. MAP_SYNC is dangerous to use with 385 * MAP_SHARED as you don't know which consistency model 386 * you will get. We silently ignore unsupported flags 387 * with MAP_SHARED to preserve backward compatibility. 388 */ 389 flags &= LEGACY_MAP_MASK; 390 fallthrough; 391 case MAP_SHARED_VALIDATE: 392 if (flags & ~flags_mask) 393 return -EOPNOTSUPP; 394 if (prot & PROT_WRITE) { 395 if (!(file->f_mode & FMODE_WRITE)) 396 return -EACCES; 397 if (IS_SWAPFILE(file->f_mapping->host)) 398 return -ETXTBSY; 399 } 400 401 /* 402 * Make sure we don't allow writing to an append-only 403 * file.. 404 */ 405 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) 406 return -EACCES; 407 408 vm_flags |= VM_SHARED | VM_MAYSHARE; 409 if (!(file->f_mode & FMODE_WRITE)) 410 vm_flags &= ~(VM_MAYWRITE | VM_SHARED); 411 fallthrough; 412 case MAP_PRIVATE: 413 if (!(file->f_mode & FMODE_READ)) 414 return -EACCES; 415 if (path_noexec(&file->f_path)) { 416 if (vm_flags & VM_EXEC) 417 return -EPERM; 418 vm_flags &= ~VM_MAYEXEC; 419 } 420 421 if (!file->f_op->mmap) 422 return -ENODEV; 423 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) 424 return -EINVAL; 425 break; 426 427 default: 428 return -EINVAL; 429 } 430 } else { 431 switch (flags & MAP_TYPE) { 432 case MAP_SHARED: 433 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) 434 return -EINVAL; 435 /* 436 * Ignore pgoff. 437 */ 438 pgoff = 0; 439 vm_flags |= VM_SHARED | VM_MAYSHARE; 440 break; 441 case MAP_DROPPABLE: 442 if (VM_DROPPABLE == VM_NONE) 443 return -ENOTSUPP; 444 /* 445 * A locked or stack area makes no sense to be droppable. 446 * 447 * Also, since droppable pages can just go away at any time 448 * it makes no sense to copy them on fork or dump them. 449 * 450 * And don't attempt to combine with hugetlb for now. 451 */ 452 if (flags & (MAP_LOCKED | MAP_HUGETLB)) 453 return -EINVAL; 454 if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) 455 return -EINVAL; 456 457 vm_flags |= VM_DROPPABLE; 458 459 /* 460 * If the pages can be dropped, then it doesn't make 461 * sense to reserve them. 
462 */ 463 vm_flags |= VM_NORESERVE; 464 465 /* 466 * Likewise, they're volatile enough that they 467 * shouldn't survive forks or coredumps. 468 */ 469 vm_flags |= VM_WIPEONFORK | VM_DONTDUMP; 470 fallthrough; 471 case MAP_PRIVATE: 472 /* 473 * Set pgoff according to addr for anon_vma. 474 */ 475 pgoff = addr >> PAGE_SHIFT; 476 break; 477 default: 478 return -EINVAL; 479 } 480 } 481 482 /* 483 * Set 'VM_NORESERVE' if we should not account for the 484 * memory use of this mapping. 485 */ 486 if (flags & MAP_NORESERVE) { 487 /* We honor MAP_NORESERVE if allowed to overcommit */ 488 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) 489 vm_flags |= VM_NORESERVE; 490 491 /* hugetlb applies strict overcommit unless MAP_NORESERVE */ 492 if (file && is_file_hugepages(file)) 493 vm_flags |= VM_NORESERVE; 494 } 495 496 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); 497 if (!IS_ERR_VALUE(addr) && 498 ((vm_flags & VM_LOCKED) || 499 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) 500 *populate = len; 501 return addr; 502 } 503 504 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, 505 unsigned long prot, unsigned long flags, 506 unsigned long fd, unsigned long pgoff) 507 { 508 struct file *file = NULL; 509 unsigned long retval; 510 511 if (!(flags & MAP_ANONYMOUS)) { 512 audit_mmap_fd(fd, flags); 513 file = fget(fd); 514 if (!file) 515 return -EBADF; 516 if (is_file_hugepages(file)) { 517 len = ALIGN(len, huge_page_size(hstate_file(file))); 518 } else if (unlikely(flags & MAP_HUGETLB)) { 519 retval = -EINVAL; 520 goto out_fput; 521 } 522 } else if (flags & MAP_HUGETLB) { 523 struct hstate *hs; 524 525 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); 526 if (!hs) 527 return -EINVAL; 528 529 len = ALIGN(len, huge_page_size(hs)); 530 /* 531 * VM_NORESERVE is used because the reservations will be 532 * taken when vm_ops->mmap() is called 533 */ 534 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, 535 VM_NORESERVE, 536 HUGETLB_ANONHUGE_INODE, 537 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); 538 if (IS_ERR(file)) 539 return PTR_ERR(file); 540 } 541 542 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); 543 out_fput: 544 if (file) 545 fput(file); 546 return retval; 547 } 548 549 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, 550 unsigned long, prot, unsigned long, flags, 551 unsigned long, fd, unsigned long, pgoff) 552 { 553 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); 554 } 555 556 #ifdef __ARCH_WANT_SYS_OLD_MMAP 557 struct mmap_arg_struct { 558 unsigned long addr; 559 unsigned long len; 560 unsigned long prot; 561 unsigned long flags; 562 unsigned long fd; 563 unsigned long offset; 564 }; 565 566 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) 567 { 568 struct mmap_arg_struct a; 569 570 if (copy_from_user(&a, arg, sizeof(a))) 571 return -EFAULT; 572 if (offset_in_page(a.offset)) 573 return -EINVAL; 574 575 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, 576 a.offset >> PAGE_SHIFT); 577 } 578 #endif /* __ARCH_WANT_SYS_OLD_MMAP */ 579 580 /** 581 * unmapped_area() - Find an area between the low_limit and the high_limit with 582 * the correct alignment and offset, all from @info. Note: current->mm is used 583 * for the search. 584 * 585 * @info: The unmapped area information including the range [low_limit - 586 * high_limit), the alignment offset and mask. 587 * 588 * Return: A memory address or -ENOMEM. 
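 *
 * As an illustration of the alignment step performed below (all values
 * made up): with info->align_mask == 0x1fffff (2 MiB - 1) and
 * info->align_offset == 0, a candidate gap address of 0x7f1234567000 is
 * rounded up to 0x7f1234600000 by
 *
 *	gap += (info->align_offset - gap) & info->align_mask;
 *
 * so the returned address satisfies the requested alignment on top of
 * any start_gap placed in front of it.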
 */
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;
	VMA_ITERATOR(vmi, current->mm, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask + info->start_gap;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	/*
	 * Adjust for the gap first so it doesn't interfere with the
	 * later alignment. The first step is the minimum needed to
	 * fulfill the start gap, the next step is the minimum to align
	 * that. It is the minimum needed to fulfill both.
	 */
	gap = vma_iter_addr(&vmi) + info->start_gap;
	gap += (info->align_offset - gap) & info->align_mask;
	tmp = vma_next(&vmi);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap + length - 1) {
			low_limit = tmp->vm_end;
			vma_iter_reset(&vmi);
			goto retry;
		}
	} else {
		tmp = vma_prev(&vmi);
		if (tmp && vm_end_gap(tmp) > gap) {
			low_limit = vm_end_gap(tmp);
			vma_iter_reset(&vmi);
			goto retry;
		}
	}

	return gap;
}

/**
 * unmapped_area_topdown() - Find an area between the low_limit and the
 * high_limit with the correct alignment and offset at the highest available
 * address, all from @info. Note: current->mm is used for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap, gap_end;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;
	VMA_ITERATOR(vmi, current->mm, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask + info->start_gap;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	gap = vma_iter_end(&vmi) - info->length;
	gap -= (gap - info->align_offset) & info->align_mask;
	gap_end = vma_iter_end(&vmi);
	tmp = vma_next(&vmi);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap_end) {
			high_limit = vm_start_gap(tmp);
			vma_iter_reset(&vmi);
			goto retry;
		}
	} else {
		tmp = vma_prev(&vmi);
		if (tmp && vm_end_gap(tmp) > gap) {
			high_limit = tmp->vm_start;
			vma_iter_reset(&vmi);
			goto retry;
		}
	}

	return gap;
}

/*
 * Determine if the allocation needs to ensure that there is no
 * existing mapping within its guard gaps, for use as start_gap.
 */
static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
{
	if (vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

/*
 * Search for an unmapped address range.
703 * 704 * We are looking for a range that: 705 * - does not intersect with any VMA; 706 * - is contained within the [low_limit, high_limit) interval; 707 * - is at least the desired size. 708 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) 709 */ 710 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) 711 { 712 unsigned long addr; 713 714 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) 715 addr = unmapped_area_topdown(info); 716 else 717 addr = unmapped_area(info); 718 719 trace_vm_unmapped_area(addr, info); 720 return addr; 721 } 722 723 /* Get an address range which is currently unmapped. 724 * For shmat() with addr=0. 725 * 726 * Ugly calling convention alert: 727 * Return value with the low bits set means error value, 728 * ie 729 * if (ret & ~PAGE_MASK) 730 * error = ret; 731 * 732 * This function "knows" that -ENOMEM has the bits set. 733 */ 734 unsigned long 735 generic_get_unmapped_area(struct file *filp, unsigned long addr, 736 unsigned long len, unsigned long pgoff, 737 unsigned long flags, vm_flags_t vm_flags) 738 { 739 struct mm_struct *mm = current->mm; 740 struct vm_area_struct *vma, *prev; 741 struct vm_unmapped_area_info info = {}; 742 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); 743 744 if (len > mmap_end - mmap_min_addr) 745 return -ENOMEM; 746 747 if (flags & MAP_FIXED) 748 return addr; 749 750 if (addr) { 751 addr = PAGE_ALIGN(addr); 752 vma = find_vma_prev(mm, addr, &prev); 753 if (mmap_end - len >= addr && addr >= mmap_min_addr && 754 (!vma || addr + len <= vm_start_gap(vma)) && 755 (!prev || addr >= vm_end_gap(prev))) 756 return addr; 757 } 758 759 info.length = len; 760 info.low_limit = mm->mmap_base; 761 info.high_limit = mmap_end; 762 info.start_gap = stack_guard_placement(vm_flags); 763 if (filp && is_file_hugepages(filp)) 764 info.align_mask = huge_page_mask_align(filp); 765 return vm_unmapped_area(&info); 766 } 767 768 #ifndef HAVE_ARCH_UNMAPPED_AREA 769 unsigned long 770 arch_get_unmapped_area(struct file *filp, unsigned long addr, 771 unsigned long len, unsigned long pgoff, 772 unsigned long flags, vm_flags_t vm_flags) 773 { 774 return generic_get_unmapped_area(filp, addr, len, pgoff, flags, 775 vm_flags); 776 } 777 #endif 778 779 /* 780 * This mmap-allocator allocates new areas top-down from below the 781 * stack's low limit (the base): 782 */ 783 unsigned long 784 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 785 unsigned long len, unsigned long pgoff, 786 unsigned long flags, vm_flags_t vm_flags) 787 { 788 struct vm_area_struct *vma, *prev; 789 struct mm_struct *mm = current->mm; 790 struct vm_unmapped_area_info info = {}; 791 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); 792 793 /* requested length too big for entire address space */ 794 if (len > mmap_end - mmap_min_addr) 795 return -ENOMEM; 796 797 if (flags & MAP_FIXED) 798 return addr; 799 800 /* requesting a specific address */ 801 if (addr) { 802 addr = PAGE_ALIGN(addr); 803 vma = find_vma_prev(mm, addr, &prev); 804 if (mmap_end - len >= addr && addr >= mmap_min_addr && 805 (!vma || addr + len <= vm_start_gap(vma)) && 806 (!prev || addr >= vm_end_gap(prev))) 807 return addr; 808 } 809 810 info.flags = VM_UNMAPPED_AREA_TOPDOWN; 811 info.length = len; 812 info.low_limit = PAGE_SIZE; 813 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); 814 info.start_gap = stack_guard_placement(vm_flags); 815 if (filp && is_file_hugepages(filp)) 816 info.align_mask = huge_page_mask_align(filp); 817 addr 
= vm_unmapped_area(&info); 818 819 /* 820 * A failed mmap() very likely causes application failure, 821 * so fall back to the bottom-up function here. This scenario 822 * can happen with large stack limits and large mmap() 823 * allocations. 824 */ 825 if (offset_in_page(addr)) { 826 VM_BUG_ON(addr != -ENOMEM); 827 info.flags = 0; 828 info.low_limit = TASK_UNMAPPED_BASE; 829 info.high_limit = mmap_end; 830 addr = vm_unmapped_area(&info); 831 } 832 833 return addr; 834 } 835 836 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 837 unsigned long 838 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 839 unsigned long len, unsigned long pgoff, 840 unsigned long flags, vm_flags_t vm_flags) 841 { 842 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags, 843 vm_flags); 844 } 845 #endif 846 847 unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp, 848 unsigned long addr, unsigned long len, 849 unsigned long pgoff, unsigned long flags, 850 vm_flags_t vm_flags) 851 { 852 if (test_bit(MMF_TOPDOWN, &mm->flags)) 853 return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, 854 flags, vm_flags); 855 return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags); 856 } 857 858 unsigned long 859 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, 860 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) 861 { 862 unsigned long (*get_area)(struct file *, unsigned long, 863 unsigned long, unsigned long, unsigned long) 864 = NULL; 865 866 unsigned long error = arch_mmap_check(addr, len, flags); 867 if (error) 868 return error; 869 870 /* Careful about overflows.. */ 871 if (len > TASK_SIZE) 872 return -ENOMEM; 873 874 if (file) { 875 if (file->f_op->get_unmapped_area) 876 get_area = file->f_op->get_unmapped_area; 877 } else if (flags & MAP_SHARED) { 878 /* 879 * mmap_region() will call shmem_zero_setup() to create a file, 880 * so use shmem's get_unmapped_area in case it can be huge. 881 */ 882 get_area = shmem_get_unmapped_area; 883 } 884 885 /* Always treat pgoff as zero for anonymous memory. */ 886 if (!file) 887 pgoff = 0; 888 889 if (get_area) { 890 addr = get_area(file, addr, len, pgoff, flags); 891 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) 892 && !addr /* no hint */ 893 && IS_ALIGNED(len, PMD_SIZE)) { 894 /* Ensures that larger anonymous mappings are THP aligned. */ 895 addr = thp_get_unmapped_area_vmflags(file, addr, len, 896 pgoff, flags, vm_flags); 897 } else { 898 addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len, 899 pgoff, flags, vm_flags); 900 } 901 if (IS_ERR_VALUE(addr)) 902 return addr; 903 904 if (addr > TASK_SIZE - len) 905 return -ENOMEM; 906 if (offset_in_page(addr)) 907 return -EINVAL; 908 909 error = security_mmap_addr(addr); 910 return error ? error : addr; 911 } 912 913 unsigned long 914 mm_get_unmapped_area(struct mm_struct *mm, struct file *file, 915 unsigned long addr, unsigned long len, 916 unsigned long pgoff, unsigned long flags) 917 { 918 if (test_bit(MMF_TOPDOWN, &mm->flags)) 919 return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0); 920 return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0); 921 } 922 EXPORT_SYMBOL(mm_get_unmapped_area); 923 924 /** 925 * find_vma_intersection() - Look up the first VMA which intersects the interval 926 * @mm: The process address space. 927 * @start_addr: The inclusive start user address. 928 * @end_addr: The exclusive end user address. 
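 *
 * For example, do_mmap() above relies on this check to implement
 * MAP_FIXED_NOREPLACE:
 *
 *	if (find_vma_intersection(mm, addr, addr + len))
 *		return -EEXIST;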
929 * 930 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes 931 * start_addr < end_addr. 932 */ 933 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, 934 unsigned long start_addr, 935 unsigned long end_addr) 936 { 937 unsigned long index = start_addr; 938 939 mmap_assert_locked(mm); 940 return mt_find(&mm->mm_mt, &index, end_addr - 1); 941 } 942 EXPORT_SYMBOL(find_vma_intersection); 943 944 /** 945 * find_vma() - Find the VMA for a given address, or the next VMA. 946 * @mm: The mm_struct to check 947 * @addr: The address 948 * 949 * Returns: The VMA associated with addr, or the next VMA. 950 * May return %NULL in the case of no VMA at addr or above. 951 */ 952 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) 953 { 954 unsigned long index = addr; 955 956 mmap_assert_locked(mm); 957 return mt_find(&mm->mm_mt, &index, ULONG_MAX); 958 } 959 EXPORT_SYMBOL(find_vma); 960 961 /** 962 * find_vma_prev() - Find the VMA for a given address, or the next vma and 963 * set %pprev to the previous VMA, if any. 964 * @mm: The mm_struct to check 965 * @addr: The address 966 * @pprev: The pointer to set to the previous VMA 967 * 968 * Note that RCU lock is missing here since the external mmap_lock() is used 969 * instead. 970 * 971 * Returns: The VMA associated with @addr, or the next vma. 972 * May return %NULL in the case of no vma at addr or above. 973 */ 974 struct vm_area_struct * 975 find_vma_prev(struct mm_struct *mm, unsigned long addr, 976 struct vm_area_struct **pprev) 977 { 978 struct vm_area_struct *vma; 979 VMA_ITERATOR(vmi, mm, addr); 980 981 vma = vma_iter_load(&vmi); 982 *pprev = vma_prev(&vmi); 983 if (!vma) 984 vma = vma_next(&vmi); 985 return vma; 986 } 987 988 /* 989 * Verify that the stack growth is acceptable and 990 * update accounting. This is shared with both the 991 * grow-up and grow-down cases. 992 */ 993 static int acct_stack_growth(struct vm_area_struct *vma, 994 unsigned long size, unsigned long grow) 995 { 996 struct mm_struct *mm = vma->vm_mm; 997 unsigned long new_start; 998 999 /* address space limit tests */ 1000 if (!may_expand_vm(mm, vma->vm_flags, grow)) 1001 return -ENOMEM; 1002 1003 /* Stack limit test */ 1004 if (size > rlimit(RLIMIT_STACK)) 1005 return -ENOMEM; 1006 1007 /* mlock limit tests */ 1008 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) 1009 return -ENOMEM; 1010 1011 /* Check to ensure the stack will not grow into a hugetlb-only region */ 1012 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : 1013 vma->vm_end - size; 1014 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) 1015 return -EFAULT; 1016 1017 /* 1018 * Overcommit.. This must be the final test, as it will 1019 * update security statistics. 1020 */ 1021 if (security_vm_enough_memory_mm(mm, grow)) 1022 return -ENOMEM; 1023 1024 return 0; 1025 } 1026 1027 #if defined(CONFIG_STACK_GROWSUP) 1028 /* 1029 * PA-RISC uses this for its stack. 1030 * vma is the last one with address > vma->vm_end. Have to extend vma. 1031 */ 1032 static int expand_upwards(struct vm_area_struct *vma, unsigned long address) 1033 { 1034 struct mm_struct *mm = vma->vm_mm; 1035 struct vm_area_struct *next; 1036 unsigned long gap_addr; 1037 int error = 0; 1038 VMA_ITERATOR(vmi, mm, vma->vm_start); 1039 1040 if (!(vma->vm_flags & VM_GROWSUP)) 1041 return -EFAULT; 1042 1043 mmap_assert_write_locked(mm); 1044 1045 /* Guard against exceeding limits of the address space. 
*/ 1046 address &= PAGE_MASK; 1047 if (address >= (TASK_SIZE & PAGE_MASK)) 1048 return -ENOMEM; 1049 address += PAGE_SIZE; 1050 1051 /* Enforce stack_guard_gap */ 1052 gap_addr = address + stack_guard_gap; 1053 1054 /* Guard against overflow */ 1055 if (gap_addr < address || gap_addr > TASK_SIZE) 1056 gap_addr = TASK_SIZE; 1057 1058 next = find_vma_intersection(mm, vma->vm_end, gap_addr); 1059 if (next && vma_is_accessible(next)) { 1060 if (!(next->vm_flags & VM_GROWSUP)) 1061 return -ENOMEM; 1062 /* Check that both stack segments have the same anon_vma? */ 1063 } 1064 1065 if (next) 1066 vma_iter_prev_range_limit(&vmi, address); 1067 1068 vma_iter_config(&vmi, vma->vm_start, address); 1069 if (vma_iter_prealloc(&vmi, vma)) 1070 return -ENOMEM; 1071 1072 /* We must make sure the anon_vma is allocated. */ 1073 if (unlikely(anon_vma_prepare(vma))) { 1074 vma_iter_free(&vmi); 1075 return -ENOMEM; 1076 } 1077 1078 /* Lock the VMA before expanding to prevent concurrent page faults */ 1079 vma_start_write(vma); 1080 /* We update the anon VMA tree. */ 1081 anon_vma_lock_write(vma->anon_vma); 1082 1083 /* Somebody else might have raced and expanded it already */ 1084 if (address > vma->vm_end) { 1085 unsigned long size, grow; 1086 1087 size = address - vma->vm_start; 1088 grow = (address - vma->vm_end) >> PAGE_SHIFT; 1089 1090 error = -ENOMEM; 1091 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { 1092 error = acct_stack_growth(vma, size, grow); 1093 if (!error) { 1094 if (vma->vm_flags & VM_LOCKED) 1095 mm->locked_vm += grow; 1096 vm_stat_account(mm, vma->vm_flags, grow); 1097 anon_vma_interval_tree_pre_update_vma(vma); 1098 vma->vm_end = address; 1099 /* Overwrite old entry in mtree. */ 1100 vma_iter_store(&vmi, vma); 1101 anon_vma_interval_tree_post_update_vma(vma); 1102 1103 perf_event_mmap(vma); 1104 } 1105 } 1106 } 1107 anon_vma_unlock_write(vma->anon_vma); 1108 vma_iter_free(&vmi); 1109 validate_mm(mm); 1110 return error; 1111 } 1112 #endif /* CONFIG_STACK_GROWSUP */ 1113 1114 /* 1115 * vma is the first one with address < vma->vm_start. Have to extend vma. 1116 * mmap_lock held for writing. 1117 */ 1118 int expand_downwards(struct vm_area_struct *vma, unsigned long address) 1119 { 1120 struct mm_struct *mm = vma->vm_mm; 1121 struct vm_area_struct *prev; 1122 int error = 0; 1123 VMA_ITERATOR(vmi, mm, vma->vm_start); 1124 1125 if (!(vma->vm_flags & VM_GROWSDOWN)) 1126 return -EFAULT; 1127 1128 mmap_assert_write_locked(mm); 1129 1130 address &= PAGE_MASK; 1131 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) 1132 return -EPERM; 1133 1134 /* Enforce stack_guard_gap */ 1135 prev = vma_prev(&vmi); 1136 /* Check that both stack segments have the same anon_vma? */ 1137 if (prev) { 1138 if (!(prev->vm_flags & VM_GROWSDOWN) && 1139 vma_is_accessible(prev) && 1140 (address - prev->vm_end < stack_guard_gap)) 1141 return -ENOMEM; 1142 } 1143 1144 if (prev) 1145 vma_iter_next_range_limit(&vmi, vma->vm_start); 1146 1147 vma_iter_config(&vmi, address, vma->vm_end); 1148 if (vma_iter_prealloc(&vmi, vma)) 1149 return -ENOMEM; 1150 1151 /* We must make sure the anon_vma is allocated. */ 1152 if (unlikely(anon_vma_prepare(vma))) { 1153 vma_iter_free(&vmi); 1154 return -ENOMEM; 1155 } 1156 1157 /* Lock the VMA before expanding to prevent concurrent page faults */ 1158 vma_start_write(vma); 1159 /* We update the anon VMA tree. 
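	 * anon_vma_interval_tree_pre_update_vma() below temporarily removes
	 * the vma from the anon_vma interval tree so the tree never sees a
	 * half-updated vm_start/vm_pgoff; the matching post_update call
	 * re-inserts it once the new bounds are in place.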
*/ 1160 anon_vma_lock_write(vma->anon_vma); 1161 1162 /* Somebody else might have raced and expanded it already */ 1163 if (address < vma->vm_start) { 1164 unsigned long size, grow; 1165 1166 size = vma->vm_end - address; 1167 grow = (vma->vm_start - address) >> PAGE_SHIFT; 1168 1169 error = -ENOMEM; 1170 if (grow <= vma->vm_pgoff) { 1171 error = acct_stack_growth(vma, size, grow); 1172 if (!error) { 1173 if (vma->vm_flags & VM_LOCKED) 1174 mm->locked_vm += grow; 1175 vm_stat_account(mm, vma->vm_flags, grow); 1176 anon_vma_interval_tree_pre_update_vma(vma); 1177 vma->vm_start = address; 1178 vma->vm_pgoff -= grow; 1179 /* Overwrite old entry in mtree. */ 1180 vma_iter_store(&vmi, vma); 1181 anon_vma_interval_tree_post_update_vma(vma); 1182 1183 perf_event_mmap(vma); 1184 } 1185 } 1186 } 1187 anon_vma_unlock_write(vma->anon_vma); 1188 vma_iter_free(&vmi); 1189 validate_mm(mm); 1190 return error; 1191 } 1192 1193 /* enforced gap between the expanding stack and other mappings. */ 1194 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; 1195 1196 static int __init cmdline_parse_stack_guard_gap(char *p) 1197 { 1198 unsigned long val; 1199 char *endptr; 1200 1201 val = simple_strtoul(p, &endptr, 10); 1202 if (!*endptr) 1203 stack_guard_gap = val << PAGE_SHIFT; 1204 1205 return 1; 1206 } 1207 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); 1208 1209 #ifdef CONFIG_STACK_GROWSUP 1210 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) 1211 { 1212 return expand_upwards(vma, address); 1213 } 1214 1215 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) 1216 { 1217 struct vm_area_struct *vma, *prev; 1218 1219 addr &= PAGE_MASK; 1220 vma = find_vma_prev(mm, addr, &prev); 1221 if (vma && (vma->vm_start <= addr)) 1222 return vma; 1223 if (!prev) 1224 return NULL; 1225 if (expand_stack_locked(prev, addr)) 1226 return NULL; 1227 if (prev->vm_flags & VM_LOCKED) 1228 populate_vma_page_range(prev, addr, prev->vm_end, NULL); 1229 return prev; 1230 } 1231 #else 1232 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) 1233 { 1234 return expand_downwards(vma, address); 1235 } 1236 1237 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) 1238 { 1239 struct vm_area_struct *vma; 1240 unsigned long start; 1241 1242 addr &= PAGE_MASK; 1243 vma = find_vma(mm, addr); 1244 if (!vma) 1245 return NULL; 1246 if (vma->vm_start <= addr) 1247 return vma; 1248 start = vma->vm_start; 1249 if (expand_stack_locked(vma, addr)) 1250 return NULL; 1251 if (vma->vm_flags & VM_LOCKED) 1252 populate_vma_page_range(vma, addr, start, NULL); 1253 return vma; 1254 } 1255 #endif 1256 1257 #if defined(CONFIG_STACK_GROWSUP) 1258 1259 #define vma_expand_up(vma,addr) expand_upwards(vma, addr) 1260 #define vma_expand_down(vma, addr) (-EFAULT) 1261 1262 #else 1263 1264 #define vma_expand_up(vma,addr) (-EFAULT) 1265 #define vma_expand_down(vma, addr) expand_downwards(vma, addr) 1266 1267 #endif 1268 1269 /* 1270 * expand_stack(): legacy interface for page faulting. Don't use unless 1271 * you have to. 1272 * 1273 * This is called with the mm locked for reading, drops the lock, takes 1274 * the lock for writing, tries to look up a vma again, expands it if 1275 * necessary, and downgrades the lock to reading again. 1276 * 1277 * If no vma is found or it can't be expanded, it returns NULL and has 1278 * dropped the lock. 
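 *
 * A minimal caller sketch (assuming the fault path already holds the
 * mmap read lock and addr is the faulting address):
 *
 *	vma = find_vma(mm, addr);
 *	if (!vma || vma->vm_start > addr)
 *		vma = expand_stack(mm, addr);	// may drop mmap_lock
 *	if (!vma)
 *		return -EFAULT;			// mmap_lock already released
 *	// mmap_lock is held for reading again at this point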
1279 */ 1280 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) 1281 { 1282 struct vm_area_struct *vma, *prev; 1283 1284 mmap_read_unlock(mm); 1285 if (mmap_write_lock_killable(mm)) 1286 return NULL; 1287 1288 vma = find_vma_prev(mm, addr, &prev); 1289 if (vma && vma->vm_start <= addr) 1290 goto success; 1291 1292 if (prev && !vma_expand_up(prev, addr)) { 1293 vma = prev; 1294 goto success; 1295 } 1296 1297 if (vma && !vma_expand_down(vma, addr)) 1298 goto success; 1299 1300 mmap_write_unlock(mm); 1301 return NULL; 1302 1303 success: 1304 mmap_write_downgrade(mm); 1305 return vma; 1306 } 1307 1308 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls. 1309 * @mm: The mm_struct 1310 * @start: The start address to munmap 1311 * @len: The length to be munmapped. 1312 * @uf: The userfaultfd list_head 1313 * 1314 * Return: 0 on success, error otherwise. 1315 */ 1316 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, 1317 struct list_head *uf) 1318 { 1319 VMA_ITERATOR(vmi, mm, start); 1320 1321 return do_vmi_munmap(&vmi, mm, start, len, uf, false); 1322 } 1323 1324 unsigned long mmap_region(struct file *file, unsigned long addr, 1325 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 1326 struct list_head *uf) 1327 { 1328 unsigned long ret; 1329 bool writable_file_mapping = false; 1330 1331 /* Check to see if MDWE is applicable. */ 1332 if (map_deny_write_exec(vm_flags, vm_flags)) 1333 return -EACCES; 1334 1335 /* Allow architectures to sanity-check the vm_flags. */ 1336 if (!arch_validate_flags(vm_flags)) 1337 return -EINVAL; 1338 1339 /* Map writable and ensure this isn't a sealed memfd. */ 1340 if (file && is_shared_maywrite(vm_flags)) { 1341 int error = mapping_map_writable(file->f_mapping); 1342 1343 if (error) 1344 return error; 1345 writable_file_mapping = true; 1346 } 1347 1348 ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf); 1349 1350 /* Clear our write mapping regardless of error. */ 1351 if (writable_file_mapping) 1352 mapping_unmap_writable(file->f_mapping); 1353 1354 validate_mm(current->mm); 1355 return ret; 1356 } 1357 1358 static int __vm_munmap(unsigned long start, size_t len, bool unlock) 1359 { 1360 int ret; 1361 struct mm_struct *mm = current->mm; 1362 LIST_HEAD(uf); 1363 VMA_ITERATOR(vmi, mm, start); 1364 1365 if (mmap_write_lock_killable(mm)) 1366 return -EINTR; 1367 1368 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); 1369 if (ret || !unlock) 1370 mmap_write_unlock(mm); 1371 1372 userfaultfd_unmap_complete(mm, &uf); 1373 return ret; 1374 } 1375 1376 int vm_munmap(unsigned long start, size_t len) 1377 { 1378 return __vm_munmap(start, len, false); 1379 } 1380 EXPORT_SYMBOL(vm_munmap); 1381 1382 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 1383 { 1384 addr = untagged_addr(addr); 1385 return __vm_munmap(addr, len, true); 1386 } 1387 1388 1389 /* 1390 * Emulation of deprecated remap_file_pages() syscall. 1391 */ 1392 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, 1393 unsigned long, prot, unsigned long, pgoff, unsigned long, flags) 1394 { 1395 1396 struct mm_struct *mm = current->mm; 1397 struct vm_area_struct *vma; 1398 unsigned long populate = 0; 1399 unsigned long ret = -EINVAL; 1400 struct file *file; 1401 vm_flags_t vm_flags; 1402 1403 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. 
See Documentation/mm/remap_file_pages.rst.\n", 1404 current->comm, current->pid); 1405 1406 if (prot) 1407 return ret; 1408 start = start & PAGE_MASK; 1409 size = size & PAGE_MASK; 1410 1411 if (start + size <= start) 1412 return ret; 1413 1414 /* Does pgoff wrap? */ 1415 if (pgoff + (size >> PAGE_SHIFT) < pgoff) 1416 return ret; 1417 1418 if (mmap_read_lock_killable(mm)) 1419 return -EINTR; 1420 1421 /* 1422 * Look up VMA under read lock first so we can perform the security 1423 * without holding locks (which can be problematic). We reacquire a 1424 * write lock later and check nothing changed underneath us. 1425 */ 1426 vma = vma_lookup(mm, start); 1427 1428 if (!vma || !(vma->vm_flags & VM_SHARED)) { 1429 mmap_read_unlock(mm); 1430 return -EINVAL; 1431 } 1432 1433 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; 1434 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; 1435 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; 1436 1437 flags &= MAP_NONBLOCK; 1438 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; 1439 if (vma->vm_flags & VM_LOCKED) 1440 flags |= MAP_LOCKED; 1441 1442 /* Save vm_flags used to calculate prot and flags, and recheck later. */ 1443 vm_flags = vma->vm_flags; 1444 file = get_file(vma->vm_file); 1445 1446 mmap_read_unlock(mm); 1447 1448 /* Call outside mmap_lock to be consistent with other callers. */ 1449 ret = security_mmap_file(file, prot, flags); 1450 if (ret) { 1451 fput(file); 1452 return ret; 1453 } 1454 1455 ret = -EINVAL; 1456 1457 /* OK security check passed, take write lock + let it rip. */ 1458 if (mmap_write_lock_killable(mm)) { 1459 fput(file); 1460 return -EINTR; 1461 } 1462 1463 vma = vma_lookup(mm, start); 1464 1465 if (!vma) 1466 goto out; 1467 1468 /* Make sure things didn't change under us. */ 1469 if (vma->vm_flags != vm_flags) 1470 goto out; 1471 if (vma->vm_file != file) 1472 goto out; 1473 1474 if (start + size > vma->vm_end) { 1475 VMA_ITERATOR(vmi, mm, vma->vm_end); 1476 struct vm_area_struct *next, *prev = vma; 1477 1478 for_each_vma_range(vmi, next, start + size) { 1479 /* hole between vmas ? */ 1480 if (next->vm_start != prev->vm_end) 1481 goto out; 1482 1483 if (next->vm_file != vma->vm_file) 1484 goto out; 1485 1486 if (next->vm_flags != vma->vm_flags) 1487 goto out; 1488 1489 if (start + size <= next->vm_end) 1490 break; 1491 1492 prev = next; 1493 } 1494 1495 if (!next) 1496 goto out; 1497 } 1498 1499 ret = do_mmap(vma->vm_file, start, size, 1500 prot, flags, 0, pgoff, &populate, NULL); 1501 out: 1502 mmap_write_unlock(mm); 1503 fput(file); 1504 if (populate) 1505 mm_populate(ret, populate); 1506 if (!IS_ERR_VALUE(ret)) 1507 ret = 0; 1508 return ret; 1509 } 1510 1511 /* 1512 * do_brk_flags() - Increase the brk vma if the flags match. 1513 * @vmi: The vma iterator 1514 * @addr: The start address 1515 * @len: The length of the increase 1516 * @vma: The vma, 1517 * @flags: The VMA Flags 1518 * 1519 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags 1520 * do not match then create a new anonymous VMA. Eventually we may be able to 1521 * do some brk-specific accounting here. 1522 */ 1523 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, 1524 unsigned long addr, unsigned long len, unsigned long flags) 1525 { 1526 struct mm_struct *mm = current->mm; 1527 1528 /* 1529 * Check against address space limits by the changed size 1530 * Note: This happens *after* clearing old mappings in some code paths. 
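	 *
	 * The charge taken via security_vm_enough_memory_mm() below is
	 * len >> PAGE_SHIFT pages; it is given back on the unacct_fail
	 * path if the VMA cannot be merged or created.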
1531 */ 1532 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 1533 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) 1534 return -ENOMEM; 1535 1536 if (mm->map_count > sysctl_max_map_count) 1537 return -ENOMEM; 1538 1539 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) 1540 return -ENOMEM; 1541 1542 /* 1543 * Expand the existing vma if possible; Note that singular lists do not 1544 * occur after forking, so the expand will only happen on new VMAs. 1545 */ 1546 if (vma && vma->vm_end == addr) { 1547 VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr)); 1548 1549 vmg.prev = vma; 1550 /* vmi is positioned at prev, which this mode expects. */ 1551 vmg.merge_flags = VMG_FLAG_JUST_EXPAND; 1552 1553 if (vma_merge_new_range(&vmg)) 1554 goto out; 1555 else if (vmg_nomem(&vmg)) 1556 goto unacct_fail; 1557 } 1558 1559 if (vma) 1560 vma_iter_next_range(vmi); 1561 /* create a vma struct for an anonymous mapping */ 1562 vma = vm_area_alloc(mm); 1563 if (!vma) 1564 goto unacct_fail; 1565 1566 vma_set_anonymous(vma); 1567 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); 1568 vm_flags_init(vma, flags); 1569 vma->vm_page_prot = vm_get_page_prot(flags); 1570 vma_start_write(vma); 1571 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) 1572 goto mas_store_fail; 1573 1574 mm->map_count++; 1575 validate_mm(mm); 1576 ksm_add_vma(vma); 1577 out: 1578 perf_event_mmap(vma); 1579 mm->total_vm += len >> PAGE_SHIFT; 1580 mm->data_vm += len >> PAGE_SHIFT; 1581 if (flags & VM_LOCKED) 1582 mm->locked_vm += (len >> PAGE_SHIFT); 1583 vm_flags_set(vma, VM_SOFTDIRTY); 1584 return 0; 1585 1586 mas_store_fail: 1587 vm_area_free(vma); 1588 unacct_fail: 1589 vm_unacct_memory(len >> PAGE_SHIFT); 1590 return -ENOMEM; 1591 } 1592 1593 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) 1594 { 1595 struct mm_struct *mm = current->mm; 1596 struct vm_area_struct *vma = NULL; 1597 unsigned long len; 1598 int ret; 1599 bool populate; 1600 LIST_HEAD(uf); 1601 VMA_ITERATOR(vmi, mm, addr); 1602 1603 len = PAGE_ALIGN(request); 1604 if (len < request) 1605 return -ENOMEM; 1606 if (!len) 1607 return 0; 1608 1609 /* Until we need other flags, refuse anything except VM_EXEC. */ 1610 if ((flags & (~VM_EXEC)) != 0) 1611 return -EINVAL; 1612 1613 if (mmap_write_lock_killable(mm)) 1614 return -EINTR; 1615 1616 ret = check_brk_limits(addr, len); 1617 if (ret) 1618 goto limits_failed; 1619 1620 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); 1621 if (ret) 1622 goto munmap_failed; 1623 1624 vma = vma_prev(&vmi); 1625 ret = do_brk_flags(&vmi, vma, addr, len, flags); 1626 populate = ((mm->def_flags & VM_LOCKED) != 0); 1627 mmap_write_unlock(mm); 1628 userfaultfd_unmap_complete(mm, &uf); 1629 if (populate && !ret) 1630 mm_populate(addr, len); 1631 return ret; 1632 1633 munmap_failed: 1634 limits_failed: 1635 mmap_write_unlock(mm); 1636 return ret; 1637 } 1638 EXPORT_SYMBOL(vm_brk_flags); 1639 1640 /* Release all mmaps. 
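 *
 * In outline: tell the MMU notifiers the mm is going away, unmap every
 * VMA under the read lock, set MMF_OOM_SKIP so the OOM reaper skips the
 * mm, then retake the lock for writing to free the page tables, release
 * each VMA and finally destroy the maple tree.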
*/ 1641 void exit_mmap(struct mm_struct *mm) 1642 { 1643 struct mmu_gather tlb; 1644 struct vm_area_struct *vma; 1645 unsigned long nr_accounted = 0; 1646 VMA_ITERATOR(vmi, mm, 0); 1647 int count = 0; 1648 1649 /* mm's last user has gone, and its about to be pulled down */ 1650 mmu_notifier_release(mm); 1651 1652 mmap_read_lock(mm); 1653 arch_exit_mmap(mm); 1654 1655 vma = vma_next(&vmi); 1656 if (!vma || unlikely(xa_is_zero(vma))) { 1657 /* Can happen if dup_mmap() received an OOM */ 1658 mmap_read_unlock(mm); 1659 mmap_write_lock(mm); 1660 goto destroy; 1661 } 1662 1663 lru_add_drain(); 1664 flush_cache_mm(mm); 1665 tlb_gather_mmu_fullmm(&tlb, mm); 1666 /* update_hiwater_rss(mm) here? but nobody should be looking */ 1667 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ 1668 unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false); 1669 mmap_read_unlock(mm); 1670 1671 /* 1672 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper 1673 * because the memory has been already freed. 1674 */ 1675 set_bit(MMF_OOM_SKIP, &mm->flags); 1676 mmap_write_lock(mm); 1677 mt_clear_in_rcu(&mm->mm_mt); 1678 vma_iter_set(&vmi, vma->vm_end); 1679 free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS, 1680 USER_PGTABLES_CEILING, true); 1681 tlb_finish_mmu(&tlb); 1682 1683 /* 1684 * Walk the list again, actually closing and freeing it, with preemption 1685 * enabled, without holding any MM locks besides the unreachable 1686 * mmap_write_lock. 1687 */ 1688 vma_iter_set(&vmi, vma->vm_end); 1689 do { 1690 if (vma->vm_flags & VM_ACCOUNT) 1691 nr_accounted += vma_pages(vma); 1692 remove_vma(vma, /* unreachable = */ true); 1693 count++; 1694 cond_resched(); 1695 vma = vma_next(&vmi); 1696 } while (vma && likely(!xa_is_zero(vma))); 1697 1698 BUG_ON(count != mm->map_count); 1699 1700 trace_exit_mmap(mm); 1701 destroy: 1702 __mt_destroy(&mm->mm_mt); 1703 mmap_write_unlock(mm); 1704 vm_unacct_memory(nr_accounted); 1705 } 1706 1707 /* Insert vm structure into process list sorted by address 1708 * and into the inode's i_mmap tree. If vm_file is non-NULL 1709 * then i_mmap_rwsem is taken here. 1710 */ 1711 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) 1712 { 1713 unsigned long charged = vma_pages(vma); 1714 1715 1716 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) 1717 return -ENOMEM; 1718 1719 if ((vma->vm_flags & VM_ACCOUNT) && 1720 security_vm_enough_memory_mm(mm, charged)) 1721 return -ENOMEM; 1722 1723 /* 1724 * The vm_pgoff of a purely anonymous vma should be irrelevant 1725 * until its first write fault, when page's anon_vma and index 1726 * are set. But now set the vm_pgoff it will almost certainly 1727 * end up with (unless mremap moves it elsewhere before that 1728 * first wfault), so /proc/pid/maps tells a consistent story. 1729 * 1730 * By setting it to reflect the virtual start address of the 1731 * vma, merges and splits can happen in a seamless way, just 1732 * using the existing file pgoff checks and manipulations. 1733 * Similarly in do_mmap and in do_brk_flags. 
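 *
 * For example, an anonymous vma inserted at 0x00007f1200000000 ends up
 * with vm_pgoff == 0x00007f1200000000 >> PAGE_SHIFT, exactly as if the
 * same range had been set up through do_mmap().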
1734 */ 1735 if (vma_is_anonymous(vma)) { 1736 BUG_ON(vma->anon_vma); 1737 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; 1738 } 1739 1740 if (vma_link(mm, vma)) { 1741 if (vma->vm_flags & VM_ACCOUNT) 1742 vm_unacct_memory(charged); 1743 return -ENOMEM; 1744 } 1745 1746 return 0; 1747 } 1748 1749 /* 1750 * Return true if the calling process may expand its vm space by the passed 1751 * number of pages 1752 */ 1753 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) 1754 { 1755 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) 1756 return false; 1757 1758 if (is_data_mapping(flags) && 1759 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { 1760 /* Workaround for Valgrind */ 1761 if (rlimit(RLIMIT_DATA) == 0 && 1762 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) 1763 return true; 1764 1765 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n", 1766 current->comm, current->pid, 1767 (mm->data_vm + npages) << PAGE_SHIFT, 1768 rlimit(RLIMIT_DATA), 1769 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); 1770 1771 if (!ignore_rlimit_data) 1772 return false; 1773 } 1774 1775 return true; 1776 } 1777 1778 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) 1779 { 1780 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); 1781 1782 if (is_exec_mapping(flags)) 1783 mm->exec_vm += npages; 1784 else if (is_stack_mapping(flags)) 1785 mm->stack_vm += npages; 1786 else if (is_data_mapping(flags)) 1787 mm->data_vm += npages; 1788 } 1789 1790 static vm_fault_t special_mapping_fault(struct vm_fault *vmf); 1791 1792 /* 1793 * Close hook, called for unmap() and on the old vma for mremap(). 1794 * 1795 * Having a close hook prevents vma merging regardless of flags. 1796 */ 1797 static void special_mapping_close(struct vm_area_struct *vma) 1798 { 1799 const struct vm_special_mapping *sm = vma->vm_private_data; 1800 1801 if (sm->close) 1802 sm->close(sm, vma); 1803 } 1804 1805 static const char *special_mapping_name(struct vm_area_struct *vma) 1806 { 1807 return ((struct vm_special_mapping *)vma->vm_private_data)->name; 1808 } 1809 1810 static int special_mapping_mremap(struct vm_area_struct *new_vma) 1811 { 1812 struct vm_special_mapping *sm = new_vma->vm_private_data; 1813 1814 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) 1815 return -EFAULT; 1816 1817 if (sm->mremap) 1818 return sm->mremap(sm, new_vma); 1819 1820 return 0; 1821 } 1822 1823 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) 1824 { 1825 /* 1826 * Forbid splitting special mappings - kernel has expectations over 1827 * the number of pages in mapping. Together with VM_DONTEXPAND 1828 * the size of vma should stay the same over the special mapping's 1829 * lifetime. 
1830 */ 1831 return -EINVAL; 1832 } 1833 1834 static const struct vm_operations_struct special_mapping_vmops = { 1835 .close = special_mapping_close, 1836 .fault = special_mapping_fault, 1837 .mremap = special_mapping_mremap, 1838 .name = special_mapping_name, 1839 /* vDSO code relies that VVAR can't be accessed remotely */ 1840 .access = NULL, 1841 .may_split = special_mapping_split, 1842 }; 1843 1844 static vm_fault_t special_mapping_fault(struct vm_fault *vmf) 1845 { 1846 struct vm_area_struct *vma = vmf->vma; 1847 pgoff_t pgoff; 1848 struct page **pages; 1849 struct vm_special_mapping *sm = vma->vm_private_data; 1850 1851 if (sm->fault) 1852 return sm->fault(sm, vmf->vma, vmf); 1853 1854 pages = sm->pages; 1855 1856 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) 1857 pgoff--; 1858 1859 if (*pages) { 1860 struct page *page = *pages; 1861 get_page(page); 1862 vmf->page = page; 1863 return 0; 1864 } 1865 1866 return VM_FAULT_SIGBUS; 1867 } 1868 1869 static struct vm_area_struct *__install_special_mapping( 1870 struct mm_struct *mm, 1871 unsigned long addr, unsigned long len, 1872 unsigned long vm_flags, void *priv, 1873 const struct vm_operations_struct *ops) 1874 { 1875 int ret; 1876 struct vm_area_struct *vma; 1877 1878 vma = vm_area_alloc(mm); 1879 if (unlikely(vma == NULL)) 1880 return ERR_PTR(-ENOMEM); 1881 1882 vma_set_range(vma, addr, addr + len, 0); 1883 vm_flags_init(vma, (vm_flags | mm->def_flags | 1884 VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); 1885 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 1886 1887 vma->vm_ops = ops; 1888 vma->vm_private_data = priv; 1889 1890 ret = insert_vm_struct(mm, vma); 1891 if (ret) 1892 goto out; 1893 1894 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); 1895 1896 perf_event_mmap(vma); 1897 1898 return vma; 1899 1900 out: 1901 vm_area_free(vma); 1902 return ERR_PTR(ret); 1903 } 1904 1905 bool vma_is_special_mapping(const struct vm_area_struct *vma, 1906 const struct vm_special_mapping *sm) 1907 { 1908 return vma->vm_private_data == sm && 1909 vma->vm_ops == &special_mapping_vmops; 1910 } 1911 1912 /* 1913 * Called with mm->mmap_lock held for writing. 1914 * Insert a new vma covering the given region, with the given flags. 1915 * Its pages are supplied by the given array of struct page *. 1916 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. 1917 * The region past the last page supplied will always produce SIGBUS. 1918 * The array pointer and the pages it points to are assumed to stay alive 1919 * for as long as this mapping might exist. 1920 */ 1921 struct vm_area_struct *_install_special_mapping( 1922 struct mm_struct *mm, 1923 unsigned long addr, unsigned long len, 1924 unsigned long vm_flags, const struct vm_special_mapping *spec) 1925 { 1926 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, 1927 &special_mapping_vmops); 1928 } 1929 1930 /* 1931 * initialise the percpu counter for VM 1932 */ 1933 void __init mmap_init(void) 1934 { 1935 int ret; 1936 1937 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); 1938 VM_BUG_ON(ret); 1939 } 1940 1941 /* 1942 * Initialise sysctl_user_reserve_kbytes. 1943 * 1944 * This is intended to prevent a user from starting a single memory hogging 1945 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER 1946 * mode. 1947 * 1948 * The default value is min(3% of free memory, 128MB) 1949 * 128MB is enough to recover with sshd/login, bash, and top/kill. 
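 *
 * Worked example (illustrative): the "3%" below is really
 * free_kbytes / 32, i.e. 3.125%. With 8 GiB free, 8388608 / 32 =
 * 262144 KiB, which the min() clamps to SZ_128K KiB (128 MiB).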
1950 */ 1951 static int init_user_reserve(void) 1952 { 1953 unsigned long free_kbytes; 1954 1955 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); 1956 1957 sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K); 1958 return 0; 1959 } 1960 subsys_initcall(init_user_reserve); 1961 1962 /* 1963 * Initialise sysctl_admin_reserve_kbytes. 1964 * 1965 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin 1966 * to log in and kill a memory hogging process. 1967 * 1968 * Systems with more than 256MB will reserve 8MB, enough to recover 1969 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will 1970 * only reserve 3% of free pages by default. 1971 */ 1972 static int init_admin_reserve(void) 1973 { 1974 unsigned long free_kbytes; 1975 1976 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); 1977 1978 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K); 1979 return 0; 1980 } 1981 subsys_initcall(init_admin_reserve); 1982 1983 /* 1984 * Reinititalise user and admin reserves if memory is added or removed. 1985 * 1986 * The default user reserve max is 128MB, and the default max for the 1987 * admin reserve is 8MB. These are usually, but not always, enough to 1988 * enable recovery from a memory hogging process using login/sshd, a shell, 1989 * and tools like top. It may make sense to increase or even disable the 1990 * reserve depending on the existence of swap or variations in the recovery 1991 * tools. So, the admin may have changed them. 1992 * 1993 * If memory is added and the reserves have been eliminated or increased above 1994 * the default max, then we'll trust the admin. 1995 * 1996 * If memory is removed and there isn't enough free memory, then we 1997 * need to reset the reserves. 1998 * 1999 * Otherwise keep the reserve set by the admin. 2000 */ 2001 static int reserve_mem_notifier(struct notifier_block *nb, 2002 unsigned long action, void *data) 2003 { 2004 unsigned long tmp, free_kbytes; 2005 2006 switch (action) { 2007 case MEM_ONLINE: 2008 /* Default max is 128MB. Leave alone if modified by operator. */ 2009 tmp = sysctl_user_reserve_kbytes; 2010 if (tmp > 0 && tmp < SZ_128K) 2011 init_user_reserve(); 2012 2013 /* Default max is 8MB. Leave alone if modified by operator. */ 2014 tmp = sysctl_admin_reserve_kbytes; 2015 if (tmp > 0 && tmp < SZ_8K) 2016 init_admin_reserve(); 2017 2018 break; 2019 case MEM_OFFLINE: 2020 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); 2021 2022 if (sysctl_user_reserve_kbytes > free_kbytes) { 2023 init_user_reserve(); 2024 pr_info("vm.user_reserve_kbytes reset to %lu\n", 2025 sysctl_user_reserve_kbytes); 2026 } 2027 2028 if (sysctl_admin_reserve_kbytes > free_kbytes) { 2029 init_admin_reserve(); 2030 pr_info("vm.admin_reserve_kbytes reset to %lu\n", 2031 sysctl_admin_reserve_kbytes); 2032 } 2033 break; 2034 default: 2035 break; 2036 } 2037 return NOTIFY_OK; 2038 } 2039 2040 static int __meminit init_reserve_notifier(void) 2041 { 2042 if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI)) 2043 pr_err("Failed registering memory add/remove notifier for admin reserve\n"); 2044 2045 return 0; 2046 } 2047 subsys_initcall(init_reserve_notifier); 2048 2049 /* 2050 * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between 2051 * this VMA and its relocated range, which will now reside at [vma->vm_start - 2052 * shift, vma->vm_end - shift). 
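 *
 * For example (illustrative addresses), a stack VMA spanning
 * [0x7ffffffde000, 0x7ffffffff000) relocated with shift = 0x200000
 * ends up at [0x7fffffdde000, 0x7fffffdff000).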
2053 * 2054 * This function is almost certainly NOT what you want for anything other than 2055 * early executable temporary stack relocation. 2056 */ 2057 int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) 2058 { 2059 /* 2060 * The process proceeds as follows: 2061 * 2062 * 1) Use shift to calculate the new vma endpoints. 2063 * 2) Extend vma to cover both the old and new ranges. This ensures the 2064 * arguments passed to subsequent functions are consistent. 2065 * 3) Move vma's page tables to the new range. 2066 * 4) Free up any cleared pgd range. 2067 * 5) Shrink the vma to cover only the new range. 2068 */ 2069 2070 struct mm_struct *mm = vma->vm_mm; 2071 unsigned long old_start = vma->vm_start; 2072 unsigned long old_end = vma->vm_end; 2073 unsigned long length = old_end - old_start; 2074 unsigned long new_start = old_start - shift; 2075 unsigned long new_end = old_end - shift; 2076 VMA_ITERATOR(vmi, mm, new_start); 2077 VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff); 2078 struct vm_area_struct *next; 2079 struct mmu_gather tlb; 2080 2081 BUG_ON(new_start > new_end); 2082 2083 /* 2084 * ensure there are no vmas between where we want to go 2085 * and where we are 2086 */ 2087 if (vma != vma_next(&vmi)) 2088 return -EFAULT; 2089 2090 vma_iter_prev_range(&vmi); 2091 /* 2092 * cover the whole range: [new_start, old_end) 2093 */ 2094 vmg.vma = vma; 2095 if (vma_expand(&vmg)) 2096 return -ENOMEM; 2097 2098 /* 2099 * move the page tables downwards, on failure we rely on 2100 * process cleanup to remove whatever mess we made. 2101 */ 2102 if (length != move_page_tables(vma, old_start, 2103 vma, new_start, length, false, true)) 2104 return -ENOMEM; 2105 2106 lru_add_drain(); 2107 tlb_gather_mmu(&tlb, mm); 2108 next = vma_next(&vmi); 2109 if (new_end > old_start) { 2110 /* 2111 * when the old and new regions overlap clear from new_end. 2112 */ 2113 free_pgd_range(&tlb, new_end, old_end, new_end, 2114 next ? next->vm_start : USER_PGTABLES_CEILING); 2115 } else { 2116 /* 2117 * otherwise, clean from old_start; this is done to not touch 2118 * the address space in [new_end, old_start) some architectures 2119 * have constraints on va-space that make this illegal (IA64) - 2120 * for the others its just a little faster. 2121 */ 2122 free_pgd_range(&tlb, old_start, old_end, new_end, 2123 next ? next->vm_start : USER_PGTABLES_CEILING); 2124 } 2125 tlb_finish_mmu(&tlb); 2126 2127 vma_prev(&vmi); 2128 /* Shrink the vma to just the new range */ 2129 return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff); 2130 } 2131
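
/*
 * Illustrative sketch only: the intended user of relocate_vma_down() is
 * the execve() path (setup_arg_pages() in fs/exec.c is assumed here),
 * which moves the temporary stack created at STACK_TOP_MAX down to its
 * final location along these lines:
 *
 *	stack_shift = vma->vm_end - stack_top;
 *	...
 *	if (stack_shift)
 *		ret = relocate_vma_down(vma, stack_shift);
 */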