// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

/*
 * check_brk_limits() - Use platform specific check of range & verify mlock
 * limits.
 * @addr: The address to check
 * @len: The size of increase.
 *
 * Return: 0 on success.
 */
static int check_brk_limits(unsigned long addr, unsigned long len)
{
	unsigned long mapped_addr;

	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (IS_ERR_VALUE(mapped_addr))
		return mapped_addr;

	return mlock_future_ok(current->mm, current->mm->def_flags, len)
		? 0 : -EAGAIN;
}
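/*
 * Illustrative example (added, not part of the original source) of how the
 * mlock accounting behind check_brk_limits() plays out with assumed numbers:
 * suppose mm->def_flags has VM_LOCKED (e.g. after mlockall(MCL_FUTURE)), the
 * task lacks CAP_IPC_LOCK, RLIMIT_MEMLOCK is 64 KiB and PAGE_SIZE is 4 KiB,
 * so limit_pages = 16. If mm->locked_vm already holds 10 pages and brk is
 * asked to grow by 32 KiB (8 pages), mlock_future_ok() sees 10 + 8 = 18 > 16
 * and check_brk_limits() returns -EAGAIN. Without VM_LOCKED in def_flags the
 * mlock check is skipped entirely.
 */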
static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		unsigned long addr, unsigned long request, unsigned long flags);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *brkvma, *next = NULL;
	unsigned long min_brk;
	bool populate = false;
	LIST_HEAD(uf);
	struct vma_iterator vmi;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit in the case where the limit is
	 * not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		/* Search one past newbrk */
		vma_iter_init(&vmi, mm, newbrk);
		brkvma = vma_find(&vmi, oldbrk);
		if (!brkvma || brkvma->vm_start >= oldbrk)
			goto out; /* mapping intersects with an existing non-brk vma. */
		/*
		 * mm->brk must be protected by write mmap_lock.
		 * do_vmi_align_munmap() will drop the lock on success, so
		 * update it before calling do_vma_munmap().
		 */
		mm->brk = brk;
		if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
					/* unlock = */ true))
			goto out;

		goto success_unlocked;
	}

	if (check_brk_limits(oldbrk, newbrk - oldbrk))
		goto out;

	/*
	 * Only check if the next VMA is within the stack_guard_gap of the
	 * expansion area
	 */
	vma_iter_init(&vmi, mm, oldbrk);
	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	brkvma = vma_prev_limit(&vmi, mm->start_brk);
	/* Ok, looks good - let it rip.
*/ 197 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0) 198 goto out; 199 200 mm->brk = brk; 201 if (mm->def_flags & VM_LOCKED) 202 populate = true; 203 204 success: 205 mmap_write_unlock(mm); 206 success_unlocked: 207 userfaultfd_unmap_complete(mm, &uf); 208 if (populate) 209 mm_populate(oldbrk, newbrk - oldbrk); 210 return brk; 211 212 out: 213 mm->brk = origbrk; 214 mmap_write_unlock(mm); 215 return origbrk; 216 } 217 218 /* 219 * If a hint addr is less than mmap_min_addr change hint to be as 220 * low as possible but still greater than mmap_min_addr 221 */ 222 static inline unsigned long round_hint_to_min(unsigned long hint) 223 { 224 hint &= PAGE_MASK; 225 if (((void *)hint != NULL) && 226 (hint < mmap_min_addr)) 227 return PAGE_ALIGN(mmap_min_addr); 228 return hint; 229 } 230 231 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, 232 unsigned long bytes) 233 { 234 unsigned long locked_pages, limit_pages; 235 236 if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) 237 return true; 238 239 locked_pages = bytes >> PAGE_SHIFT; 240 locked_pages += mm->locked_vm; 241 242 limit_pages = rlimit(RLIMIT_MEMLOCK); 243 limit_pages >>= PAGE_SHIFT; 244 245 return locked_pages <= limit_pages; 246 } 247 248 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) 249 { 250 if (S_ISREG(inode->i_mode)) 251 return MAX_LFS_FILESIZE; 252 253 if (S_ISBLK(inode->i_mode)) 254 return MAX_LFS_FILESIZE; 255 256 if (S_ISSOCK(inode->i_mode)) 257 return MAX_LFS_FILESIZE; 258 259 /* Special "we do even unsigned file positions" case */ 260 if (file->f_mode & FMODE_UNSIGNED_OFFSET) 261 return 0; 262 263 /* Yes, random drivers might want more. But I'm tired of buggy drivers */ 264 return ULONG_MAX; 265 } 266 267 static inline bool file_mmap_ok(struct file *file, struct inode *inode, 268 unsigned long pgoff, unsigned long len) 269 { 270 u64 maxsize = file_mmap_size_max(file, inode); 271 272 if (maxsize && len > maxsize) 273 return false; 274 maxsize -= len; 275 if (pgoff > maxsize >> PAGE_SHIFT) 276 return false; 277 return true; 278 } 279 280 /* 281 * The caller must write-lock current->mm->mmap_lock. 282 */ 283 unsigned long do_mmap(struct file *file, unsigned long addr, 284 unsigned long len, unsigned long prot, 285 unsigned long flags, vm_flags_t vm_flags, 286 unsigned long pgoff, unsigned long *populate, 287 struct list_head *uf) 288 { 289 struct mm_struct *mm = current->mm; 290 int pkey = 0; 291 292 *populate = 0; 293 294 if (!len) 295 return -EINVAL; 296 297 /* 298 * Does the application expect PROT_READ to imply PROT_EXEC? 299 * 300 * (the exception is when the underlying filesystem is noexec 301 * mounted, in which case we don't add PROT_EXEC.) 302 */ 303 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) 304 if (!(file && path_noexec(&file->f_path))) 305 prot |= PROT_EXEC; 306 307 /* force arch specific MAP_FIXED handling in get_unmapped_area */ 308 if (flags & MAP_FIXED_NOREPLACE) 309 flags |= MAP_FIXED; 310 311 if (!(flags & MAP_FIXED)) 312 addr = round_hint_to_min(addr); 313 314 /* Careful about overflows.. */ 315 len = PAGE_ALIGN(len); 316 if (!len) 317 return -ENOMEM; 318 319 /* offset overflow? */ 320 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) 321 return -EOVERFLOW; 322 323 /* Too many mappings? */ 324 if (mm->map_count > sysctl_max_map_count) 325 return -ENOMEM; 326 327 /* 328 * addr is returned from get_unmapped_area, 329 * There are two cases: 330 * 1> MAP_FIXED == false 331 * unallocated memory, no need to check sealing. 
332 * 1> MAP_FIXED == true 333 * sealing is checked inside mmap_region when 334 * do_vmi_munmap is called. 335 */ 336 337 if (prot == PROT_EXEC) { 338 pkey = execute_only_pkey(mm); 339 if (pkey < 0) 340 pkey = 0; 341 } 342 343 /* Do simple checking here so the lower-level routines won't have 344 * to. we assume access permissions have been handled by the open 345 * of the memory object, so we don't do any here. 346 */ 347 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | 348 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; 349 350 /* Obtain the address to map to. we verify (or select) it and ensure 351 * that it represents a valid section of the address space. 352 */ 353 addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags); 354 if (IS_ERR_VALUE(addr)) 355 return addr; 356 357 if (flags & MAP_FIXED_NOREPLACE) { 358 if (find_vma_intersection(mm, addr, addr + len)) 359 return -EEXIST; 360 } 361 362 if (flags & MAP_LOCKED) 363 if (!can_do_mlock()) 364 return -EPERM; 365 366 if (!mlock_future_ok(mm, vm_flags, len)) 367 return -EAGAIN; 368 369 if (file) { 370 struct inode *inode = file_inode(file); 371 unsigned long flags_mask; 372 373 if (!file_mmap_ok(file, inode, pgoff, len)) 374 return -EOVERFLOW; 375 376 flags_mask = LEGACY_MAP_MASK; 377 if (file->f_op->fop_flags & FOP_MMAP_SYNC) 378 flags_mask |= MAP_SYNC; 379 380 switch (flags & MAP_TYPE) { 381 case MAP_SHARED: 382 /* 383 * Force use of MAP_SHARED_VALIDATE with non-legacy 384 * flags. E.g. MAP_SYNC is dangerous to use with 385 * MAP_SHARED as you don't know which consistency model 386 * you will get. We silently ignore unsupported flags 387 * with MAP_SHARED to preserve backward compatibility. 388 */ 389 flags &= LEGACY_MAP_MASK; 390 fallthrough; 391 case MAP_SHARED_VALIDATE: 392 if (flags & ~flags_mask) 393 return -EOPNOTSUPP; 394 if (prot & PROT_WRITE) { 395 if (!(file->f_mode & FMODE_WRITE)) 396 return -EACCES; 397 if (IS_SWAPFILE(file->f_mapping->host)) 398 return -ETXTBSY; 399 } 400 401 /* 402 * Make sure we don't allow writing to an append-only 403 * file.. 404 */ 405 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) 406 return -EACCES; 407 408 vm_flags |= VM_SHARED | VM_MAYSHARE; 409 if (!(file->f_mode & FMODE_WRITE)) 410 vm_flags &= ~(VM_MAYWRITE | VM_SHARED); 411 fallthrough; 412 case MAP_PRIVATE: 413 if (!(file->f_mode & FMODE_READ)) 414 return -EACCES; 415 if (path_noexec(&file->f_path)) { 416 if (vm_flags & VM_EXEC) 417 return -EPERM; 418 vm_flags &= ~VM_MAYEXEC; 419 } 420 421 if (!file->f_op->mmap) 422 return -ENODEV; 423 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) 424 return -EINVAL; 425 break; 426 427 default: 428 return -EINVAL; 429 } 430 } else { 431 switch (flags & MAP_TYPE) { 432 case MAP_SHARED: 433 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) 434 return -EINVAL; 435 /* 436 * Ignore pgoff. 437 */ 438 pgoff = 0; 439 vm_flags |= VM_SHARED | VM_MAYSHARE; 440 break; 441 case MAP_DROPPABLE: 442 if (VM_DROPPABLE == VM_NONE) 443 return -ENOTSUPP; 444 /* 445 * A locked or stack area makes no sense to be droppable. 446 * 447 * Also, since droppable pages can just go away at any time 448 * it makes no sense to copy them on fork or dump them. 449 * 450 * And don't attempt to combine with hugetlb for now. 451 */ 452 if (flags & (MAP_LOCKED | MAP_HUGETLB)) 453 return -EINVAL; 454 if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) 455 return -EINVAL; 456 457 vm_flags |= VM_DROPPABLE; 458 459 /* 460 * If the pages can be dropped, then it doesn't make 461 * sense to reserve them. 
			 */
			vm_flags |= VM_NORESERVE;

			/*
			 * Likewise, they're volatile enough that they
			 * shouldn't survive forks or coredumps.
			 */
			vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
			fallthrough;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if (flags & MAP_NORESERVE) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}

unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		file = fget(fd);
		if (!file)
			return -EBADF;
		if (is_file_hugepages(file)) {
			len = ALIGN(len, huge_page_size(hstate_file(file)));
		} else if (unlikely(flags & MAP_HUGETLB)) {
			retval = -EINVAL;
			goto out_fput;
		}
	} else if (flags & MAP_HUGETLB) {
		struct hstate *hs;

		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (!hs)
			return -EINVAL;

		len = ALIGN(len, huge_page_size(hs));
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
	if (file)
		fput(file);
	return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * We account for memory if it's a private writable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return false;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}
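/*
 * Illustrative examples (added, not part of the original source) of what the
 * accountable_mapping() test above evaluates to:
 *   - a MAP_PRIVATE | PROT_WRITE anonymous mapping: VM_WRITE set, VM_SHARED
 *     and VM_NORESERVE clear -> accounted (charged against overcommit);
 *   - a MAP_SHARED mapping: VM_SHARED set -> not accounted here;
 *   - a MAP_PRIVATE | MAP_NORESERVE mapping with overcommit enabled:
 *     VM_NORESERVE was set in do_mmap() -> not accounted;
 *   - a hugetlbfs-backed file: rejected by the is_file_hugepages() check,
 *     since hugetlb does its own reservation accounting.
 */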
/**
 * unmapped_area() - Find an area between the low_limit and the high_limit with
 * the correct alignment and offset, all from @info. Note: current->mm is used
 * for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;
	VMA_ITERATOR(vmi, current->mm, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask + info->start_gap;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	/*
	 * Adjust for the gap first so it doesn't interfere with the
	 * later alignment. The first step is the minimum needed to
	 * fulfill the start gap, the next step is the minimum to align
	 * that. It is the minimum needed to fulfill both.
	 */
	gap = vma_iter_addr(&vmi) + info->start_gap;
	gap += (info->align_offset - gap) & info->align_mask;
	tmp = vma_next(&vmi);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap + length - 1) {
			low_limit = tmp->vm_end;
			vma_iter_reset(&vmi);
			goto retry;
		}
	} else {
		tmp = vma_prev(&vmi);
		if (tmp && vm_end_gap(tmp) > gap) {
			low_limit = vm_end_gap(tmp);
			vma_iter_reset(&vmi);
			goto retry;
		}
	}

	return gap;
}

/**
 * unmapped_area_topdown() - Find an area between the low_limit and the
 * high_limit with the correct alignment and offset at the highest available
 * address, all from @info. Note: current->mm is used for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
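 *
 * Purely illustrative example with assumed values (not from the original
 * source): for a 16 KiB request with align_mask 0x1fffff and align_offset 0
 * (the 2 MiB alignment THP asks for), the end of the candidate gap is reduced
 * by the request length and then rounded down to a 2 MiB boundary, so the
 * mapping ends up 2 MiB aligned and as close to high_limit as possible.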
662 */ 663 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) 664 { 665 unsigned long length, gap, gap_end; 666 unsigned long low_limit, high_limit; 667 struct vm_area_struct *tmp; 668 VMA_ITERATOR(vmi, current->mm, 0); 669 670 /* Adjust search length to account for worst case alignment overhead */ 671 length = info->length + info->align_mask + info->start_gap; 672 if (length < info->length) 673 return -ENOMEM; 674 675 low_limit = info->low_limit; 676 if (low_limit < mmap_min_addr) 677 low_limit = mmap_min_addr; 678 high_limit = info->high_limit; 679 retry: 680 if (vma_iter_area_highest(&vmi, low_limit, high_limit, length)) 681 return -ENOMEM; 682 683 gap = vma_iter_end(&vmi) - info->length; 684 gap -= (gap - info->align_offset) & info->align_mask; 685 gap_end = vma_iter_end(&vmi); 686 tmp = vma_next(&vmi); 687 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ 688 if (vm_start_gap(tmp) < gap_end) { 689 high_limit = vm_start_gap(tmp); 690 vma_iter_reset(&vmi); 691 goto retry; 692 } 693 } else { 694 tmp = vma_prev(&vmi); 695 if (tmp && vm_end_gap(tmp) > gap) { 696 high_limit = tmp->vm_start; 697 vma_iter_reset(&vmi); 698 goto retry; 699 } 700 } 701 702 return gap; 703 } 704 705 /* 706 * Search for an unmapped address range. 707 * 708 * We are looking for a range that: 709 * - does not intersect with any VMA; 710 * - is contained within the [low_limit, high_limit) interval; 711 * - is at least the desired size. 712 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) 713 */ 714 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) 715 { 716 unsigned long addr; 717 718 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) 719 addr = unmapped_area_topdown(info); 720 else 721 addr = unmapped_area(info); 722 723 trace_vm_unmapped_area(addr, info); 724 return addr; 725 } 726 727 /* Get an address range which is currently unmapped. 728 * For shmat() with addr=0. 729 * 730 * Ugly calling convention alert: 731 * Return value with the low bits set means error value, 732 * ie 733 * if (ret & ~PAGE_MASK) 734 * error = ret; 735 * 736 * This function "knows" that -ENOMEM has the bits set. 
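 *
 * A hedged illustration (added, not in the original comment): on a 64-bit
 * build -ENOMEM is 0xfffffffffffffff4, so "ret & ~PAGE_MASK" is non-zero and
 * callers that test offset_in_page(ret) treat it as an error, while any
 * page-aligned return value passes the same test as a valid address.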
737 */ 738 unsigned long 739 generic_get_unmapped_area(struct file *filp, unsigned long addr, 740 unsigned long len, unsigned long pgoff, 741 unsigned long flags) 742 { 743 struct mm_struct *mm = current->mm; 744 struct vm_area_struct *vma, *prev; 745 struct vm_unmapped_area_info info = {}; 746 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); 747 748 if (len > mmap_end - mmap_min_addr) 749 return -ENOMEM; 750 751 if (flags & MAP_FIXED) 752 return addr; 753 754 if (addr) { 755 addr = PAGE_ALIGN(addr); 756 vma = find_vma_prev(mm, addr, &prev); 757 if (mmap_end - len >= addr && addr >= mmap_min_addr && 758 (!vma || addr + len <= vm_start_gap(vma)) && 759 (!prev || addr >= vm_end_gap(prev))) 760 return addr; 761 } 762 763 info.length = len; 764 info.low_limit = mm->mmap_base; 765 info.high_limit = mmap_end; 766 return vm_unmapped_area(&info); 767 } 768 769 #ifndef HAVE_ARCH_UNMAPPED_AREA 770 unsigned long 771 arch_get_unmapped_area(struct file *filp, unsigned long addr, 772 unsigned long len, unsigned long pgoff, 773 unsigned long flags) 774 { 775 return generic_get_unmapped_area(filp, addr, len, pgoff, flags); 776 } 777 #endif 778 779 /* 780 * This mmap-allocator allocates new areas top-down from below the 781 * stack's low limit (the base): 782 */ 783 unsigned long 784 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 785 unsigned long len, unsigned long pgoff, 786 unsigned long flags) 787 { 788 struct vm_area_struct *vma, *prev; 789 struct mm_struct *mm = current->mm; 790 struct vm_unmapped_area_info info = {}; 791 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); 792 793 /* requested length too big for entire address space */ 794 if (len > mmap_end - mmap_min_addr) 795 return -ENOMEM; 796 797 if (flags & MAP_FIXED) 798 return addr; 799 800 /* requesting a specific address */ 801 if (addr) { 802 addr = PAGE_ALIGN(addr); 803 vma = find_vma_prev(mm, addr, &prev); 804 if (mmap_end - len >= addr && addr >= mmap_min_addr && 805 (!vma || addr + len <= vm_start_gap(vma)) && 806 (!prev || addr >= vm_end_gap(prev))) 807 return addr; 808 } 809 810 info.flags = VM_UNMAPPED_AREA_TOPDOWN; 811 info.length = len; 812 info.low_limit = PAGE_SIZE; 813 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); 814 addr = vm_unmapped_area(&info); 815 816 /* 817 * A failed mmap() very likely causes application failure, 818 * so fall back to the bottom-up function here. This scenario 819 * can happen with large stack limits and large mmap() 820 * allocations. 
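 *
 * Illustrative scenario (assumed numbers, not from the original comment):
 * a very large RLIMIT_STACK pushes mm->mmap_base far down, shrinking the
 * top-down window below it, so a multi-gigabyte mmap() may not fit there;
 * the bottom-up retry searches from TASK_UNMAPPED_BASE up to mmap_end and
 * can still place the mapping in space the stack has reserved but not used.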
821 */ 822 if (offset_in_page(addr)) { 823 VM_BUG_ON(addr != -ENOMEM); 824 info.flags = 0; 825 info.low_limit = TASK_UNMAPPED_BASE; 826 info.high_limit = mmap_end; 827 addr = vm_unmapped_area(&info); 828 } 829 830 return addr; 831 } 832 833 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 834 unsigned long 835 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 836 unsigned long len, unsigned long pgoff, 837 unsigned long flags) 838 { 839 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); 840 } 841 #endif 842 843 #ifndef HAVE_ARCH_UNMAPPED_AREA_VMFLAGS 844 unsigned long 845 arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len, 846 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) 847 { 848 return arch_get_unmapped_area(filp, addr, len, pgoff, flags); 849 } 850 851 unsigned long 852 arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr, 853 unsigned long len, unsigned long pgoff, 854 unsigned long flags, vm_flags_t vm_flags) 855 { 856 return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); 857 } 858 #endif 859 860 unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp, 861 unsigned long addr, unsigned long len, 862 unsigned long pgoff, unsigned long flags, 863 vm_flags_t vm_flags) 864 { 865 if (test_bit(MMF_TOPDOWN, &mm->flags)) 866 return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff, 867 flags, vm_flags); 868 return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, vm_flags); 869 } 870 871 unsigned long 872 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, 873 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) 874 { 875 unsigned long (*get_area)(struct file *, unsigned long, 876 unsigned long, unsigned long, unsigned long) 877 = NULL; 878 879 unsigned long error = arch_mmap_check(addr, len, flags); 880 if (error) 881 return error; 882 883 /* Careful about overflows.. */ 884 if (len > TASK_SIZE) 885 return -ENOMEM; 886 887 if (file) { 888 if (file->f_op->get_unmapped_area) 889 get_area = file->f_op->get_unmapped_area; 890 } else if (flags & MAP_SHARED) { 891 /* 892 * mmap_region() will call shmem_zero_setup() to create a file, 893 * so use shmem's get_unmapped_area in case it can be huge. 894 */ 895 get_area = shmem_get_unmapped_area; 896 } 897 898 /* Always treat pgoff as zero for anonymous memory. */ 899 if (!file) 900 pgoff = 0; 901 902 if (get_area) { 903 addr = get_area(file, addr, len, pgoff, flags); 904 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 905 /* Ensures that larger anonymous mappings are THP aligned. */ 906 addr = thp_get_unmapped_area_vmflags(file, addr, len, 907 pgoff, flags, vm_flags); 908 } else { 909 addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len, 910 pgoff, flags, vm_flags); 911 } 912 if (IS_ERR_VALUE(addr)) 913 return addr; 914 915 if (addr > TASK_SIZE - len) 916 return -ENOMEM; 917 if (offset_in_page(addr)) 918 return -EINVAL; 919 920 error = security_mmap_addr(addr); 921 return error ? 
error : addr; 922 } 923 924 unsigned long 925 mm_get_unmapped_area(struct mm_struct *mm, struct file *file, 926 unsigned long addr, unsigned long len, 927 unsigned long pgoff, unsigned long flags) 928 { 929 if (test_bit(MMF_TOPDOWN, &mm->flags)) 930 return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags); 931 return arch_get_unmapped_area(file, addr, len, pgoff, flags); 932 } 933 EXPORT_SYMBOL(mm_get_unmapped_area); 934 935 /** 936 * find_vma_intersection() - Look up the first VMA which intersects the interval 937 * @mm: The process address space. 938 * @start_addr: The inclusive start user address. 939 * @end_addr: The exclusive end user address. 940 * 941 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes 942 * start_addr < end_addr. 943 */ 944 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, 945 unsigned long start_addr, 946 unsigned long end_addr) 947 { 948 unsigned long index = start_addr; 949 950 mmap_assert_locked(mm); 951 return mt_find(&mm->mm_mt, &index, end_addr - 1); 952 } 953 EXPORT_SYMBOL(find_vma_intersection); 954 955 /** 956 * find_vma() - Find the VMA for a given address, or the next VMA. 957 * @mm: The mm_struct to check 958 * @addr: The address 959 * 960 * Returns: The VMA associated with addr, or the next VMA. 961 * May return %NULL in the case of no VMA at addr or above. 962 */ 963 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) 964 { 965 unsigned long index = addr; 966 967 mmap_assert_locked(mm); 968 return mt_find(&mm->mm_mt, &index, ULONG_MAX); 969 } 970 EXPORT_SYMBOL(find_vma); 971 972 /** 973 * find_vma_prev() - Find the VMA for a given address, or the next vma and 974 * set %pprev to the previous VMA, if any. 975 * @mm: The mm_struct to check 976 * @addr: The address 977 * @pprev: The pointer to set to the previous VMA 978 * 979 * Note that RCU lock is missing here since the external mmap_lock() is used 980 * instead. 981 * 982 * Returns: The VMA associated with @addr, or the next vma. 983 * May return %NULL in the case of no vma at addr or above. 984 */ 985 struct vm_area_struct * 986 find_vma_prev(struct mm_struct *mm, unsigned long addr, 987 struct vm_area_struct **pprev) 988 { 989 struct vm_area_struct *vma; 990 VMA_ITERATOR(vmi, mm, addr); 991 992 vma = vma_iter_load(&vmi); 993 *pprev = vma_prev(&vmi); 994 if (!vma) 995 vma = vma_next(&vmi); 996 return vma; 997 } 998 999 /* 1000 * Verify that the stack growth is acceptable and 1001 * update accounting. This is shared with both the 1002 * grow-up and grow-down cases. 1003 */ 1004 static int acct_stack_growth(struct vm_area_struct *vma, 1005 unsigned long size, unsigned long grow) 1006 { 1007 struct mm_struct *mm = vma->vm_mm; 1008 unsigned long new_start; 1009 1010 /* address space limit tests */ 1011 if (!may_expand_vm(mm, vma->vm_flags, grow)) 1012 return -ENOMEM; 1013 1014 /* Stack limit test */ 1015 if (size > rlimit(RLIMIT_STACK)) 1016 return -ENOMEM; 1017 1018 /* mlock limit tests */ 1019 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) 1020 return -ENOMEM; 1021 1022 /* Check to ensure the stack will not grow into a hugetlb-only region */ 1023 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : 1024 vma->vm_end - size; 1025 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) 1026 return -EFAULT; 1027 1028 /* 1029 * Overcommit.. This must be the final test, as it will 1030 * update security statistics. 
1031 */ 1032 if (security_vm_enough_memory_mm(mm, grow)) 1033 return -ENOMEM; 1034 1035 return 0; 1036 } 1037 1038 #if defined(CONFIG_STACK_GROWSUP) 1039 /* 1040 * PA-RISC uses this for its stack. 1041 * vma is the last one with address > vma->vm_end. Have to extend vma. 1042 */ 1043 static int expand_upwards(struct vm_area_struct *vma, unsigned long address) 1044 { 1045 struct mm_struct *mm = vma->vm_mm; 1046 struct vm_area_struct *next; 1047 unsigned long gap_addr; 1048 int error = 0; 1049 VMA_ITERATOR(vmi, mm, vma->vm_start); 1050 1051 if (!(vma->vm_flags & VM_GROWSUP)) 1052 return -EFAULT; 1053 1054 /* Guard against exceeding limits of the address space. */ 1055 address &= PAGE_MASK; 1056 if (address >= (TASK_SIZE & PAGE_MASK)) 1057 return -ENOMEM; 1058 address += PAGE_SIZE; 1059 1060 /* Enforce stack_guard_gap */ 1061 gap_addr = address + stack_guard_gap; 1062 1063 /* Guard against overflow */ 1064 if (gap_addr < address || gap_addr > TASK_SIZE) 1065 gap_addr = TASK_SIZE; 1066 1067 next = find_vma_intersection(mm, vma->vm_end, gap_addr); 1068 if (next && vma_is_accessible(next)) { 1069 if (!(next->vm_flags & VM_GROWSUP)) 1070 return -ENOMEM; 1071 /* Check that both stack segments have the same anon_vma? */ 1072 } 1073 1074 if (next) 1075 vma_iter_prev_range_limit(&vmi, address); 1076 1077 vma_iter_config(&vmi, vma->vm_start, address); 1078 if (vma_iter_prealloc(&vmi, vma)) 1079 return -ENOMEM; 1080 1081 /* We must make sure the anon_vma is allocated. */ 1082 if (unlikely(anon_vma_prepare(vma))) { 1083 vma_iter_free(&vmi); 1084 return -ENOMEM; 1085 } 1086 1087 /* Lock the VMA before expanding to prevent concurrent page faults */ 1088 vma_start_write(vma); 1089 /* 1090 * vma->vm_start/vm_end cannot change under us because the caller 1091 * is required to hold the mmap_lock in read mode. We need the 1092 * anon_vma lock to serialize against concurrent expand_stacks. 1093 */ 1094 anon_vma_lock_write(vma->anon_vma); 1095 1096 /* Somebody else might have raced and expanded it already */ 1097 if (address > vma->vm_end) { 1098 unsigned long size, grow; 1099 1100 size = address - vma->vm_start; 1101 grow = (address - vma->vm_end) >> PAGE_SHIFT; 1102 1103 error = -ENOMEM; 1104 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { 1105 error = acct_stack_growth(vma, size, grow); 1106 if (!error) { 1107 /* 1108 * We only hold a shared mmap_lock lock here, so 1109 * we need to protect against concurrent vma 1110 * expansions. anon_vma_lock_write() doesn't 1111 * help here, as we don't guarantee that all 1112 * growable vmas in a mm share the same root 1113 * anon vma. So, we reuse mm->page_table_lock 1114 * to guard against concurrent vma expansions. 1115 */ 1116 spin_lock(&mm->page_table_lock); 1117 if (vma->vm_flags & VM_LOCKED) 1118 mm->locked_vm += grow; 1119 vm_stat_account(mm, vma->vm_flags, grow); 1120 anon_vma_interval_tree_pre_update_vma(vma); 1121 vma->vm_end = address; 1122 /* Overwrite old entry in mtree. */ 1123 vma_iter_store(&vmi, vma); 1124 anon_vma_interval_tree_post_update_vma(vma); 1125 spin_unlock(&mm->page_table_lock); 1126 1127 perf_event_mmap(vma); 1128 } 1129 } 1130 } 1131 anon_vma_unlock_write(vma->anon_vma); 1132 vma_iter_free(&vmi); 1133 validate_mm(mm); 1134 return error; 1135 } 1136 #endif /* CONFIG_STACK_GROWSUP */ 1137 1138 /* 1139 * vma is the first one with address < vma->vm_start. Have to extend vma. 1140 * mmap_lock held for writing. 
1141 */ 1142 int expand_downwards(struct vm_area_struct *vma, unsigned long address) 1143 { 1144 struct mm_struct *mm = vma->vm_mm; 1145 struct vm_area_struct *prev; 1146 int error = 0; 1147 VMA_ITERATOR(vmi, mm, vma->vm_start); 1148 1149 if (!(vma->vm_flags & VM_GROWSDOWN)) 1150 return -EFAULT; 1151 1152 address &= PAGE_MASK; 1153 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) 1154 return -EPERM; 1155 1156 /* Enforce stack_guard_gap */ 1157 prev = vma_prev(&vmi); 1158 /* Check that both stack segments have the same anon_vma? */ 1159 if (prev) { 1160 if (!(prev->vm_flags & VM_GROWSDOWN) && 1161 vma_is_accessible(prev) && 1162 (address - prev->vm_end < stack_guard_gap)) 1163 return -ENOMEM; 1164 } 1165 1166 if (prev) 1167 vma_iter_next_range_limit(&vmi, vma->vm_start); 1168 1169 vma_iter_config(&vmi, address, vma->vm_end); 1170 if (vma_iter_prealloc(&vmi, vma)) 1171 return -ENOMEM; 1172 1173 /* We must make sure the anon_vma is allocated. */ 1174 if (unlikely(anon_vma_prepare(vma))) { 1175 vma_iter_free(&vmi); 1176 return -ENOMEM; 1177 } 1178 1179 /* Lock the VMA before expanding to prevent concurrent page faults */ 1180 vma_start_write(vma); 1181 /* 1182 * vma->vm_start/vm_end cannot change under us because the caller 1183 * is required to hold the mmap_lock in read mode. We need the 1184 * anon_vma lock to serialize against concurrent expand_stacks. 1185 */ 1186 anon_vma_lock_write(vma->anon_vma); 1187 1188 /* Somebody else might have raced and expanded it already */ 1189 if (address < vma->vm_start) { 1190 unsigned long size, grow; 1191 1192 size = vma->vm_end - address; 1193 grow = (vma->vm_start - address) >> PAGE_SHIFT; 1194 1195 error = -ENOMEM; 1196 if (grow <= vma->vm_pgoff) { 1197 error = acct_stack_growth(vma, size, grow); 1198 if (!error) { 1199 /* 1200 * We only hold a shared mmap_lock lock here, so 1201 * we need to protect against concurrent vma 1202 * expansions. anon_vma_lock_write() doesn't 1203 * help here, as we don't guarantee that all 1204 * growable vmas in a mm share the same root 1205 * anon vma. So, we reuse mm->page_table_lock 1206 * to guard against concurrent vma expansions. 1207 */ 1208 spin_lock(&mm->page_table_lock); 1209 if (vma->vm_flags & VM_LOCKED) 1210 mm->locked_vm += grow; 1211 vm_stat_account(mm, vma->vm_flags, grow); 1212 anon_vma_interval_tree_pre_update_vma(vma); 1213 vma->vm_start = address; 1214 vma->vm_pgoff -= grow; 1215 /* Overwrite old entry in mtree. */ 1216 vma_iter_store(&vmi, vma); 1217 anon_vma_interval_tree_post_update_vma(vma); 1218 spin_unlock(&mm->page_table_lock); 1219 1220 perf_event_mmap(vma); 1221 } 1222 } 1223 } 1224 anon_vma_unlock_write(vma->anon_vma); 1225 vma_iter_free(&vmi); 1226 validate_mm(mm); 1227 return error; 1228 } 1229 1230 /* enforced gap between the expanding stack and other mappings. 
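 *
 * Worked example (illustrative): with 4 KiB pages the default of 256 pages
 * is a 1 MiB guard gap; booting with stack_guard_gap=512 (parsed below)
 * doubles that to 2 MiB, and stack_guard_gap=0 disables the gap entirely.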
*/ 1231 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; 1232 1233 static int __init cmdline_parse_stack_guard_gap(char *p) 1234 { 1235 unsigned long val; 1236 char *endptr; 1237 1238 val = simple_strtoul(p, &endptr, 10); 1239 if (!*endptr) 1240 stack_guard_gap = val << PAGE_SHIFT; 1241 1242 return 1; 1243 } 1244 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); 1245 1246 #ifdef CONFIG_STACK_GROWSUP 1247 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) 1248 { 1249 return expand_upwards(vma, address); 1250 } 1251 1252 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) 1253 { 1254 struct vm_area_struct *vma, *prev; 1255 1256 addr &= PAGE_MASK; 1257 vma = find_vma_prev(mm, addr, &prev); 1258 if (vma && (vma->vm_start <= addr)) 1259 return vma; 1260 if (!prev) 1261 return NULL; 1262 if (expand_stack_locked(prev, addr)) 1263 return NULL; 1264 if (prev->vm_flags & VM_LOCKED) 1265 populate_vma_page_range(prev, addr, prev->vm_end, NULL); 1266 return prev; 1267 } 1268 #else 1269 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) 1270 { 1271 return expand_downwards(vma, address); 1272 } 1273 1274 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) 1275 { 1276 struct vm_area_struct *vma; 1277 unsigned long start; 1278 1279 addr &= PAGE_MASK; 1280 vma = find_vma(mm, addr); 1281 if (!vma) 1282 return NULL; 1283 if (vma->vm_start <= addr) 1284 return vma; 1285 start = vma->vm_start; 1286 if (expand_stack_locked(vma, addr)) 1287 return NULL; 1288 if (vma->vm_flags & VM_LOCKED) 1289 populate_vma_page_range(vma, addr, start, NULL); 1290 return vma; 1291 } 1292 #endif 1293 1294 #if defined(CONFIG_STACK_GROWSUP) 1295 1296 #define vma_expand_up(vma,addr) expand_upwards(vma, addr) 1297 #define vma_expand_down(vma, addr) (-EFAULT) 1298 1299 #else 1300 1301 #define vma_expand_up(vma,addr) (-EFAULT) 1302 #define vma_expand_down(vma, addr) expand_downwards(vma, addr) 1303 1304 #endif 1305 1306 /* 1307 * expand_stack(): legacy interface for page faulting. Don't use unless 1308 * you have to. 1309 * 1310 * This is called with the mm locked for reading, drops the lock, takes 1311 * the lock for writing, tries to look up a vma again, expands it if 1312 * necessary, and downgrades the lock to reading again. 1313 * 1314 * If no vma is found or it can't be expanded, it returns NULL and has 1315 * dropped the lock. 1316 */ 1317 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) 1318 { 1319 struct vm_area_struct *vma, *prev; 1320 1321 mmap_read_unlock(mm); 1322 if (mmap_write_lock_killable(mm)) 1323 return NULL; 1324 1325 vma = find_vma_prev(mm, addr, &prev); 1326 if (vma && vma->vm_start <= addr) 1327 goto success; 1328 1329 if (prev && !vma_expand_up(prev, addr)) { 1330 vma = prev; 1331 goto success; 1332 } 1333 1334 if (vma && !vma_expand_down(vma, addr)) 1335 goto success; 1336 1337 mmap_write_unlock(mm); 1338 return NULL; 1339 1340 success: 1341 mmap_write_downgrade(mm); 1342 return vma; 1343 } 1344 1345 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls. 1346 * @mm: The mm_struct 1347 * @start: The start address to munmap 1348 * @len: The length to be munmapped. 1349 * @uf: The userfaultfd list_head 1350 * 1351 * Return: 0 on success, error otherwise. 
1352 */ 1353 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, 1354 struct list_head *uf) 1355 { 1356 VMA_ITERATOR(vmi, mm, start); 1357 1358 return do_vmi_munmap(&vmi, mm, start, len, uf, false); 1359 } 1360 1361 unsigned long mmap_region(struct file *file, unsigned long addr, 1362 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 1363 struct list_head *uf) 1364 { 1365 struct mm_struct *mm = current->mm; 1366 struct vm_area_struct *vma = NULL; 1367 struct vm_area_struct *next, *prev, *merge; 1368 pgoff_t pglen = PHYS_PFN(len); 1369 unsigned long charged = 0; 1370 struct vma_munmap_struct vms; 1371 struct ma_state mas_detach; 1372 struct maple_tree mt_detach; 1373 unsigned long end = addr + len; 1374 unsigned long merge_start = addr, merge_end = end; 1375 bool writable_file_mapping = false; 1376 pgoff_t vm_pgoff; 1377 int error = -ENOMEM; 1378 VMA_ITERATOR(vmi, mm, addr); 1379 unsigned long nr_pages, nr_accounted; 1380 1381 nr_pages = count_vma_pages_range(mm, addr, end, &nr_accounted); 1382 1383 /* 1384 * Check against address space limit. 1385 * MAP_FIXED may remove pages of mappings that intersects with requested 1386 * mapping. Account for the pages it would unmap. 1387 */ 1388 if (!may_expand_vm(mm, vm_flags, pglen - nr_pages)) 1389 return -ENOMEM; 1390 1391 /* Find the first overlapping VMA */ 1392 vma = vma_find(&vmi, end); 1393 init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false); 1394 if (vma) { 1395 mt_init_flags(&mt_detach, vmi.mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); 1396 mt_on_stack(mt_detach); 1397 mas_init(&mas_detach, &mt_detach, /* addr = */ 0); 1398 /* Prepare to unmap any existing mapping in the area */ 1399 error = vms_gather_munmap_vmas(&vms, &mas_detach); 1400 if (error) 1401 goto gather_failed; 1402 1403 next = vms.next; 1404 prev = vms.prev; 1405 vma = NULL; 1406 } else { 1407 next = vma_next(&vmi); 1408 prev = vma_prev(&vmi); 1409 if (prev) 1410 vma_iter_next_range(&vmi); 1411 } 1412 1413 /* 1414 * Private writable mapping: check memory availability 1415 */ 1416 if (accountable_mapping(file, vm_flags)) { 1417 charged = pglen; 1418 charged -= vms.nr_accounted; 1419 if (charged && security_vm_enough_memory_mm(mm, charged)) 1420 goto abort_munmap; 1421 1422 vms.nr_accounted = 0; 1423 vm_flags |= VM_ACCOUNT; 1424 } 1425 1426 if (vm_flags & VM_SPECIAL) 1427 goto cannot_expand; 1428 1429 /* Attempt to expand an old mapping */ 1430 /* Check next */ 1431 if (next && next->vm_start == end && !vma_policy(next) && 1432 can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen, 1433 NULL_VM_UFFD_CTX, NULL)) { 1434 merge_end = next->vm_end; 1435 vma = next; 1436 vm_pgoff = next->vm_pgoff - pglen; 1437 } 1438 1439 /* Check prev */ 1440 if (prev && prev->vm_end == addr && !vma_policy(prev) && 1441 (vma ? 
can_vma_merge_after(prev, vm_flags, vma->anon_vma, file, 1442 pgoff, vma->vm_userfaultfd_ctx, NULL) : 1443 can_vma_merge_after(prev, vm_flags, NULL, file, pgoff, 1444 NULL_VM_UFFD_CTX, NULL))) { 1445 merge_start = prev->vm_start; 1446 vma = prev; 1447 vm_pgoff = prev->vm_pgoff; 1448 vma_prev(&vmi); /* Equivalent to going to the previous range */ 1449 } 1450 1451 if (vma) { 1452 /* Actually expand, if possible */ 1453 if (!vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) { 1454 khugepaged_enter_vma(vma, vm_flags); 1455 goto expanded; 1456 } 1457 1458 /* If the expand fails, then reposition the vma iterator */ 1459 if (unlikely(vma == prev)) 1460 vma_iter_set(&vmi, addr); 1461 } 1462 1463 cannot_expand: 1464 1465 /* 1466 * Determine the object being mapped and call the appropriate 1467 * specific mapper. the address has already been validated, but 1468 * not unmapped, but the maps are removed from the list. 1469 */ 1470 vma = vm_area_alloc(mm); 1471 if (!vma) 1472 goto unacct_error; 1473 1474 vma_iter_config(&vmi, addr, end); 1475 vma_set_range(vma, addr, end, pgoff); 1476 vm_flags_init(vma, vm_flags); 1477 vma->vm_page_prot = vm_get_page_prot(vm_flags); 1478 1479 if (file) { 1480 vma->vm_file = get_file(file); 1481 /* 1482 * call_mmap() may map PTE, so ensure there are no existing PTEs 1483 * and call the vm_ops close function if one exists. 1484 */ 1485 vms_clean_up_area(&vms, &mas_detach); 1486 error = call_mmap(file, vma); 1487 if (error) 1488 goto unmap_and_free_vma; 1489 1490 if (vma_is_shared_maywrite(vma)) { 1491 error = mapping_map_writable(file->f_mapping); 1492 if (error) 1493 goto close_and_free_vma; 1494 1495 writable_file_mapping = true; 1496 } 1497 1498 /* 1499 * Expansion is handled above, merging is handled below. 1500 * Drivers should not alter the address of the VMA. 1501 */ 1502 error = -EINVAL; 1503 if (WARN_ON((addr != vma->vm_start))) 1504 goto close_and_free_vma; 1505 1506 vma_iter_config(&vmi, addr, end); 1507 /* 1508 * If vm_flags changed after call_mmap(), we should try merge 1509 * vma again as we may succeed this time. 1510 */ 1511 if (unlikely(vm_flags != vma->vm_flags && prev)) { 1512 merge = vma_merge_new_vma(&vmi, prev, vma, 1513 vma->vm_start, vma->vm_end, 1514 vma->vm_pgoff); 1515 if (merge) { 1516 /* 1517 * ->mmap() can change vma->vm_file and fput 1518 * the original file. So fput the vma->vm_file 1519 * here or we would add an extra fput for file 1520 * and cause general protection fault 1521 * ultimately. 1522 */ 1523 fput(vma->vm_file); 1524 vm_area_free(vma); 1525 vma = merge; 1526 /* Update vm_flags to pick up the change. 
*/ 1527 vm_flags = vma->vm_flags; 1528 goto unmap_writable; 1529 } 1530 } 1531 1532 vm_flags = vma->vm_flags; 1533 } else if (vm_flags & VM_SHARED) { 1534 error = shmem_zero_setup(vma); 1535 if (error) 1536 goto free_vma; 1537 } else { 1538 vma_set_anonymous(vma); 1539 } 1540 1541 if (map_deny_write_exec(vma, vma->vm_flags)) { 1542 error = -EACCES; 1543 goto close_and_free_vma; 1544 } 1545 1546 /* Allow architectures to sanity-check the vm_flags */ 1547 error = -EINVAL; 1548 if (!arch_validate_flags(vma->vm_flags)) 1549 goto close_and_free_vma; 1550 1551 error = -ENOMEM; 1552 if (vma_iter_prealloc(&vmi, vma)) 1553 goto close_and_free_vma; 1554 1555 /* Lock the VMA since it is modified after insertion into VMA tree */ 1556 vma_start_write(vma); 1557 vma_iter_store(&vmi, vma); 1558 mm->map_count++; 1559 vma_link_file(vma); 1560 1561 /* 1562 * vma_merge() calls khugepaged_enter_vma() either, the below 1563 * call covers the non-merge case. 1564 */ 1565 khugepaged_enter_vma(vma, vma->vm_flags); 1566 1567 /* Once vma denies write, undo our temporary denial count */ 1568 unmap_writable: 1569 if (writable_file_mapping) 1570 mapping_unmap_writable(file->f_mapping); 1571 file = vma->vm_file; 1572 ksm_add_vma(vma); 1573 expanded: 1574 perf_event_mmap(vma); 1575 1576 /* Unmap any existing mapping in the area */ 1577 vms_complete_munmap_vmas(&vms, &mas_detach); 1578 1579 vm_stat_account(mm, vm_flags, pglen); 1580 if (vm_flags & VM_LOCKED) { 1581 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || 1582 is_vm_hugetlb_page(vma) || 1583 vma == get_gate_vma(current->mm)) 1584 vm_flags_clear(vma, VM_LOCKED_MASK); 1585 else 1586 mm->locked_vm += pglen; 1587 } 1588 1589 if (file) 1590 uprobe_mmap(vma); 1591 1592 /* 1593 * New (or expanded) vma always get soft dirty status. 1594 * Otherwise user-space soft-dirty page tracker won't 1595 * be able to distinguish situation when vma area unmapped, 1596 * then new mapped in-place (which must be aimed as 1597 * a completely new data area). 1598 */ 1599 vm_flags_set(vma, VM_SOFTDIRTY); 1600 1601 vma_set_page_prot(vma); 1602 1603 validate_mm(mm); 1604 return addr; 1605 1606 close_and_free_vma: 1607 if (file && !vms.closed_vm_ops && vma->vm_ops && vma->vm_ops->close) 1608 vma->vm_ops->close(vma); 1609 1610 if (file || vma->vm_file) { 1611 unmap_and_free_vma: 1612 fput(vma->vm_file); 1613 vma->vm_file = NULL; 1614 1615 vma_iter_set(&vmi, vma->vm_end); 1616 /* Undo any partial mapping done by a device driver. 
*/ 1617 unmap_region(&vmi.mas, vma, prev, next); 1618 } 1619 if (writable_file_mapping) 1620 mapping_unmap_writable(file->f_mapping); 1621 free_vma: 1622 vm_area_free(vma); 1623 unacct_error: 1624 if (charged) 1625 vm_unacct_memory(charged); 1626 1627 abort_munmap: 1628 vms_abort_munmap_vmas(&vms, &mas_detach); 1629 gather_failed: 1630 validate_mm(mm); 1631 return error; 1632 } 1633 1634 static int __vm_munmap(unsigned long start, size_t len, bool unlock) 1635 { 1636 int ret; 1637 struct mm_struct *mm = current->mm; 1638 LIST_HEAD(uf); 1639 VMA_ITERATOR(vmi, mm, start); 1640 1641 if (mmap_write_lock_killable(mm)) 1642 return -EINTR; 1643 1644 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); 1645 if (ret || !unlock) 1646 mmap_write_unlock(mm); 1647 1648 userfaultfd_unmap_complete(mm, &uf); 1649 return ret; 1650 } 1651 1652 int vm_munmap(unsigned long start, size_t len) 1653 { 1654 return __vm_munmap(start, len, false); 1655 } 1656 EXPORT_SYMBOL(vm_munmap); 1657 1658 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 1659 { 1660 addr = untagged_addr(addr); 1661 return __vm_munmap(addr, len, true); 1662 } 1663 1664 1665 /* 1666 * Emulation of deprecated remap_file_pages() syscall. 1667 */ 1668 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, 1669 unsigned long, prot, unsigned long, pgoff, unsigned long, flags) 1670 { 1671 1672 struct mm_struct *mm = current->mm; 1673 struct vm_area_struct *vma; 1674 unsigned long populate = 0; 1675 unsigned long ret = -EINVAL; 1676 struct file *file; 1677 1678 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n", 1679 current->comm, current->pid); 1680 1681 if (prot) 1682 return ret; 1683 start = start & PAGE_MASK; 1684 size = size & PAGE_MASK; 1685 1686 if (start + size <= start) 1687 return ret; 1688 1689 /* Does pgoff wrap? */ 1690 if (pgoff + (size >> PAGE_SHIFT) < pgoff) 1691 return ret; 1692 1693 if (mmap_write_lock_killable(mm)) 1694 return -EINTR; 1695 1696 vma = vma_lookup(mm, start); 1697 1698 if (!vma || !(vma->vm_flags & VM_SHARED)) 1699 goto out; 1700 1701 if (start + size > vma->vm_end) { 1702 VMA_ITERATOR(vmi, mm, vma->vm_end); 1703 struct vm_area_struct *next, *prev = vma; 1704 1705 for_each_vma_range(vmi, next, start + size) { 1706 /* hole between vmas ? */ 1707 if (next->vm_start != prev->vm_end) 1708 goto out; 1709 1710 if (next->vm_file != vma->vm_file) 1711 goto out; 1712 1713 if (next->vm_flags != vma->vm_flags) 1714 goto out; 1715 1716 if (start + size <= next->vm_end) 1717 break; 1718 1719 prev = next; 1720 } 1721 1722 if (!next) 1723 goto out; 1724 } 1725 1726 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; 1727 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; 1728 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; 1729 1730 flags &= MAP_NONBLOCK; 1731 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; 1732 if (vma->vm_flags & VM_LOCKED) 1733 flags |= MAP_LOCKED; 1734 1735 file = get_file(vma->vm_file); 1736 ret = do_mmap(vma->vm_file, start, size, 1737 prot, flags, 0, pgoff, &populate, NULL); 1738 fput(file); 1739 out: 1740 mmap_write_unlock(mm); 1741 if (populate) 1742 mm_populate(ret, populate); 1743 if (!IS_ERR_VALUE(ret)) 1744 ret = 0; 1745 return ret; 1746 } 1747 1748 /* 1749 * do_brk_flags() - Increase the brk vma if the flags match. 
1750 * @vmi: The vma iterator 1751 * @addr: The start address 1752 * @len: The length of the increase 1753 * @vma: The vma, 1754 * @flags: The VMA Flags 1755 * 1756 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags 1757 * do not match then create a new anonymous VMA. Eventually we may be able to 1758 * do some brk-specific accounting here. 1759 */ 1760 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, 1761 unsigned long addr, unsigned long len, unsigned long flags) 1762 { 1763 struct mm_struct *mm = current->mm; 1764 struct vma_prepare vp; 1765 1766 /* 1767 * Check against address space limits by the changed size 1768 * Note: This happens *after* clearing old mappings in some code paths. 1769 */ 1770 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 1771 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) 1772 return -ENOMEM; 1773 1774 if (mm->map_count > sysctl_max_map_count) 1775 return -ENOMEM; 1776 1777 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) 1778 return -ENOMEM; 1779 1780 /* 1781 * Expand the existing vma if possible; Note that singular lists do not 1782 * occur after forking, so the expand will only happen on new VMAs. 1783 */ 1784 if (vma && vma->vm_end == addr && !vma_policy(vma) && 1785 can_vma_merge_after(vma, flags, NULL, NULL, 1786 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) { 1787 vma_iter_config(vmi, vma->vm_start, addr + len); 1788 if (vma_iter_prealloc(vmi, vma)) 1789 goto unacct_fail; 1790 1791 vma_start_write(vma); 1792 1793 init_vma_prep(&vp, vma); 1794 vma_prepare(&vp); 1795 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); 1796 vma->vm_end = addr + len; 1797 vm_flags_set(vma, VM_SOFTDIRTY); 1798 vma_iter_store(vmi, vma); 1799 1800 vma_complete(&vp, vmi, mm); 1801 validate_mm(mm); 1802 khugepaged_enter_vma(vma, flags); 1803 goto out; 1804 } 1805 1806 if (vma) 1807 vma_iter_next_range(vmi); 1808 /* create a vma struct for an anonymous mapping */ 1809 vma = vm_area_alloc(mm); 1810 if (!vma) 1811 goto unacct_fail; 1812 1813 vma_set_anonymous(vma); 1814 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); 1815 vm_flags_init(vma, flags); 1816 vma->vm_page_prot = vm_get_page_prot(flags); 1817 vma_start_write(vma); 1818 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) 1819 goto mas_store_fail; 1820 1821 mm->map_count++; 1822 validate_mm(mm); 1823 ksm_add_vma(vma); 1824 out: 1825 perf_event_mmap(vma); 1826 mm->total_vm += len >> PAGE_SHIFT; 1827 mm->data_vm += len >> PAGE_SHIFT; 1828 if (flags & VM_LOCKED) 1829 mm->locked_vm += (len >> PAGE_SHIFT); 1830 vm_flags_set(vma, VM_SOFTDIRTY); 1831 return 0; 1832 1833 mas_store_fail: 1834 vm_area_free(vma); 1835 unacct_fail: 1836 vm_unacct_memory(len >> PAGE_SHIFT); 1837 return -ENOMEM; 1838 } 1839 1840 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) 1841 { 1842 struct mm_struct *mm = current->mm; 1843 struct vm_area_struct *vma = NULL; 1844 unsigned long len; 1845 int ret; 1846 bool populate; 1847 LIST_HEAD(uf); 1848 VMA_ITERATOR(vmi, mm, addr); 1849 1850 len = PAGE_ALIGN(request); 1851 if (len < request) 1852 return -ENOMEM; 1853 if (!len) 1854 return 0; 1855 1856 /* Until we need other flags, refuse anything except VM_EXEC. 
*/ 1857 if ((flags & (~VM_EXEC)) != 0) 1858 return -EINVAL; 1859 1860 if (mmap_write_lock_killable(mm)) 1861 return -EINTR; 1862 1863 ret = check_brk_limits(addr, len); 1864 if (ret) 1865 goto limits_failed; 1866 1867 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); 1868 if (ret) 1869 goto munmap_failed; 1870 1871 vma = vma_prev(&vmi); 1872 ret = do_brk_flags(&vmi, vma, addr, len, flags); 1873 populate = ((mm->def_flags & VM_LOCKED) != 0); 1874 mmap_write_unlock(mm); 1875 userfaultfd_unmap_complete(mm, &uf); 1876 if (populate && !ret) 1877 mm_populate(addr, len); 1878 return ret; 1879 1880 munmap_failed: 1881 limits_failed: 1882 mmap_write_unlock(mm); 1883 return ret; 1884 } 1885 EXPORT_SYMBOL(vm_brk_flags); 1886 1887 /* Release all mmaps. */ 1888 void exit_mmap(struct mm_struct *mm) 1889 { 1890 struct mmu_gather tlb; 1891 struct vm_area_struct *vma; 1892 unsigned long nr_accounted = 0; 1893 VMA_ITERATOR(vmi, mm, 0); 1894 int count = 0; 1895 1896 /* mm's last user has gone, and its about to be pulled down */ 1897 mmu_notifier_release(mm); 1898 1899 mmap_read_lock(mm); 1900 arch_exit_mmap(mm); 1901 1902 vma = vma_next(&vmi); 1903 if (!vma || unlikely(xa_is_zero(vma))) { 1904 /* Can happen if dup_mmap() received an OOM */ 1905 mmap_read_unlock(mm); 1906 mmap_write_lock(mm); 1907 goto destroy; 1908 } 1909 1910 lru_add_drain(); 1911 flush_cache_mm(mm); 1912 tlb_gather_mmu_fullmm(&tlb, mm); 1913 /* update_hiwater_rss(mm) here? but nobody should be looking */ 1914 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ 1915 unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false); 1916 mmap_read_unlock(mm); 1917 1918 /* 1919 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper 1920 * because the memory has been already freed. 1921 */ 1922 set_bit(MMF_OOM_SKIP, &mm->flags); 1923 mmap_write_lock(mm); 1924 mt_clear_in_rcu(&mm->mm_mt); 1925 vma_iter_set(&vmi, vma->vm_end); 1926 free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS, 1927 USER_PGTABLES_CEILING, true); 1928 tlb_finish_mmu(&tlb); 1929 1930 /* 1931 * Walk the list again, actually closing and freeing it, with preemption 1932 * enabled, without holding any MM locks besides the unreachable 1933 * mmap_write_lock. 1934 */ 1935 vma_iter_set(&vmi, vma->vm_end); 1936 do { 1937 if (vma->vm_flags & VM_ACCOUNT) 1938 nr_accounted += vma_pages(vma); 1939 remove_vma(vma, /* unreachable = */ true, /* closed = */ false); 1940 count++; 1941 cond_resched(); 1942 vma = vma_next(&vmi); 1943 } while (vma && likely(!xa_is_zero(vma))); 1944 1945 BUG_ON(count != mm->map_count); 1946 1947 trace_exit_mmap(mm); 1948 destroy: 1949 __mt_destroy(&mm->mm_mt); 1950 mmap_write_unlock(mm); 1951 vm_unacct_memory(nr_accounted); 1952 } 1953 1954 /* Insert vm structure into process list sorted by address 1955 * and into the inode's i_mmap tree. If vm_file is non-NULL 1956 * then i_mmap_rwsem is taken here. 1957 */ 1958 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) 1959 { 1960 unsigned long charged = vma_pages(vma); 1961 1962 1963 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) 1964 return -ENOMEM; 1965 1966 if ((vma->vm_flags & VM_ACCOUNT) && 1967 security_vm_enough_memory_mm(mm, charged)) 1968 return -ENOMEM; 1969 1970 /* 1971 * The vm_pgoff of a purely anonymous vma should be irrelevant 1972 * until its first write fault, when page's anon_vma and index 1973 * are set. 
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	unsigned long nr_accounted = 0;
	VMA_ITERATOR(vmi, mm, 0);
	int count = 0;

	/* mm's last user has gone, and it's about to be pulled down */
	mmu_notifier_release(mm);

	mmap_read_lock(mm);
	arch_exit_mmap(mm);

	vma = vma_next(&vmi);
	if (!vma || unlikely(xa_is_zero(vma))) {
		/* Can happen if dup_mmap() received an OOM */
		mmap_read_unlock(mm);
		mmap_write_lock(mm);
		goto destroy;
	}

	lru_add_drain();
	flush_cache_mm(mm);
	tlb_gather_mmu_fullmm(&tlb, mm);
	/* update_hiwater_rss(mm) here? but nobody should be looking */
	/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
	unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
	mmap_read_unlock(mm);

	/*
	 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
	 * because the memory has been already freed.
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);
	mmap_write_lock(mm);
	mt_clear_in_rcu(&mm->mm_mt);
	vma_iter_set(&vmi, vma->vm_end);
	free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
		      USER_PGTABLES_CEILING, true);
	tlb_finish_mmu(&tlb);

	/*
	 * Walk the list again, actually closing and freeing it, with preemption
	 * enabled, without holding any MM locks besides the unreachable
	 * mmap_write_lock.
	 */
	vma_iter_set(&vmi, vma->vm_end);
	do {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		remove_vma(vma, /* unreachable = */ true, /* closed = */ false);
		count++;
		cond_resched();
		vma = vma_next(&vmi);
	} while (vma && likely(!xa_is_zero(vma)));

	BUG_ON(count != mm->map_count);

	trace_exit_mmap(mm);
destroy:
	__mt_destroy(&mm->mm_mt);
	mmap_write_unlock(mm);
	vm_unacct_memory(nr_accounted);
}

/* Insert vm structure into process list sorted by address
 * and into the inode's i_mmap tree.  If vm_file is non-NULL
 * then i_mmap_rwsem is taken here.
 */
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	unsigned long charged = vma_pages(vma);

	if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
		return -ENOMEM;

	if ((vma->vm_flags & VM_ACCOUNT) &&
	     security_vm_enough_memory_mm(mm, charged))
		return -ENOMEM;

	/*
	 * The vm_pgoff of a purely anonymous vma should be irrelevant
	 * until its first write fault, when page's anon_vma and index
	 * are set.  But now set the vm_pgoff it will almost certainly
	 * end up with (unless mremap moves it elsewhere before that
	 * first write fault), so /proc/pid/maps tells a consistent story.
	 *
	 * By setting it to reflect the virtual start address of the
	 * vma, merges and splits can happen in a seamless way, just
	 * using the existing file pgoff checks and manipulations.
	 * Similarly in do_mmap and in do_brk_flags.
	 */
	if (vma_is_anonymous(vma)) {
		BUG_ON(vma->anon_vma);
		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
	}

	if (vma_link(mm, vma)) {
		if (vma->vm_flags & VM_ACCOUNT)
			vm_unacct_memory(charged);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Return true if the calling process may expand its vm space by the passed
 * number of pages.
 */
bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
{
	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
		return false;

	if (is_data_mapping(flags) &&
	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
		/* Workaround for Valgrind */
		if (rlimit(RLIMIT_DATA) == 0 &&
		    mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
			return true;

		pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
			     current->comm, current->pid,
			     (mm->data_vm + npages) << PAGE_SHIFT,
			     rlimit(RLIMIT_DATA),
			     ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");

		if (!ignore_rlimit_data)
			return false;
	}

	return true;
}

void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) + npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}
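
/*
 * Illustrative sketch (not part of this file): mapping paths generally pair
 * the two helpers above - may_expand_vm() before committing to a new
 * mapping and vm_stat_account() once the VMA is in place.  A hypothetical
 * example (example_account_mapping() is a made-up name):
 *
 *	static int example_account_mapping(struct mm_struct *mm,
 *					   vm_flags_t vm_flags,
 *					   unsigned long npages)
 *	{
 *		if (!may_expand_vm(mm, vm_flags, npages))
 *			return -ENOMEM;
 *
 *		// ... create and link the VMA here ...
 *
 *		vm_stat_account(mm, vm_flags, npages);
 *		return 0;
 *	}
 */
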
static vm_fault_t special_mapping_fault(struct vm_fault *vmf);

/*
 * Close hook, called for unmap() and on the old vma for mremap().
 *
 * Having a close hook prevents vma merging regardless of flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
	const struct vm_special_mapping *sm = vma->vm_private_data;

	if (sm->close)
		sm->close(sm, vma);
}

static const char *special_mapping_name(struct vm_area_struct *vma)
{
	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
}

static int special_mapping_mremap(struct vm_area_struct *new_vma)
{
	struct vm_special_mapping *sm = new_vma->vm_private_data;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	if (sm->mremap)
		return sm->mremap(sm, new_vma);

	return 0;
}

static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
{
	/*
	 * Forbid splitting special mappings - kernel has expectations over
	 * the number of pages in mapping. Together with VM_DONTEXPAND
	 * the size of vma should stay the same over the special mapping's
	 * lifetime.
	 */
	return -EINVAL;
}

static const struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
	.mremap = special_mapping_mremap,
	.name = special_mapping_name,
	/* vDSO code relies that VVAR can't be accessed remotely */
	.access = NULL,
	.may_split = special_mapping_split,
};

static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgoff_t pgoff;
	struct page **pages;
	struct vm_special_mapping *sm = vma->vm_private_data;

	if (sm->fault)
		return sm->fault(sm, vmf->vma, vmf);

	pages = sm->pages;

	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
		pgoff--;

	if (*pages) {
		struct page *page = *pages;

		get_page(page);
		vmf->page = page;
		return 0;
	}

	return VM_FAULT_SIGBUS;
}

static struct vm_area_struct *__install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, void *priv,
	const struct vm_operations_struct *ops)
{
	int ret;
	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);
	if (unlikely(vma == NULL))
		return ERR_PTR(-ENOMEM);

	vma_set_range(vma, addr, addr + len, 0);
	vm_flags_init(vma, (vm_flags | mm->def_flags |
		      VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	vma->vm_ops = ops;
	vma->vm_private_data = priv;

	ret = insert_vm_struct(mm, vma);
	if (ret)
		goto out;

	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);

	perf_event_mmap(vma);

	return vma;

out:
	vm_area_free(vma);
	return ERR_PTR(ret);
}

bool vma_is_special_mapping(const struct vm_area_struct *vma,
	const struct vm_special_mapping *sm)
{
	return vma->vm_private_data == sm &&
		vma->vm_ops == &special_mapping_vmops;
}

/*
 * Called with mm->mmap_lock held for writing.
 * Insert a new vma covering the given region, with the given flags.
 * Its pages are supplied by the given array of struct page *.
 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
 * The region past the last page supplied will always produce SIGBUS.
 * The array pointer and the pages it points to are assumed to stay alive
 * for as long as this mapping might exist.
 */
struct vm_area_struct *_install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, const struct vm_special_mapping *spec)
{
	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
					&special_mapping_vmops);
}
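
/*
 * Illustrative sketch (not part of this file): a typical user of the API
 * above is architecture code installing a vDSO-like area.  It declares a
 * static vm_special_mapping with a name and a NULL-terminated page array,
 * then calls _install_special_mapping() with the mmap_lock held for
 * writing.  The identifiers example_pages, example_spec and
 * example_install() are made up:
 *
 *	static struct page *example_pages[2];	// [0] = the page, [1] = NULL
 *
 *	static struct vm_special_mapping example_spec = {
 *		.name	= "[example]",
 *		.pages	= example_pages,
 *	};
 *
 *	static int example_install(struct mm_struct *mm, unsigned long addr)
 *	{
 *		struct vm_area_struct *vma;
 *
 *		vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *					       VM_READ | VM_MAYREAD,
 *					       &example_spec);
 *		return IS_ERR(vma) ? PTR_ERR(vma) : 0;
 *	}
 */
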
/*
 * initialise the percpu counter for VM
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K);
	return 0;
}
subsys_initcall(init_user_reserve);

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K);
	return 0;
}
subsys_initcall(init_admin_reserve);

/*
 * Reinitialise user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB. These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top. It may make sense to increase or even disable the
 * reserve depending on the existence of swap or variations in the recovery
 * tools. So, the admin may have changed them.
 *
 * If memory is added and the reserves have been eliminated or increased above
 * the default max, then we'll trust the admin.
 *
 * If memory is removed and there isn't enough free memory, then we
 * need to reset the reserves.
 *
 * Otherwise keep the reserve set by the admin.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	unsigned long tmp, free_kbytes;

	switch (action) {
	case MEM_ONLINE:
		/* Default max is 128MB. Leave alone if modified by operator. */
		tmp = sysctl_user_reserve_kbytes;
		if (tmp > 0 && tmp < SZ_128K)
			init_user_reserve();

		/* Default max is 8MB. Leave alone if modified by operator. */
		tmp = sysctl_admin_reserve_kbytes;
		if (tmp > 0 && tmp < SZ_8K)
			init_admin_reserve();

		break;
	case MEM_OFFLINE:
		free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

		if (sysctl_user_reserve_kbytes > free_kbytes) {
			init_user_reserve();
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
		}

		if (sysctl_admin_reserve_kbytes > free_kbytes) {
			init_admin_reserve();
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __meminit init_reserve_notifier(void)
{
	if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
		pr_err("Failed registering memory add/remove notifier for admin reserve\n");

	return 0;
}
subsys_initcall(init_reserve_notifier);
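
/*
 * Worked example for the reserve sizing above (illustrative numbers): with
 * roughly 2 GiB free, free_kbytes is about 2097152, and free_kbytes / 32 is
 * 65536 kB (64 MiB).  The user reserve is therefore 64 MiB, below its
 * 128 MiB cap (SZ_128K kB), while the admin reserve is clamped to
 * min(65536, SZ_8K) = 8192 kB, i.e. 8 MiB.
 */
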
/*
 * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between
 * this VMA and its relocated range, which will now reside at [vma->vm_start -
 * shift, vma->vm_end - shift).
 *
 * This function is almost certainly NOT what you want for anything other than
 * early executable temporary stack relocation.
 */
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
{
	/*
	 * The process proceeds as follows:
	 *
	 * 1) Use shift to calculate the new vma endpoints.
	 * 2) Extend vma to cover both the old and new ranges.  This ensures the
	 *    arguments passed to subsequent functions are consistent.
	 * 3) Move vma's page tables to the new range.
	 * 4) Free up any cleared pgd range.
	 * 5) Shrink the vma to cover only the new range.
	 */

	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	VMA_ITERATOR(vmi, mm, new_start);
	struct vm_area_struct *next;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != vma_next(&vmi))
		return -EFAULT;

	vma_iter_prev_range(&vmi);
	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_expand(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length, false, true))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	next = vma_next(&vmi);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			next ? next->vm_start : USER_PGTABLES_CEILING);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start): some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			next ? next->vm_start : USER_PGTABLES_CEILING);
	}
	tlb_finish_mmu(&tlb);

	vma_prev(&vmi);
	/* Shrink the vma to just the new range */
	return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
}
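
/*
 * Illustrative sketch (not part of this file): the intended caller is
 * exec-time stack setup, which has built the temporary stack VMA at its
 * original location and wants it moved down to a final, lower address.  A
 * hypothetical call site (example_shift_stack() is a made-up name), running
 * with the mmap_lock held for writing, would be:
 *
 *	static int example_shift_stack(struct vm_area_struct *vma,
 *				       unsigned long new_start)
 *	{
 *		unsigned long shift = vma->vm_start - new_start;
 *
 *		return relocate_vma_down(vma, shift);
 *	}
 */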