// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

/*
 * check_brk_limits() - Use platform specific check of range & verify mlock
 * limits.
 * @addr: The address to check
 * @len: The size of increase.
 *
 * Return: 0 on success.
 */
static int check_brk_limits(unsigned long addr, unsigned long len)
{
	unsigned long mapped_addr;

	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (IS_ERR_VALUE(mapped_addr))
		return mapped_addr;

	return mlock_future_ok(current->mm, current->mm->def_flags, len)
		? 0 : -EAGAIN;
}
static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		unsigned long addr, unsigned long request, unsigned long flags);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *brkvma, *next = NULL;
	unsigned long min_brk;
	bool populate = false;
	LIST_HEAD(uf);
	struct vma_iterator vmi;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit in the case where the limit is
	 * not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		/* Search one past newbrk */
		vma_iter_init(&vmi, mm, newbrk);
		brkvma = vma_find(&vmi, oldbrk);
		if (!brkvma || brkvma->vm_start >= oldbrk)
			goto out; /* mapping intersects with an existing non-brk vma. */
		/*
		 * mm->brk must be protected by write mmap_lock.
		 * do_vmi_align_munmap() will drop the lock on success, so
		 * update it before calling do_vmi_align_munmap().
		 */
		mm->brk = brk;
		if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
					/* unlock = */ true))
			goto out;

		goto success_unlocked;
	}

	if (check_brk_limits(oldbrk, newbrk - oldbrk))
		goto out;

	/*
	 * Only check if the next VMA is within the stack_guard_gap of the
	 * expansion area
	 */
	vma_iter_init(&vmi, mm, oldbrk);
	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	brkvma = vma_prev_limit(&vmi, mm->start_brk);
	/* Ok, looks good - let it rip */
	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
		goto out;

	mm->brk = brk;
	if (mm->def_flags & VM_LOCKED)
		populate = true;

success:
	mmap_write_unlock(mm);
success_unlocked:
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	mm->brk = origbrk;
	mmap_write_unlock(mm);
	return origbrk;
}
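/*
 * Editorial worked example of the page rounding above (not part of the
 * original source; assumes 4KB pages): if mm->brk is 0x10000800 and the
 * process calls brk(0x10000c00), then oldbrk == newbrk == 0x10001000,
 * so the syscall only records the new mm->brk value and neither unmaps
 * nor expands anything.
 */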
/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}
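/*
 * Editorial worked example (assumes PAGE_SIZE == 4KB and
 * mmap_min_addr == 0x10000): a hint of 0x8123 is masked down to 0x8000,
 * which is below mmap_min_addr, so round_hint_to_min() returns
 * PAGE_ALIGN(0x10000) == 0x10000. A NULL hint stays NULL, leaving the
 * allocator free to choose an address.
 */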
bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISBLK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISSOCK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	/* Special "we do even unsigned file positions" case */
	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
		return 0;

	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
	return ULONG_MAX;
}

static inline bool file_mmap_ok(struct file *file, struct inode *inode,
				unsigned long pgoff, unsigned long len)
{
	u64 maxsize = file_mmap_size_max(file, inode);

	if (maxsize && len > maxsize)
		return false;
	maxsize -= len;
	if (pgoff > maxsize >> PAGE_SHIFT)
		return false;
	return true;
}

/*
 * The caller must write-lock current->mm->mmap_lock.
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, vm_flags_t vm_flags,
			unsigned long pgoff, unsigned long *populate,
			struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	int pkey = 0;

	*populate = 0;

	if (!len)
		return -EINVAL;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we don't add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && path_noexec(&file->f_path)))
			prot |= PROT_EXEC;

	/* force arch specific MAP_FIXED handling in get_unmapped_area */
	if (flags & MAP_FIXED_NOREPLACE)
		flags |= MAP_FIXED;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/*
	 * addr is returned from get_unmapped_area,
	 * There are two cases:
	 * 1> MAP_FIXED == false
	 *	unallocated memory, no need to check sealing.
	 * 2> MAP_FIXED == true
	 *	sealing is checked inside mmap_region when
	 *	do_vmi_munmap is called.
	 */

	if (prot == PROT_EXEC) {
		pkey = execute_only_pkey(mm);
		if (pkey < 0)
			pkey = 0;
	}

	/* Do simple checking here so the lower-level routines won't have
	 * to. We assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Obtain the address to map to. We verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (flags & MAP_FIXED_NOREPLACE) {
		if (find_vma_intersection(mm, addr, addr + len))
			return -EEXIST;
	}

	if (flags & MAP_LOCKED)
		if (!can_do_mlock())
			return -EPERM;

	if (!mlock_future_ok(mm, vm_flags, len))
		return -EAGAIN;

	if (file) {
		struct inode *inode = file_inode(file);
		unsigned long flags_mask;

		if (!file_mmap_ok(file, inode, pgoff, len))
			return -EOVERFLOW;

		flags_mask = LEGACY_MAP_MASK;
		if (file->f_op->fop_flags & FOP_MMAP_SYNC)
			flags_mask |= MAP_SYNC;

		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/*
			 * Force use of MAP_SHARED_VALIDATE with non-legacy
			 * flags. E.g. MAP_SYNC is dangerous to use with
			 * MAP_SHARED as you don't know which consistency model
			 * you will get. We silently ignore unsupported flags
			 * with MAP_SHARED to preserve backward compatibility.
			 */
			flags &= LEGACY_MAP_MASK;
			fallthrough;
		case MAP_SHARED_VALIDATE:
			if (flags & ~flags_mask)
				return -EOPNOTSUPP;
			if (prot & PROT_WRITE) {
				if (!(file->f_mode & FMODE_WRITE))
					return -EACCES;
				if (IS_SWAPFILE(file->f_mapping->host))
					return -ETXTBSY;
			}

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
			fallthrough;
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (path_noexec(&file->f_path)) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!file->f_op->mmap)
				return -ENODEV;
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_DROPPABLE:
			if (VM_DROPPABLE == VM_NONE)
				return -ENOTSUPP;
			/*
			 * A locked or stack area makes no sense to be droppable.
			 *
			 * Also, since droppable pages can just go away at any time
			 * it makes no sense to copy them on fork or dump them.
			 *
			 * And don't attempt to combine with hugetlb for now.
			 */
			if (flags & (MAP_LOCKED | MAP_HUGETLB))
				return -EINVAL;
			if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
				return -EINVAL;

			vm_flags |= VM_DROPPABLE;

			/*
			 * If the pages can be dropped, then it doesn't make
			 * sense to reserve them.
			 */
			vm_flags |= VM_NORESERVE;

			/*
			 * Likewise, they're volatile enough that they
			 * shouldn't survive forks or coredumps.
			 */
			vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
			fallthrough;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if (flags & MAP_NORESERVE) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}
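/*
 * Editorial note on the MAP_TYPE handling in do_mmap() above:
 * mmap(..., MAP_SHARED | MAP_SYNC, ...) silently drops MAP_SYNC because
 * MAP_SHARED masks the request down to LEGACY_MAP_MASK, whereas
 * mmap(..., MAP_SHARED_VALIDATE | MAP_SYNC, ...) either honours the flag
 * (when the file's fops advertise FOP_MMAP_SYNC) or fails with
 * -EOPNOTSUPP.
 */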
unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		file = fget(fd);
		if (!file)
			return -EBADF;
		if (is_file_hugepages(file)) {
			len = ALIGN(len, huge_page_size(hstate_file(file)));
		} else if (unlikely(flags & MAP_HUGETLB)) {
			retval = -EINVAL;
			goto out_fput;
		}
	} else if (flags & MAP_HUGETLB) {
		struct hstate *hs;

		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (!hs)
			return -EINVAL;

		len = ALIGN(len, huge_page_size(hs));
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
	if (file)
		fput(file);
	return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM;
	 * VM_HUGETLB may not be set yet, so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return false;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}
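/*
 * Editorial examples of the accountable_mapping() rule above: a
 * MAP_PRIVATE PROT_READ|PROT_WRITE mapping (VM_WRITE set, VM_SHARED and
 * VM_NORESERVE clear) is accounted; a shared writable mapping or any
 * mapping created with MAP_NORESERVE is not.
 */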
/**
 * unmapped_area() - Find an area between the low_limit and the high_limit with
 * the correct alignment and offset, all from @info. Note: current->mm is used
 * for the search.
 *
 * @info: The unmapped area information including the range [low_limit,
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;
	VMA_ITERATOR(vmi, current->mm, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask + info->start_gap;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	/*
	 * Adjust for the gap first so it doesn't interfere with the
	 * later alignment. The first step is the minimum needed to
	 * fulfill the start gap, the next step is the minimum to align
	 * that. It is the minimum needed to fulfill both.
	 */
	gap = vma_iter_addr(&vmi) + info->start_gap;
	gap += (info->align_offset - gap) & info->align_mask;
	tmp = vma_next(&vmi);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap + length - 1) {
			low_limit = tmp->vm_end;
			vma_iter_reset(&vmi);
			goto retry;
		}
	} else {
		tmp = vma_prev(&vmi);
		if (tmp && vm_end_gap(tmp) > gap) {
			low_limit = vm_end_gap(tmp);
			vma_iter_reset(&vmi);
			goto retry;
		}
	}

	return gap;
}
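/*
 * Editorial worked example of the alignment step in unmapped_area()
 * (values illustrative): with a candidate gap at 0x7f1200201000,
 * start_gap == 0, align_offset == 0 and align_mask == 0x1fffff (2MB - 1,
 * as used for THP-aligned searches):
 *
 *	gap  = 0x7f1200201000
 *	gap += (0 - gap) & 0x1fffff	-> gap = 0x7f1200400000
 *
 * i.e. the candidate address is rounded up to the next 2MB boundary
 * before the guard-gap checks run.
 */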
/**
 * unmapped_area_topdown() - Find an area between the low_limit and the
 * high_limit with the correct alignment and offset at the highest available
 * address, all from @info. Note: current->mm is used for the search.
 *
 * @info: The unmapped area information including the range [low_limit,
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap, gap_end;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;
	VMA_ITERATOR(vmi, current->mm, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask + info->start_gap;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	gap = vma_iter_end(&vmi) - info->length;
	gap -= (gap - info->align_offset) & info->align_mask;
	gap_end = vma_iter_end(&vmi);
	tmp = vma_next(&vmi);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap_end) {
			high_limit = vm_start_gap(tmp);
			vma_iter_reset(&vmi);
			goto retry;
		}
	} else {
		tmp = vma_prev(&vmi);
		if (tmp && vm_end_gap(tmp) > gap) {
			high_limit = tmp->vm_start;
			vma_iter_reset(&vmi);
			goto retry;
		}
	}

	return gap;
}

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */
unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long addr;

	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		addr = unmapped_area_topdown(info);
	else
		addr = unmapped_area(info);

	trace_vm_unmapped_area(addr, info);
	return addr;
}
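/*
 * Minimal usage sketch for vm_unmapped_area() (editorial; values are
 * illustrative only): a caller wanting a 2MB-aligned, 2MB-sized gap
 * below TASK_SIZE could do:
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length     = SZ_2M;
 *	info.low_limit  = current->mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = SZ_2M - 1;
 *	addr = vm_unmapped_area(&info);
 *
 * Failure is signalled by an error value with the low bits set
 * (-ENOMEM), which callers detect with IS_ERR_VALUE() or
 * offset_in_page() as described below.
 */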
/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct vm_unmapped_area_info info = {};
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = mmap_end;
	return vm_unmapped_area(&info);
}

#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff,
		       unsigned long flags)
{
	return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
}
#endif

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info = {};
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	/* requested length too big for entire address space */
	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = mmap_end;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags)
{
	return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
}
#endif

#ifndef HAVE_ARCH_UNMAPPED_AREA_VMFLAGS
unsigned long
arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len,
			       unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
	return arch_get_unmapped_area(filp, addr, len, pgoff, flags);
}

unsigned long
arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr,
				       unsigned long len, unsigned long pgoff,
				       unsigned long flags, vm_flags_t vm_flags)
{
	return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
}
#endif

unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
					   unsigned long addr, unsigned long len,
					   unsigned long pgoff, unsigned long flags,
					   vm_flags_t vm_flags)
{
	if (test_bit(MMF_TOPDOWN, &mm->flags))
		return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff,
							      flags, vm_flags);
	return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, vm_flags);
}

unsigned long
__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		    unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long)
				  = NULL;

	unsigned long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (file) {
		if (file->f_op->get_unmapped_area)
			get_area = file->f_op->get_unmapped_area;
	} else if (flags & MAP_SHARED) {
		/*
		 * mmap_region() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge.
		 */
		get_area = shmem_get_unmapped_area;
	}

	/* Always treat pgoff as zero for anonymous memory. */
	if (!file)
		pgoff = 0;

	if (get_area) {
		addr = get_area(file, addr, len, pgoff, flags);
	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		/* Ensures that larger anonymous mappings are THP aligned. */
		addr = thp_get_unmapped_area_vmflags(file, addr, len,
						     pgoff, flags, vm_flags);
	} else {
		addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
						    pgoff, flags, vm_flags);
	}
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (offset_in_page(addr))
		return -EINVAL;

	error = security_mmap_addr(addr);
	return error ? error : addr;
}

unsigned long
mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
		     unsigned long addr, unsigned long len,
		     unsigned long pgoff, unsigned long flags)
{
	if (test_bit(MMF_TOPDOWN, &mm->flags))
		return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags);
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}
EXPORT_SYMBOL(mm_get_unmapped_area);
/**
 * find_vma_intersection() - Look up the first VMA which intersects the interval
 * @mm: The process address space.
 * @start_addr: The inclusive start user address.
 * @end_addr: The exclusive end user address.
 *
 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
 * start_addr < end_addr.
 */
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
					     unsigned long start_addr,
					     unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
EXPORT_SYMBOL(find_vma_intersection);

/**
 * find_vma() - Find the VMA for a given address, or the next VMA.
 * @mm: The mm_struct to check
 * @addr: The address
 *
 * Returns: The VMA associated with addr, or the next VMA.
 * May return %NULL in the case of no VMA at addr or above.
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	unsigned long index = addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, ULONG_MAX);
}
EXPORT_SYMBOL(find_vma);

/**
 * find_vma_prev() - Find the VMA for a given address, or the next vma and
 * set %pprev to the previous VMA, if any.
 * @mm: The mm_struct to check
 * @addr: The address
 * @pprev: The pointer to set to the previous VMA
 *
 * Note that the RCU lock is not taken here since the external mmap_lock
 * is used instead.
 *
 * Returns: The VMA associated with @addr, or the next vma.
 * May return %NULL in the case of no vma at addr or above.
 */
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
	      struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

/*
 * Verify that the stack growth is acceptable and
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */
static int acct_stack_growth(struct vm_area_struct *vma,
			     unsigned long size, unsigned long grow)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long new_start;

	/* address space limit tests */
	if (!may_expand_vm(mm, vma->vm_flags, grow))
		return -ENOMEM;

	/* Stack limit test */
	if (size > rlimit(RLIMIT_STACK))
		return -ENOMEM;

	/* mlock limit tests */
	if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
		return -ENOMEM;

	/* Check to ensure the stack will not grow into a hugetlb-only region */
	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
			vma->vm_end - size;
	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
		return -EFAULT;

	/*
	 * Overcommit.. This must be the final test, as it will
	 * update security statistics.
	 */
	if (security_vm_enough_memory_mm(mm, grow))
		return -ENOMEM;

	return 0;
}
#if defined(CONFIG_STACK_GROWSUP)
/*
 * PA-RISC uses this for its stack.
 * vma is the last one with address > vma->vm_end. Have to extend vma.
 */
static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next;
	unsigned long gap_addr;
	int error = 0;
	VMA_ITERATOR(vmi, mm, vma->vm_start);

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	/* Guard against exceeding limits of the address space. */
	address &= PAGE_MASK;
	if (address >= (TASK_SIZE & PAGE_MASK))
		return -ENOMEM;
	address += PAGE_SIZE;

	/* Enforce stack_guard_gap */
	gap_addr = address + stack_guard_gap;

	/* Guard against overflow */
	if (gap_addr < address || gap_addr > TASK_SIZE)
		gap_addr = TASK_SIZE;

	next = find_vma_intersection(mm, vma->vm_end, gap_addr);
	if (next && vma_is_accessible(next)) {
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
		/* Check that both stack segments have the same anon_vma? */
	}

	if (next)
		vma_iter_prev_range_limit(&vmi, address);

	vma_iter_config(&vmi, vma->vm_start, address);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma))) {
		vma_iter_free(&vmi);
		return -ENOMEM;
	}

	/* Lock the VMA before expanding to prevent concurrent page faults */
	vma_start_write(vma);
	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_lock in read mode. We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address > vma->vm_end) {
		unsigned long size, grow;

		size = address - vma->vm_start;
		grow = (address - vma->vm_end) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * We only hold a shared mmap_lock lock here, so
				 * we need to protect against concurrent vma
				 * expansions. anon_vma_lock_write() doesn't
				 * help here, as we don't guarantee that all
				 * growable vmas in a mm share the same root
				 * anon vma. So, we reuse mm->page_table_lock
				 * to guard against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_end = address;
				/* Overwrite old entry in mtree. */
				vma_iter_store(&vmi, vma);
				anon_vma_interval_tree_post_update_vma(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	vma_iter_free(&vmi);
	validate_mm(mm);
	return error;
}
#endif /* CONFIG_STACK_GROWSUP */
/*
 * vma is the first one with address < vma->vm_start. Have to extend vma.
 * mmap_lock held for writing.
 */
int expand_downwards(struct vm_area_struct *vma, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *prev;
	int error = 0;
	VMA_ITERATOR(vmi, mm, vma->vm_start);

	if (!(vma->vm_flags & VM_GROWSDOWN))
		return -EFAULT;

	address &= PAGE_MASK;
	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
		return -EPERM;

	/* Enforce stack_guard_gap */
	prev = vma_prev(&vmi);
	/* Check that both stack segments have the same anon_vma? */
	if (prev) {
		if (!(prev->vm_flags & VM_GROWSDOWN) &&
		    vma_is_accessible(prev) &&
		    (address - prev->vm_end < stack_guard_gap))
			return -ENOMEM;
	}

	if (prev)
		vma_iter_next_range_limit(&vmi, vma->vm_start);

	vma_iter_config(&vmi, address, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma))) {
		vma_iter_free(&vmi);
		return -ENOMEM;
	}

	/* Lock the VMA before expanding to prevent concurrent page faults */
	vma_start_write(vma);
	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_lock in read mode. We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address < vma->vm_start) {
		unsigned long size, grow;

		size = vma->vm_end - address;
		grow = (vma->vm_start - address) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (grow <= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * We only hold a shared mmap_lock lock here, so
				 * we need to protect against concurrent vma
				 * expansions. anon_vma_lock_write() doesn't
				 * help here, as we don't guarantee that all
				 * growable vmas in a mm share the same root
				 * anon vma. So, we reuse mm->page_table_lock
				 * to guard against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_start = address;
				vma->vm_pgoff -= grow;
				/* Overwrite old entry in mtree. */
				vma_iter_store(&vmi, vma);
				anon_vma_interval_tree_post_update_vma(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	vma_iter_free(&vmi);
	validate_mm(mm);
	return error;
}

/* enforced gap between the expanding stack and other mappings. */
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;

static int __init cmdline_parse_stack_guard_gap(char *p)
{
	unsigned long val;
	char *endptr;

	val = simple_strtoul(p, &endptr, 10);
	if (!*endptr)
		stack_guard_gap = val << PAGE_SHIFT;

	return 1;
}
__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
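/*
 * Editorial example: the guard gap is measured in pages, so with 4KB
 * pages the default of 256 pages is 1MB. Booting with
 * "stack_guard_gap=512" widens it to 2MB, and "stack_guard_gap=0"
 * disables the gap altogether.
 */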
#ifdef CONFIG_STACK_GROWSUP
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
{
	return expand_upwards(vma, address);
}

struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	if (!prev)
		return NULL;
	if (expand_stack_locked(prev, addr))
		return NULL;
	if (prev->vm_flags & VM_LOCKED)
		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
	return prev;
}
#else
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
{
	return expand_downwards(vma, address);
}

struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(mm, addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	start = vma->vm_start;
	if (expand_stack_locked(vma, addr))
		return NULL;
	if (vma->vm_flags & VM_LOCKED)
		populate_vma_page_range(vma, addr, start, NULL);
	return vma;
}
#endif

#if defined(CONFIG_STACK_GROWSUP)

#define vma_expand_up(vma,addr) expand_upwards(vma, addr)
#define vma_expand_down(vma, addr) (-EFAULT)

#else

#define vma_expand_up(vma,addr) (-EFAULT)
#define vma_expand_down(vma, addr) expand_downwards(vma, addr)

#endif

/*
 * expand_stack(): legacy interface for page faulting. Don't use unless
 * you have to.
 *
 * This is called with the mm locked for reading, drops the lock, takes
 * the lock for writing, tries to look up a vma again, expands it if
 * necessary, and downgrades the lock to reading again.
 *
 * If no vma is found or it can't be expanded, it returns NULL and has
 * dropped the lock.
 */
struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	mmap_read_unlock(mm);
	if (mmap_write_lock_killable(mm))
		return NULL;

	vma = find_vma_prev(mm, addr, &prev);
	if (vma && vma->vm_start <= addr)
		goto success;

	if (prev && !vma_expand_up(prev, addr)) {
		vma = prev;
		goto success;
	}

	if (vma && !vma_expand_down(vma, addr))
		goto success;

	mmap_write_unlock(mm);
	return NULL;

success:
	mmap_write_downgrade(mm);
	return vma;
}

/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length to be munmapped.
 * @uf: The userfaultfd list_head
 *
 * Return: 0 on success, error otherwise.
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
	      struct list_head *uf)
{
	VMA_ITERATOR(vmi, mm, start);

	return do_vmi_munmap(&vmi, mm, start, len, uf, false);
}
unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	pgoff_t pglen = PHYS_PFN(len);
	struct vm_area_struct *merge;
	unsigned long charged = 0;
	struct vma_munmap_struct vms;
	struct ma_state mas_detach;
	struct maple_tree mt_detach;
	unsigned long end = addr + len;
	bool writable_file_mapping = false;
	int error = -ENOMEM;
	VMA_ITERATOR(vmi, mm, addr);
	VMG_STATE(vmg, mm, &vmi, addr, end, vm_flags, pgoff);

	vmg.file = file;
	/* Find the first overlapping VMA */
	vma = vma_find(&vmi, end);
	init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);
	if (vma) {
		mt_init_flags(&mt_detach, vmi.mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
		mt_on_stack(mt_detach);
		mas_init(&mas_detach, &mt_detach, /* addr = */ 0);
		/* Prepare to unmap any existing mapping in the area */
		error = vms_gather_munmap_vmas(&vms, &mas_detach);
		if (error)
			goto gather_failed;

		vmg.next = vms.next;
		vmg.prev = vms.prev;
		vma = NULL;
	} else {
		vmg.next = vma_iter_next_rewind(&vmi, &vmg.prev);
	}

	/* Check against address space limit. */
	if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages))
		goto abort_munmap;

	/*
	 * Private writable mapping: check memory availability
	 */
	if (accountable_mapping(file, vm_flags)) {
		charged = pglen;
		charged -= vms.nr_accounted;
		if (charged && security_vm_enough_memory_mm(mm, charged))
			goto abort_munmap;

		vms.nr_accounted = 0;
		vm_flags |= VM_ACCOUNT;
		vmg.flags = vm_flags;
	}
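
	/*
	 * Editorial example of the charging above: mapping 10 pages over a
	 * region that already contains 4 accounted pages leaves charged =
	 * 10 - 4 = 6, so only the net growth is charged; zeroing
	 * vms.nr_accounted hands the old pages' charge over to the new
	 * mapping instead of releasing it at unmap completion.
	 */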
	vma = vma_merge_new_range(&vmg);
	if (vma)
		goto expanded;
	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. The address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
	 */
	vma = vm_area_alloc(mm);
	if (!vma)
		goto unacct_error;

	vma_iter_config(&vmi, addr, end);
	vma_set_range(vma, addr, end, pgoff);
	vm_flags_init(vma, vm_flags);
	vma->vm_page_prot = vm_get_page_prot(vm_flags);

	if (file) {
		vma->vm_file = get_file(file);
		/*
		 * call_mmap() may map PTE, so ensure there are no existing PTEs
		 * and call the vm_ops close function if one exists.
		 */
		vms_clean_up_area(&vms, &mas_detach);
		error = call_mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;

		if (vma_is_shared_maywrite(vma)) {
			error = mapping_map_writable(file->f_mapping);
			if (error)
				goto close_and_free_vma;

			writable_file_mapping = true;
		}

		/*
		 * Expansion is handled above, merging is handled below.
		 * Drivers should not alter the address of the VMA.
		 */
		error = -EINVAL;
		if (WARN_ON((addr != vma->vm_start)))
			goto close_and_free_vma;

		vma_iter_config(&vmi, addr, end);
		/*
		 * If vm_flags changed after call_mmap(), we should try merge
		 * vma again as we may succeed this time.
		 */
		if (unlikely(vm_flags != vma->vm_flags && vmg.prev)) {
			vmg.flags = vma->vm_flags;
			/* If this fails, state is reset ready for a reattempt. */
			merge = vma_merge_new_range(&vmg);

			if (merge) {
				/*
				 * ->mmap() can change vma->vm_file and fput
				 * the original file. So fput the vma->vm_file
				 * here or we would add an extra fput for file
				 * and cause general protection fault
				 * ultimately.
				 */
				fput(vma->vm_file);
				vm_area_free(vma);
				vma = merge;
				/* Update vm_flags to pick up the change. */
				vm_flags = vma->vm_flags;
				goto unmap_writable;
			}
			vma_iter_config(&vmi, addr, end);
		}

		vm_flags = vma->vm_flags;
	} else if (vm_flags & VM_SHARED) {
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	} else {
		vma_set_anonymous(vma);
	}

	if (map_deny_write_exec(vma, vma->vm_flags)) {
		error = -EACCES;
		goto close_and_free_vma;
	}

	/* Allow architectures to sanity-check the vm_flags */
	error = -EINVAL;
	if (!arch_validate_flags(vma->vm_flags))
		goto close_and_free_vma;

	error = -ENOMEM;
	if (vma_iter_prealloc(&vmi, vma))
		goto close_and_free_vma;

	/* Lock the VMA since it is modified after insertion into VMA tree */
	vma_start_write(vma);
	vma_iter_store(&vmi, vma);
	mm->map_count++;
	vma_link_file(vma);

	/*
	 * vma_merge_new_range() calls khugepaged_enter_vma() too, the below
	 * call covers the non-merge case.
	 */
	khugepaged_enter_vma(vma, vma->vm_flags);

	/* Once vma denies write, undo our temporary denial count */
unmap_writable:
	if (writable_file_mapping)
		mapping_unmap_writable(file->f_mapping);
	file = vma->vm_file;
	ksm_add_vma(vma);
expanded:
	perf_event_mmap(vma);

	/* Unmap any existing mapping in the area */
	vms_complete_munmap_vmas(&vms, &mas_detach);

	vm_stat_account(mm, vm_flags, pglen);
	if (vm_flags & VM_LOCKED) {
		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
					is_vm_hugetlb_page(vma) ||
					vma == get_gate_vma(current->mm))
			vm_flags_clear(vma, VM_LOCKED_MASK);
		else
			mm->locked_vm += pglen;
	}

	if (file)
		uprobe_mmap(vma);

	/*
	 * New (or expanded) vma always gets soft dirty status.
	 * Otherwise user-space soft-dirty page tracker won't
	 * be able to distinguish the situation when a vma area is unmapped,
	 * then a new one mapped in-place (which must be aimed as
	 * a completely new data area).
	 */
	vm_flags_set(vma, VM_SOFTDIRTY);

	vma_set_page_prot(vma);

	validate_mm(mm);
	return addr;

close_and_free_vma:
	if (file && !vms.closed_vm_ops && vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (file || vma->vm_file) {
unmap_and_free_vma:
		fput(vma->vm_file);
		vma->vm_file = NULL;

		vma_iter_set(&vmi, vma->vm_end);
		/* Undo any partial mapping done by a device driver. */
		unmap_region(&vmi.mas, vma, vmg.prev, vmg.next);
	}
	if (writable_file_mapping)
		mapping_unmap_writable(file->f_mapping);
free_vma:
	vm_area_free(vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);

abort_munmap:
	vms_abort_munmap_vmas(&vms, &mas_detach);
gather_failed:
	validate_mm(mm);
	return error;
}
static int __vm_munmap(unsigned long start, size_t len, bool unlock)
{
	int ret;
	struct mm_struct *mm = current->mm;
	LIST_HEAD(uf);
	VMA_ITERATOR(vmi, mm, start);

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
	if (ret || !unlock)
		mmap_write_unlock(mm);

	userfaultfd_unmap_complete(mm, &uf);
	return ret;
}

int vm_munmap(unsigned long start, size_t len)
{
	return __vm_munmap(start, len, false);
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	addr = untagged_addr(addr);
	return __vm_munmap(addr, len, true);
}

/*
 * Emulation of deprecated remap_file_pages() syscall.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long populate = 0;
	unsigned long ret = -EINVAL;
	struct file *file;

	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
		     current->comm, current->pid);

	if (prot)
		return ret;
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	if (start + size <= start)
		return ret;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	vma = vma_lookup(mm, start);

	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (start + size > vma->vm_end) {
		VMA_ITERATOR(vmi, mm, vma->vm_end);
		struct vm_area_struct *next, *prev = vma;

		for_each_vma_range(vmi, next, start + size) {
			/* hole between vmas ? */
			if (next->vm_start != prev->vm_end)
				goto out;

			if (next->vm_file != vma->vm_file)
				goto out;

			if (next->vm_flags != vma->vm_flags)
				goto out;

			if (start + size <= next->vm_end)
				break;

			prev = next;
		}

		if (!next)
			goto out;
	}

	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;

	flags &= MAP_NONBLOCK;
	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
	if (vma->vm_flags & VM_LOCKED)
		flags |= MAP_LOCKED;

	file = get_file(vma->vm_file);
	ret = do_mmap(vma->vm_file, start, size,
		      prot, flags, 0, pgoff, &populate, NULL);
	fput(file);
out:
	mmap_write_unlock(mm);
	if (populate)
		mm_populate(ret, populate);
	if (!IS_ERR_VALUE(ret))
		ret = 0;
	return ret;
}
/*
 * do_brk_flags() - Increase the brk vma if the flags match.
 * @vmi: The vma iterator
 * @vma: The vma
 * @addr: The start address
 * @len: The length of the increase
 * @flags: The VMA Flags
 *
 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
 * do not match then create a new anonymous VMA. Eventually we may be able to
 * do some brk-specific accounting here.
 */
static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long addr, unsigned long len, unsigned long flags)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Check against address space limits by the changed size
	 * Note: This happens *after* clearing old mappings in some code paths.
	 */
	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	/*
	 * Expand the existing vma if possible; Note that singular lists do not
	 * occur after forking, so the expand will only happen on new VMAs.
	 */
	if (vma && vma->vm_end == addr) {
		VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));

		vmg.prev = vma;
		vma_iter_next_range(vmi);

		if (vma_merge_new_range(&vmg))
			goto out;
		else if (vmg_nomem(&vmg))
			goto unacct_fail;
	}

	if (vma)
		vma_iter_next_range(vmi);
	/* create a vma struct for an anonymous mapping */
	vma = vm_area_alloc(mm);
	if (!vma)
		goto unacct_fail;

	vma_set_anonymous(vma);
	vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
	vm_flags_init(vma, flags);
	vma->vm_page_prot = vm_get_page_prot(flags);
	vma_start_write(vma);
	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
		goto mas_store_fail;

	mm->map_count++;
	validate_mm(mm);
	ksm_add_vma(vma);
out:
	perf_event_mmap(vma);
	mm->total_vm += len >> PAGE_SHIFT;
	mm->data_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED)
		mm->locked_vm += (len >> PAGE_SHIFT);
	vm_flags_set(vma, VM_SOFTDIRTY);
	return 0;

mas_store_fail:
	vm_area_free(vma);
unacct_fail:
	vm_unacct_memory(len >> PAGE_SHIFT);
	return -ENOMEM;
}
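/*
 * Editorial illustration of the fast path above: when the heap already
 * ends at @addr and the flags match, the existing brk VMA is extended
 * in place via vma_merge_new_range(), so repeated brk() growth reuses
 * one VMA; only a flag mismatch (or a NULL @vma) allocates a fresh
 * anonymous VMA.
 */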
int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long len;
	int ret;
	bool populate;
	LIST_HEAD(uf);
	VMA_ITERATOR(vmi, mm, addr);

	len = PAGE_ALIGN(request);
	if (len < request)
		return -ENOMEM;
	if (!len)
		return 0;

	/* Until we need other flags, refuse anything except VM_EXEC. */
	if ((flags & (~VM_EXEC)) != 0)
		return -EINVAL;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = check_brk_limits(addr, len);
	if (ret)
		goto limits_failed;

	ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
	if (ret)
		goto munmap_failed;

	vma = vma_prev(&vmi);
	ret = do_brk_flags(&vmi, vma, addr, len, flags);
	populate = ((mm->def_flags & VM_LOCKED) != 0);
	mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
	if (populate && !ret)
		mm_populate(addr, len);
	return ret;

munmap_failed:
limits_failed:
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL(vm_brk_flags);

/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	unsigned long nr_accounted = 0;
	VMA_ITERATOR(vmi, mm, 0);
	int count = 0;

	/* mm's last user has gone, and it's about to be pulled down */
	mmu_notifier_release(mm);

	mmap_read_lock(mm);
	arch_exit_mmap(mm);

	vma = vma_next(&vmi);
	if (!vma || unlikely(xa_is_zero(vma))) {
		/* Can happen if dup_mmap() received an OOM */
		mmap_read_unlock(mm);
		mmap_write_lock(mm);
		goto destroy;
	}

	lru_add_drain();
	flush_cache_mm(mm);
	tlb_gather_mmu_fullmm(&tlb, mm);
	/* update_hiwater_rss(mm) here? but nobody should be looking */
	/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
	unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
	mmap_read_unlock(mm);

	/*
	 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
	 * because the memory has been already freed.
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);
	mmap_write_lock(mm);
	mt_clear_in_rcu(&mm->mm_mt);
	vma_iter_set(&vmi, vma->vm_end);
	free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
		      USER_PGTABLES_CEILING, true);
	tlb_finish_mmu(&tlb);

	/*
	 * Walk the list again, actually closing and freeing it, with preemption
	 * enabled, without holding any MM locks besides the unreachable
	 * mmap_write_lock.
	 */
	vma_iter_set(&vmi, vma->vm_end);
	do {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		remove_vma(vma, /* unreachable = */ true, /* closed = */ false);
		count++;
		cond_resched();
		vma = vma_next(&vmi);
	} while (vma && likely(!xa_is_zero(vma)));

	BUG_ON(count != mm->map_count);

	trace_exit_mmap(mm);
destroy:
	__mt_destroy(&mm->mm_mt);
	mmap_write_unlock(mm);
	vm_unacct_memory(nr_accounted);
}
/* Insert vm structure into process list sorted by address
 * and into the inode's i_mmap tree. If vm_file is non-NULL
 * then i_mmap_rwsem is taken here.
 */
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	unsigned long charged = vma_pages(vma);

	if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
		return -ENOMEM;

	if ((vma->vm_flags & VM_ACCOUNT) &&
	     security_vm_enough_memory_mm(mm, charged))
		return -ENOMEM;

	/*
	 * The vm_pgoff of a purely anonymous vma should be irrelevant
	 * until its first write fault, when page's anon_vma and index
	 * are set. But now set the vm_pgoff it will almost certainly
	 * end up with (unless mremap moves it elsewhere before that
	 * first write fault), so /proc/pid/maps tells a consistent story.
	 *
	 * By setting it to reflect the virtual start address of the
	 * vma, merges and splits can happen in a seamless way, just
	 * using the existing file pgoff checks and manipulations.
	 * Similarly in do_mmap and in do_brk_flags.
	 */
	if (vma_is_anonymous(vma)) {
		BUG_ON(vma->anon_vma);
		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
	}

	if (vma_link(mm, vma)) {
		if (vma->vm_flags & VM_ACCOUNT)
			vm_unacct_memory(charged);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Return true if the calling process may expand its vm space by the passed
 * number of pages
 */
bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
{
	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
		return false;

	if (is_data_mapping(flags) &&
	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
		/* Workaround for Valgrind */
		if (rlimit(RLIMIT_DATA) == 0 &&
		    mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
			return true;

		pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
			     current->comm, current->pid,
			     (mm->data_vm + npages) << PAGE_SHIFT,
			     rlimit(RLIMIT_DATA),
			     ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");

		if (!ignore_rlimit_data)
			return false;
	}

	return true;
}

void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

static vm_fault_t special_mapping_fault(struct vm_fault *vmf);

/*
 * Close hook, called for unmap() and on the old vma for mremap().
 *
 * Having a close hook prevents vma merging regardless of flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
	const struct vm_special_mapping *sm = vma->vm_private_data;

	if (sm->close)
		sm->close(sm, vma);
}

static const char *special_mapping_name(struct vm_area_struct *vma)
{
	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
}

static int special_mapping_mremap(struct vm_area_struct *new_vma)
{
	struct vm_special_mapping *sm = new_vma->vm_private_data;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	if (sm->mremap)
		return sm->mremap(sm, new_vma);

	return 0;
}

static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
{
	/*
	 * Forbid splitting special mappings - kernel has expectations over
	 * the number of pages in mapping. Together with VM_DONTEXPAND
	 * the size of vma should stay the same over the special mapping's
	 * lifetime.
	 */
	return -EINVAL;
}
static vm_fault_t special_mapping_fault(struct vm_fault *vmf);

/*
 * Close hook, called for unmap() and on the old vma for mremap().
 *
 * Having a close hook prevents vma merging regardless of flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
	const struct vm_special_mapping *sm = vma->vm_private_data;

	if (sm->close)
		sm->close(sm, vma);
}

static const char *special_mapping_name(struct vm_area_struct *vma)
{
	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
}

static int special_mapping_mremap(struct vm_area_struct *new_vma)
{
	struct vm_special_mapping *sm = new_vma->vm_private_data;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	if (sm->mremap)
		return sm->mremap(sm, new_vma);

	return 0;
}

static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
{
	/*
	 * Forbid splitting special mappings - the kernel has expectations
	 * about the number of pages in the mapping.  Together with
	 * VM_DONTEXPAND, the size of the vma should stay the same over
	 * the special mapping's lifetime.
	 */
	return -EINVAL;
}

static const struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
	.mremap = special_mapping_mremap,
	.name = special_mapping_name,
	/* vDSO code relies on VVAR not being accessible remotely */
	.access = NULL,
	.may_split = special_mapping_split,
};

static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgoff_t pgoff;
	struct page **pages;
	struct vm_special_mapping *sm = vma->vm_private_data;

	if (sm->fault)
		return sm->fault(sm, vmf->vma, vmf);

	/* Walk the null-terminated page array to the faulting offset. */
	pages = sm->pages;
	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
		pgoff--;

	if (*pages) {
		struct page *page = *pages;
		get_page(page);
		vmf->page = page;
		return 0;
	}

	return VM_FAULT_SIGBUS;
}

static struct vm_area_struct *__install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, void *priv,
	const struct vm_operations_struct *ops)
{
	int ret;
	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);
	if (unlikely(vma == NULL))
		return ERR_PTR(-ENOMEM);

	vma_set_range(vma, addr, addr + len, 0);
	vm_flags_init(vma, (vm_flags | mm->def_flags |
		      VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	vma->vm_ops = ops;
	vma->vm_private_data = priv;

	ret = insert_vm_struct(mm, vma);
	if (ret)
		goto out;

	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);

	perf_event_mmap(vma);

	return vma;

out:
	vm_area_free(vma);
	return ERR_PTR(ret);
}

bool vma_is_special_mapping(const struct vm_area_struct *vma,
	const struct vm_special_mapping *sm)
{
	return vma->vm_private_data == sm &&
		vma->vm_ops == &special_mapping_vmops;
}

/*
 * Called with mm->mmap_lock held for writing.
 * Insert a new vma covering the given region, with the given flags.
 * Its pages are supplied by the given array of struct page *.
 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
 * The region past the last page supplied will always produce SIGBUS.
 * The array pointer and the pages it points to are assumed to stay alive
 * for as long as this mapping might exist.
 */
struct vm_area_struct *_install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, const struct vm_special_mapping *spec)
{
	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
					&special_mapping_vmops);
}
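/*
 * Example (illustrative sketch with made-up names): architecture code
 * typically installs a special mapping from a null-terminated page
 * array, roughly:
 *
 *	static struct page *demo_pages[2];	(slot [1] stays NULL)
 *	static const struct vm_special_mapping demo_mapping = {
 *		.name = "[demo]",
 *		.pages = demo_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD,
 *				       &demo_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * "demo_pages" and "demo_mapping" are hypothetical; real users are the
 * per-architecture vDSO and vvar mappings.
 */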
/*
 * Initialise the percpu counter for VM.
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB).
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K);
	return 0;
}
subsys_initcall(init_user_reserve);

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K);
	return 0;
}
subsys_initcall(init_admin_reserve);

/*
 * Reinitialise user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB. These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top. It may make sense to increase or even disable the
 * reserve depending on the existence of swap or variations in the recovery
 * tools. So, the admin may have changed them.
 *
 * If memory is added and the reserves have been eliminated or increased above
 * the default max, then we'll trust the admin.
 *
 * If memory is removed and there isn't enough free memory, then we
 * need to reset the reserves.
 *
 * Otherwise keep the reserve set by the admin.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	unsigned long tmp, free_kbytes;

	switch (action) {
	case MEM_ONLINE:
		/* Default max is 128MB. Leave alone if modified by operator. */
		tmp = sysctl_user_reserve_kbytes;
		if (tmp > 0 && tmp < SZ_128K)
			init_user_reserve();

		/* Default max is 8MB. Leave alone if modified by operator. */
		tmp = sysctl_admin_reserve_kbytes;
		if (tmp > 0 && tmp < SZ_8K)
			init_admin_reserve();

		break;
	case MEM_OFFLINE:
		free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

		if (sysctl_user_reserve_kbytes > free_kbytes) {
			init_user_reserve();
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
		}

		if (sysctl_admin_reserve_kbytes > free_kbytes) {
			init_admin_reserve();
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __meminit init_reserve_notifier(void)
{
	if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
		pr_err("Failed registering memory add/remove notifier for admin reserve\n");

	return 0;
}
subsys_initcall(init_reserve_notifier);
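/*
 * Worked example for the reserve sizing above: with 8GB free at boot,
 * free_kbytes = 8388608, so free_kbytes / 32 = 262144 kB (256MB); the
 * SZ_128K cap applies and vm.user_reserve_kbytes defaults to 131072 kB
 * (128MB).  The admin reserve is capped much lower, at SZ_8K
 * (8192 kB = 8MB).
 */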
/*
 * Relocate a VMA downwards by shift bytes.  There cannot be any VMAs
 * between this VMA and its relocated range, which will now reside at
 * [vma->vm_start - shift, vma->vm_end - shift).
 *
 * This function is almost certainly NOT what you want for anything other than
 * early executable temporary stack relocation.
 */
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
{
	/*
	 * The process proceeds as follows:
	 *
	 * 1) Use shift to calculate the new vma endpoints.
	 * 2) Extend vma to cover both the old and new ranges.  This ensures the
	 *    arguments passed to subsequent functions are consistent.
	 * 3) Move vma's page tables to the new range.
	 * 4) Free up any cleared pgd range.
	 * 5) Shrink the vma to cover only the new range.
	 */

	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	VMA_ITERATOR(vmi, mm, new_start);
	VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
	struct vm_area_struct *next;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * Ensure there are no vmas between where we want to go and where
	 * we are.
	 */
	if (vma != vma_next(&vmi))
		return -EFAULT;

	vma_iter_prev_range(&vmi);
	/*
	 * Cover the whole range: [new_start, old_end)
	 */
	vmg.vma = vma;
	if (vma_expand(&vmg))
		return -ENOMEM;

	/*
	 * Move the page tables downwards; on failure we rely on process
	 * cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length, false, true))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	next = vma_next(&vmi);
	if (new_end > old_start) {
		/*
		 * When the old and new regions overlap, clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			next ? next->vm_start : USER_PGTABLES_CEILING);
	} else {
		/*
		 * Otherwise, clean from old_start, so as not to touch the
		 * address space in [new_end, old_start); some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			next ? next->vm_start : USER_PGTABLES_CEILING);
	}
	tlb_finish_mmu(&tlb);

	vma_prev(&vmi);
	/* Shrink the vma to just the new range */
	return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
}
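/*
 * Example (illustrative sketch): the intended caller is the execve()
 * path, which builds the new stack at a temporary top-of-memory
 * location and then shifts it down into its final position, roughly:
 *
 *	stack_shift = vma->vm_end - stack_top;
 *	if (stack_shift) {
 *		ret = relocate_vma_down(vma, stack_shift);
 *		if (ret)
 *			goto out_unlock;
 *	}
 *
 * "stack_top" and "out_unlock" stand in for the caller's own locals and
 * error handling.
 */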