/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/rbtree_augmented.h>
#include <linux/sched/sysctl.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifndef arch_rebalance_pgtables
#define arch_rebalance_pgtables(addr, len)	(addr)
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

static bool ignore_rlimit_data = true;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.
 * The expected behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);
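/*
 * Example (illustrative only, not part of this file's logic): for a
 * typical private read/write mapping, vm_get_page_prot() indexes
 * protection_map[] with the low four vm_flags bits, so
 *
 *	pgprot_t prot = vm_get_page_prot(VM_READ | VM_WRITE);
 *
 * picks __P011 (private read+write, i.e. copy-on-write protections),
 * while VM_READ | VM_WRITE | VM_SHARED picks __S011.  Any extra
 * arch-specific protection bits (e.g. pkey bits) are OR'd in by
 * arch_vm_get_page_prot().
 */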
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;

	vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma)) {
		vm_flags &= ~VM_SHARED;
		vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
						     vm_flags);
	}
}


int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
int sysctl_overcommit_ratio __read_mostly = 50;	/* default is 50% */
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
/*
 * Make sure vm_committed_as is in one cacheline and is not cacheline-shared
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
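/*
 * Illustrative note (not from the original file): the ">> (PAGE_SHIFT - 10)"
 * shifts above convert kilobytes to pages.  With 4K pages PAGE_SHIFT is 12,
 * so the default admin reserve of 8192 kB becomes 8192 >> 2 = 2048 pages
 * (8MB), and the 131072 kB user reserve becomes 32768 pages (128MB).
 */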
/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_DENYWRITE)
		atomic_inc(&file_inode(file)->i_writecount);
	if (vma->vm_flags & VM_SHARED)
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, file, mapping);
		i_mmap_unlock_write(mapping);
	}
}

/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	kmem_cache_free(vm_area_cachep, vma);
	return next;
}

static unsigned long do_brk(unsigned long addr, unsigned long len);
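/*
 * Illustrative userspace view (exposition only, not kernel code): the
 * brk() syscall below moves the "program break", the end of the heap.
 * A malloc implementation might grow the heap roughly like
 *
 *	void *old = sbrk(0);                  - query current break
 *	if (brk((char *)old + 4096) != 0)
 *		... could not grow, fall back to mmap() ...
 *
 * Shrinking the break always succeeds (see "Always allow shrinking brk"
 * below); growing must pass the RLIMIT_DATA check and must not collide
 * with an existing mapping.
 */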
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	unsigned long min_brk;
	bool populate;

	down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the
	 * test of oldbrk with newbrk then it can escape the test and let the
	 * data segment grow beyond its set limit in the case where the limit
	 * is not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;

set_brk:
	mm->brk = brk;
	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}

static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
	unsigned long max, subtree_gap;
	max = vma->vm_start;
	if (vma->vm_prev)
		max -= vma->vm_prev->vm_end;
	if (vma->vm_rb.rb_left) {
		subtree_gap = rb_entry(vma->vm_rb.rb_left,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	if (vma->vm_rb.rb_right) {
		subtree_gap = rb_entry(vma->vm_rb.rb_right,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	return max;
}
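/*
 * Worked example (illustrative, numbers assumed): if vma covers
 * [0x6000, 0x7000) and its predecessor ends at 0x4000, the gap in front
 * of this vma is 0x6000 - 0x4000 = 0x2000.  vma_compute_subtree_gap()
 * then takes the maximum of that value and the cached rb_subtree_gap of
 * both rbtree children, i.e. the largest gap anywhere in this subtree.
 * For the first vma, vma->vm_prev is NULL and the gap is simply
 * vma->vm_start.
 */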
#ifdef CONFIG_DEBUG_VM_RB
static int browse_rb(struct mm_struct *mm)
{
	struct rb_root *root = &mm->mm_rb;
	int i = 0, j, bug = 0;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		if (vma->vm_start < prev) {
			pr_emerg("vm_start %lx < prev %lx\n",
				  vma->vm_start, prev);
			bug = 1;
		}
		if (vma->vm_start < pend) {
			pr_emerg("vm_start %lx < pend %lx\n",
				  vma->vm_start, pend);
			bug = 1;
		}
		if (vma->vm_start > vma->vm_end) {
			pr_emerg("vm_start %lx > vm_end %lx\n",
				  vma->vm_start, vma->vm_end);
			bug = 1;
		}
		spin_lock(&mm->page_table_lock);
		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
			pr_emerg("free gap %lx, correct %lx\n",
			       vma->rb_subtree_gap,
			       vma_compute_subtree_gap(vma));
			bug = 1;
		}
		spin_unlock(&mm->page_table_lock);
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd))
		j++;
	if (i != j) {
		pr_emerg("backwards %d, forwards %d\n", j, i);
		bug = 1;
	}
	return bug ? -1 : i;
}

static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
{
	struct rb_node *nd;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		VM_BUG_ON_VMA(vma != ignore &&
			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
			vma);
	}
}

static void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	unsigned long highest_address = 0;
	struct vm_area_struct *vma = mm->mmap;

	while (vma) {
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;

		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}

		highest_address = vma->vm_end;
		vma = vma->vm_next;
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
		bug = 1;
	}
	if (highest_address != mm->highest_vm_end) {
		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
			  mm->highest_vm_end, highest_address);
		bug = 1;
	}
	i = browse_rb(mm);
	if (i != mm->map_count) {
		if (i != -1)
			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
#define validate_mm(mm) do { } while (0)
#endif

RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
		     unsigned long, rb_subtree_gap, vma_compute_subtree_gap)

/*
 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
 * vma->vm_prev->vm_end values changed, without modifying the vma's position
 * in the rbtree.
 */
static void vma_gap_update(struct vm_area_struct *vma)
{
	/*
	 * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
	 * function that does exactly what we want.
	 */
	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}

static inline void vma_rb_insert(struct vm_area_struct *vma,
				 struct rb_root *root)
{
	/* All rb_subtree_gap values must be consistent prior to insertion */
	validate_mm_rb(root, NULL);

	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
{
	/*
	 * All rb_subtree_gap values must be consistent prior to erase,
	 * with the possible exception of the vma being erased.
	 */
	validate_mm_rb(root, vma);

	/*
	 * Note rb_erase_augmented is a fairly large inline function,
	 * so make sure we instantiate it only once with our desired
	 * augmented rbtree callbacks.
	 */
	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
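/*
 * Background sketch (illustrative, approximate): RB_DECLARE_CALLBACKS()
 * above expands to roughly
 *
 *	static void vma_gap_callbacks_propagate(struct rb_node *rb,
 *						struct rb_node *stop);
 *	static const struct rb_augment_callbacks vma_gap_callbacks = {
 *		.propagate = vma_gap_callbacks_propagate,
 *		.copy = ..., .rotate = ...,
 *	};
 *
 * where propagate() walks from a node towards the root, recomputing
 * rb_subtree_gap via vma_compute_subtree_gap() and stopping early once
 * a node's cached value is already correct.
 */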
/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_sem and by
 * the root anon_vma's mutex.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

static int find_vma_links(struct mm_struct *mm, unsigned long addr,
		unsigned long end, struct vm_area_struct **pprev,
		struct rb_node ***rb_link, struct rb_node **rb_parent)
{
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;

	while (*__rb_link) {
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			/* Fail if an existing vma overlaps the area */
			if (vma_tmp->vm_start < end)
				return -ENOMEM;
			__rb_link = &__rb_parent->rb_left;
		} else {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	return 0;
}
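/*
 * Usage sketch (illustrative): callers that want to insert a new vma at
 * [addr, end) typically do
 *
 *	struct vm_area_struct *prev;
 *	struct rb_node **rb_link, *rb_parent;
 *
 *	if (find_vma_links(mm, addr, end, &prev, &rb_link, &rb_parent))
 *		... range is already (partially) mapped: unmap or fail ...
 *	vma_link(mm, vma, prev, rb_link, rb_parent);
 *
 * On success, *rb_link is the empty rbtree slot where the new node
 * belongs and *pprev is the vma immediately preceding the range.
 */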
static unsigned long count_vma_pages_range(struct mm_struct *mm,
		unsigned long addr, unsigned long end)
{
	unsigned long nr_pages = 0;
	struct vm_area_struct *vma;

	/* Find first overlapping mapping */
	vma = find_vma_intersection(mm, addr, end);
	if (!vma)
		return 0;

	nr_pages = (min(end, vma->vm_end) -
		max(addr, vma->vm_start)) >> PAGE_SHIFT;

	/* Iterate over the rest of the overlaps */
	for (vma = vma->vm_next; vma; vma = vma->vm_next) {
		unsigned long overlap_len;

		if (vma->vm_start > end)
			break;

		overlap_len = min(end, vma->vm_end) - vma->vm_start;
		nr_pages += overlap_len >> PAGE_SHIFT;
	}

	return nr_pages;
}

void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	/* Update tracking information for the gap following the new vma. */
	if (vma->vm_next)
		vma_gap_update(vma->vm_next);
	else
		mm->highest_vm_end = vma->vm_end;

	/*
	 * vma->vm_prev wasn't known when we followed the rbtree to find the
	 * correct insertion point for that vma. As a result, we could not
	 * update the vma vm_rb parents rb_subtree_gap values on the way down.
	 * So, we first insert the vma with a zero rb_subtree_gap value
	 * (to be consistent with what we did on the way down), and then
	 * immediately update the gap to the correct value. Finally we
	 * rebalance the rbtree after all augmented values have been set.
	 */
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	vma->rb_subtree_gap = 0;
	vma_gap_update(vma);
	vma_rb_insert(vma, &mm->mm_rb);
}

static void __vma_link_file(struct vm_area_struct *vma)
{
	struct file *file;

	file = vma->vm_file;
	if (file) {
		struct address_space *mapping = file->f_mapping;

		if (vma->vm_flags & VM_DENYWRITE)
			atomic_dec(&file_inode(file)->i_writecount);
		if (vma->vm_flags & VM_SHARED)
			atomic_inc(&mapping->i_mmap_writable);

		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}
}

static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev, rb_parent);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
}

static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	struct address_space *mapping = NULL;

	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;
		i_mmap_lock_write(mapping);
	}

	__vma_link(mm, vma, prev, rb_link, rb_parent);
	__vma_link_file(vma);

	if (mapping)
		i_mmap_unlock_write(mapping);

	mm->map_count++;
	validate_mm(mm);
}

/*
 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
 * mm's list and rbtree. It has already been inserted into the interval tree.
 */
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		BUG();
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	mm->map_count++;
}

static inline void
__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma_rb_erase(vma, &mm->mm_rb);
	prev->vm_next = next = vma->vm_next;
	if (next)
		next->vm_prev = prev;

	/* Kill the cache */
	vmacache_invalidate(mm);
}
/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary. The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next;
	struct vm_area_struct *importer = NULL;
	struct address_space *mapping = NULL;
	struct rb_root *root = NULL;
	struct anon_vma *anon_vma = NULL;
	struct file *file = vma->vm_file;
	bool start_changed = false, end_changed = false;
	long adjust_next = 0;
	int remove_next = 0;

	if (next && !insert) {
		struct vm_area_struct *exporter = NULL;

		if (end >= next->vm_end) {
			/*
			 * vma expands, overlapping all the next, and
			 * perhaps the one after too (mprotect case 6).
			 */
again:			remove_next = 1 + (end > next->vm_end);
			end = next->vm_end;
			exporter = next;
			importer = vma;
		} else if (end > next->vm_start) {
			/*
			 * vma expands, overlapping part of the next:
			 * mprotect case 5 shifting the boundary up.
			 */
			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
			exporter = next;
			importer = vma;
		} else if (end < vma->vm_end) {
			/*
			 * vma shrinks, and !insert tells it's not
			 * split_vma inserting another: so it must be
			 * mprotect case 4 shifting the boundary down.
			 */
			adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
			exporter = vma;
			importer = next;
		}

		/*
		 * Easily overlooked: when mprotect shifts the boundary,
		 * make sure the expanding vma has anon_vma set if the
		 * shrinking vma had, to cover any anon pages imported.
		 */
		if (exporter && exporter->anon_vma && !importer->anon_vma) {
			int error;

			importer->anon_vma = exporter->anon_vma;
			error = anon_vma_clone(importer, exporter);
			if (error)
				return error;
		}
	}

	if (file) {
		mapping = file->f_mapping;
		root = &mapping->i_mmap;
		uprobe_munmap(vma, vma->vm_start, vma->vm_end);

		if (adjust_next)
			uprobe_munmap(next, next->vm_start, next->vm_end);

		i_mmap_lock_write(mapping);
		if (insert) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(insert);
		}
	}

	vma_adjust_trans_huge(vma, start, end, adjust_next);

	anon_vma = vma->anon_vma;
	if (!anon_vma && adjust_next)
		anon_vma = next->anon_vma;
	if (anon_vma) {
		VM_BUG_ON_VMA(adjust_next && next->anon_vma &&
			  anon_vma != next->anon_vma, next);
		anon_vma_lock_write(anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_pre_update_vma(next);
	}

	if (root) {
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, root);
		if (adjust_next)
			vma_interval_tree_remove(next, root);
	}

	if (start != vma->vm_start) {
		vma->vm_start = start;
		start_changed = true;
	}
	if (end != vma->vm_end) {
		vma->vm_end = end;
		end_changed = true;
	}
	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next << PAGE_SHIFT;
		next->vm_pgoff += adjust_next;
	}

	if (root) {
		if (adjust_next)
			vma_interval_tree_insert(next, root);
		vma_interval_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}

	if (remove_next) {
		/*
		 * vma_merge has merged next into vma, and needs
		 * us to remove next before dropping the locks.
		 */
		__vma_unlink(mm, next, vma);
		if (file)
			__remove_shared_vm_struct(next, file, mapping);
	} else if (insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		__insert_vm_struct(mm, insert);
	} else {
		if (start_changed)
			vma_gap_update(vma);
		if (end_changed) {
			if (!next)
				mm->highest_vm_end = end;
			else if (!adjust_next)
				vma_gap_update(next);
		}
	}

	if (anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_post_update_vma(next);
		anon_vma_unlock_write(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_write(mapping);

	if (root) {
		uprobe_mmap(vma);

		if (adjust_next)
			uprobe_mmap(next);
	}

	if (remove_next) {
		if (file) {
			uprobe_munmap(next, next->vm_start, next->vm_end);
			fput(file);
		}
		if (next->anon_vma)
			anon_vma_merge(vma, next);
		mm->map_count--;
		mpol_put(vma_policy(next));
		kmem_cache_free(vm_area_cachep, next);
		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we must remove another next too. It would clutter
		 * up the code too much to do both in one go.
		 */
		next = vma->vm_next;
		if (remove_next == 2)
			goto again;
		else if (next)
			vma_gap_update(next);
		else
			mm->highest_vm_end = end;
	}
	if (insert && file)
		uprobe_mmap(insert);

	validate_mm(mm);

	return 0;
}

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
 */
static inline int is_mergeable_vma(struct vm_area_struct *vma,
				struct file *file, unsigned long vm_flags,
				struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	/*
	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
	 * match the flags but dirty bit -- the caller should mark
	 * merged VMA as dirty. If dirty bit won't be excluded from
	 * comparison, we increase pressure on the memory system forcing
	 * the kernel to generate new VMAs when old one could be
	 * extended instead.
	 */
	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
		return 0;
	if (vma->vm_file != file)
		return 0;
	if (vma->vm_ops && vma->vm_ops->close)
		return 0;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
		return 0;
	return 1;
}
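/*
 * Example (illustrative): the XOR test above means a vma with
 * VM_READ|VM_WRITE|VM_SOFTDIRTY and a request for VM_READ|VM_WRITE
 * still compare equal, since (flags1 ^ flags2) & ~VM_SOFTDIRTY == 0,
 * so the two can merge and the caller re-marks the result soft-dirty.
 * Any other differing bit (say VM_EXEC) makes the XOR non-zero and
 * defeats the merge.
 */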
static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					struct anon_vma *anon_vma2,
					struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMA cloned from
	 * parents. This can improve scalability caused by anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return 1;
	return anon_vma1 == anon_vma2;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
		     struct anon_vma *anon_vma, struct file *file,
		     pgoff_t vm_pgoff,
		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
		    struct anon_vma *anon_vma, struct file *file,
		    pgoff_t vm_pgoff,
		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		pgoff_t vm_pglen;
		vm_pglen = vma_pages(vma);
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return 1;
	}
	return 0;
}
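/*
 * Worked example (numbers assumed for illustration): a file vma mapping
 * pages 10..13 of its file (vm_pgoff == 10, vma_pages() == 4) can only
 * absorb a following mapping whose vm_pgoff is 10 + 4 == 14, i.e. the
 * file offsets must be contiguous as well as the virtual addresses.
 * can_vma_merge_before() checks the mirror-image condition for a
 * mapping placed in front of the vma.
 */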
/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where AAAA is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
 *
 *     AAAA             AAAA                AAAA          AAAA
 *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
 *    cannot merge    might become    might become    might become
 *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
 *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
 *    mremap move:                                    PPPPNNNNNNNN 8
 *        AAAA
 *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
 *    might become    case 1 below    case 2 below    case 3 below
 *
 * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
 * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy,
			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;
	int err;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	if (prev)
		next = prev->vm_next;
	else
		next = mm->mmap;
	area = next;
	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
		next = next->vm_next;

	/*
	 * Can it merge with the predecessor?
	 */
	if (prev && prev->vm_end == addr &&
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
					    anon_vma, file, pgoff,
					    vm_userfaultfd_ctx)) {
		/*
		 * OK, it can.  Can we now merge in the successor as well?
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags,
						     anon_vma, file,
						     pgoff+pglen,
						     vm_userfaultfd_ctx) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma, NULL)) {
							/* cases 1, 6 */
			err = vma_adjust(prev, prev->vm_start,
				next->vm_end, prev->vm_pgoff, NULL);
		} else					/* cases 2, 5, 7 */
			err = vma_adjust(prev, prev->vm_start,
				end, prev->vm_pgoff, NULL);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(prev, vm_flags);
		return prev;
	}

	/*
	 * Can this new request be merged in front of next?
	 */
	if (next && end == next->vm_start &&
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					     anon_vma, file, pgoff+pglen,
					     vm_userfaultfd_ctx)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			err = vma_adjust(prev, prev->vm_start,
				addr, prev->vm_pgoff, NULL);
		else					/* cases 3, 8 */
			err = vma_adjust(area, addr, next->vm_end,
				next->vm_pgoff - pglen, NULL);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(area, vm_flags);
		return area;
	}

	return NULL;
}
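/*
 * Worked example (illustrative): let prev cover [0x1000, 0x3000) and
 * next cover [0x3000, 0x5000), and let mprotect change all of next to
 * prev's flags (case 7 above: addr == 0x3000, end == 0x5000).  Since
 * end == next->vm_end, next steps forward to the vma after; the
 * predecessor check then succeeds and vma_adjust(prev, 0x1000, 0x5000,
 * prev->vm_pgoff, NULL) grows prev over the old next, which vma_adjust
 * unlinks and frees via its remove_next path.
 */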
/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_sem held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that READ_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mmap_sem.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *near;

	near = vma->vm_next;
	if (!near)
		goto try_prev;

	anon_vma = reusable_anon_vma(near, vma, near);
	if (anon_vma)
		return anon_vma;
try_prev:
	near = vma->vm_prev;
	if (!near)
		goto none;

	anon_vma = reusable_anon_vma(near, near, vma);
	if (anon_vma)
		return anon_vma;
none:
	/*
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return NULL;
}

/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}

static inline int mlock_future_check(struct mm_struct *mm,
				     unsigned long flags,
				     unsigned long len)
{
	unsigned long locked, lock_limit;

	/* mlock MCL_FUTURE? */
	if (flags & VM_LOCKED) {
		locked = len >> PAGE_SHIFT;
		locked += mm->locked_vm;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		lock_limit >>= PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return -EAGAIN;
	}
	return 0;
}
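/*
 * Example (illustrative numbers): with a 64 kB RLIMIT_MEMLOCK and 4K
 * pages, lock_limit is 16 pages.  A VM_LOCKED request for 32 kB
 * (8 pages) from a process that already has 10 pages locked makes
 * locked == 18 > 16 and fails with -EAGAIN unless the caller has
 * CAP_IPC_LOCK.
 */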
/*
 * The caller must hold down_write(&current->mm->mmap_sem).
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, vm_flags_t vm_flags,
			unsigned long pgoff, unsigned long *populate)
{
	struct mm_struct *mm = current->mm;
	int pkey = 0;

	*populate = 0;

	if (!len)
		return -EINVAL;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we don't add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && path_noexec(&file->f_path)))
			prot |= PROT_EXEC;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/* Obtain the address to map to. we verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (offset_in_page(addr))
		return addr;

	if (prot == PROT_EXEC) {
		pkey = execute_only_pkey(mm);
		if (pkey < 0)
			pkey = 0;
	}

	/* Do simple checking here so the lower-level routines won't have
	 * to. we assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	if (flags & MAP_LOCKED)
		if (!can_do_mlock())
			return -EPERM;

	if (mlock_future_check(mm, vm_flags, len))
		return -EAGAIN;

	if (file) {
		struct inode *inode = file_inode(file);

		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure there are no mandatory locks on the file.
			 */
			if (locks_verify_locked(file))
				return -EAGAIN;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (path_noexec(&file->f_path)) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!file->f_op->mmap)
				return -ENODEV;
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if (flags & MAP_NORESERVE) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff);
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		file = fget(fd);
		if (!file)
			return -EBADF;
		if (is_file_hugepages(file))
			len = ALIGN(len, huge_page_size(hstate_file(file)));
		retval = -EINVAL;
		if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
			goto out_fput;
	} else if (flags & MAP_HUGETLB) {
		struct user_struct *user = NULL;
		struct hstate *hs;

		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs)
			return -EINVAL;

		len = ALIGN(len, huge_page_size(hs));
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called
		 * A dummy user value is used because we are not locking
		 * memory so no accounting is necessary
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				&user, HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
	if (file)
		fput(file);
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
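/*
 * Illustrative userspace counterpart (exposition aid, not kernel code):
 * the syscall above is what a C library's mmap() wrapper ends up in, e.g.
 *
 *	void *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * reaches sys_mmap_pgoff() with fd ignored (MAP_ANONYMOUS) and
 * pgoff == 0; the byte offset of a file mapping is converted to a page
 * offset before do_mmap() ever sees it, as old_mmap() shows with its
 * "a.offset >> PAGE_SHIFT".
 */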
/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
int vma_wants_writenotify(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;
	const struct vm_operations_struct *vm_ops = vma->vm_ops;

	/* If it was private or non-writable, the write bit is already clear */
	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
		return 1;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vma->vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
		return 0;

	/* Do we need to track softdirty? */
	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
		return 1;

	/* Specialty mapping? */
	if (vm_flags & VM_PFNMAP)
		return 0;

	/* Can the mapping track the dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_cap_account_dirty(vma->vm_file->f_mapping);
}

/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return 0;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}
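/*
 * Example (illustrative): the mask test above accepts exactly the
 * private writable case.  VM_WRITE alone (MAP_PRIVATE, PROT_WRITE)
 * yields VM_WRITE == VM_WRITE -> account; adding VM_SHARED or
 * VM_NORESERVE makes the masked value differ from VM_WRITE, so shared
 * and explicitly-unreserved mappings are never charged here.
 */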
unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	int error;
	struct rb_node **rb_link, *rb_parent;
	unsigned long charged = 0;

	/* Check against address space limit. */
	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
		unsigned long nr_pages;

		/*
		 * MAP_FIXED may remove pages of mappings that intersects with
		 * requested mapping. Account for the pages it would unmap.
		 */
		nr_pages = count_vma_pages_range(mm, addr, addr + len);

		if (!may_expand_vm(mm, vm_flags,
					(len >> PAGE_SHIFT) - nr_pages))
			return -ENOMEM;
	}

	/* Clear old maps */
	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
			      &rb_parent)) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
	}

	/*
	 * Private writable mapping: check memory availability
	 */
	if (accountable_mapping(file, vm_flags)) {
		charged = len >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return -ENOMEM;
		vm_flags |= VM_ACCOUNT;
	}

	/*
	 * Can we just expand an old mapping?
	 */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. the address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}

	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
	vma->vm_pgoff = pgoff;
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	if (file) {
		if (vm_flags & VM_DENYWRITE) {
			error = deny_write_access(file);
			if (error)
				goto free_vma;
		}
		if (vm_flags & VM_SHARED) {
			error = mapping_map_writable(file->f_mapping);
			if (error)
				goto allow_write_and_free_vma;
		}

		/* ->mmap() can change vma->vm_file, but must guarantee that
		 * vma_link() below can deny write-access if VM_DENYWRITE is
		 * set and map writably if VM_SHARED is set. This usually means
		 * the new file must not have been exposed to user-space, yet.
		 */
		vma->vm_file = get_file(file);
		error = file->f_op->mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;

		/* Can addr have changed??
		 *
		 * Answer: Yes, several device drivers can do it in their
		 *         f_op->mmap method. -DaveM
		 * Bug: If addr is changed, prev, rb_link, rb_parent should
		 *      be updated for vma_link()
		 */
		WARN_ON_ONCE(addr != vma->vm_start);

		addr = vma->vm_start;
		vm_flags = vma->vm_flags;
	} else if (vm_flags & VM_SHARED) {
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	}

	vma_link(mm, vma, prev, rb_link, rb_parent);
	/* Once vma denies write, undo our temporary denial count */
	if (file) {
		if (vm_flags & VM_SHARED)
			mapping_unmap_writable(file->f_mapping);
		if (vm_flags & VM_DENYWRITE)
			allow_write_access(file);
	}
	file = vma->vm_file;
out:
	perf_event_mmap(vma);

	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
	if (vm_flags & VM_LOCKED) {
		if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
					vma == get_gate_vma(current->mm)))
			mm->locked_vm += (len >> PAGE_SHIFT);
		else
			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
	}

	if (file)
		uprobe_mmap(vma);

	/*
	 * New (or expanded) vma always get soft dirty status.
	 * Otherwise user-space soft-dirty page tracker won't
	 * be able to distinguish situation when vma area unmapped,
	 * then new mapped in-place (which must be aimed as
	 * a completely new data area).
	 */
	vma->vm_flags |= VM_SOFTDIRTY;

	vma_set_page_prot(vma);

	return addr;

unmap_and_free_vma:
	vma->vm_file = NULL;
	fput(file);

	/* Undo any partial mapping done by a device driver. */
	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
	charged = 0;
	if (vm_flags & VM_SHARED)
		mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
	if (vm_flags & VM_DENYWRITE)
		allow_write_access(file);
free_vma:
	kmem_cache_free(vm_area_cachep, vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);
	return error;
}
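/*
 * Flow summary for mmap_region() (recap, not new logic): limit check ->
 * clear any old overlapping maps -> charge accounting if needed ->
 * try vma_merge() -> otherwise allocate a vma, call f_op->mmap() (or
 * shmem_zero_setup() for shared anonymous memory), vma_link() it, then
 * update statistics and return the final address.
 */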
unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	/*
	 * We implement the search by looking for an rbtree node that
	 * immediately follows a suitable gap. That is,
	 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
	 * - gap_end   = vma->vm_start        >= info->low_limit  + length;
	 * - gap_end - gap_start >= length
	 */

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/* Adjust search limits by the desired length */
	if (info->high_limit < length)
		return -ENOMEM;
	high_limit = info->high_limit - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		goto check_highest;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		goto check_highest;

	while (true) {
		/* Visit left subtree if it looks promising */
		gap_end = vma->vm_start;
		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
check_current:
		/* Check if current node has a suitable gap */
		if (gap_start > high_limit)
			return -ENOMEM;
		if (gap_end >= low_limit && gap_end - gap_start >= length)
			goto found;

		/* Visit right subtree if it looks promising */
		if (vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				goto check_highest;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_left) {
				gap_start = vma->vm_prev->vm_end;
				gap_end = vma->vm_start;
				goto check_current;
			}
		}
	}

check_highest:
	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
	if (gap_start > high_limit)
		return -ENOMEM;

found:
	/* We found a suitable gap. Clip it with the original low_limit. */
	if (gap_start < info->low_limit)
		gap_start = info->low_limit;

	/* Adjust gap address to the desired alignment */
	gap_start += (info->align_offset - gap_start) & info->align_mask;

	VM_BUG_ON(gap_start + info->length > info->high_limit);
	VM_BUG_ON(gap_start + info->length > gap_end);
	return gap_start;
}
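/*
 * Alignment example (illustrative): suppose a caller wants 64 kB
 * alignment, so align_mask == 0xffff and align_offset == 0.  If the
 * chosen gap starts at 0x12345000, then
 *
 *	gap_start += (0 - 0x12345000) & 0xffff;   =>  gap_start = 0x12350000
 *
 * i.e. the start is rounded up to the next aligned boundary.  The
 * worst-case "length + align_mask" padding at the top of the function
 * guarantees this rounding still fits in the gap.
 */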
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long length, low_limit, high_limit, gap_start, gap_end;

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	/*
	 * Adjust search limits by the desired length.
	 * See implementation comment at top of unmapped_area().
	 */
	gap_end = info->high_limit;
	if (gap_end < length)
		return -ENOMEM;
	high_limit = gap_end - length;

	if (info->low_limit > high_limit)
		return -ENOMEM;
	low_limit = info->low_limit + length;

	/* Check highest gap, which does not precede any rbtree node */
	gap_start = mm->highest_vm_end;
	if (gap_start <= high_limit)
		goto found_highest;

	/* Check if rbtree root looks promising */
	if (RB_EMPTY_ROOT(&mm->mm_rb))
		return -ENOMEM;
	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
	if (vma->rb_subtree_gap < length)
		return -ENOMEM;

	while (true) {
		/* Visit right subtree if it looks promising */
		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
			struct vm_area_struct *right =
				rb_entry(vma->vm_rb.rb_right,
					 struct vm_area_struct, vm_rb);
			if (right->rb_subtree_gap >= length) {
				vma = right;
				continue;
			}
		}

check_current:
		/* Check if current node has a suitable gap */
		gap_end = vma->vm_start;
		if (gap_end < low_limit)
			return -ENOMEM;
		if (gap_start <= high_limit && gap_end - gap_start >= length)
			goto found;

		/* Visit left subtree if it looks promising */
		if (vma->vm_rb.rb_left) {
			struct vm_area_struct *left =
				rb_entry(vma->vm_rb.rb_left,
					 struct vm_area_struct, vm_rb);
			if (left->rb_subtree_gap >= length) {
				vma = left;
				continue;
			}
		}

		/* Go back up the rbtree to find next candidate node */
		while (true) {
			struct rb_node *prev = &vma->vm_rb;
			if (!rb_parent(prev))
				return -ENOMEM;
			vma = rb_entry(rb_parent(prev),
				       struct vm_area_struct, vm_rb);
			if (prev == vma->vm_rb.rb_right) {
				gap_start = vma->vm_prev ?
					vma->vm_prev->vm_end : 0;
				goto check_current;
			}
		}
	}

found:
	/* We found a suitable gap. Clip it with the original high_limit. */
	if (gap_end > info->high_limit)
		gap_end = info->high_limit;

found_highest:
	/* Compute highest gap address at the desired alignment */
	gap_end -= info->length;
	gap_end -= (gap_end - info->align_offset) & info->align_mask;

	VM_BUG_ON(gap_end < info->low_limit);
	VM_BUG_ON(gap_end < gap_start);
	return gap_end;
}
/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * i.e.
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	return vm_unmapped_area(&info);
}
#endif

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	info.align_mask = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#endif

unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	unsigned long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	if (file && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;
	addr = get_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (offset_in_page(addr))
		return -EINVAL;

	addr = arch_rebalance_pgtables(addr, len);
	error = security_mmap_addr(addr);
	return error ? error : addr;
}

EXPORT_SYMBOL(get_unmapped_area);
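/*
 * Illustrative sketch of the calling convention above (not a caller
 * that exists in this file): a successful return is a page-aligned
 * address, and a failure is a negative errno folded into the same
 * unsigned long, so callers typically write
 *
 *	addr = get_unmapped_area(file, hint, len, pgoff, flags);
 *	if (offset_in_page(addr))
 *		return addr;		(really a negative errno)
 *
 * or use IS_ERR_VALUE(addr), which tests the same thing for
 * errno-sized values.
 */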
/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct rb_node *rb_node;
	struct vm_area_struct *vma;

	/* Check the cache first. */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	rb_node = mm->mm_rb.rb_node;

	while (rb_node) {
		struct vm_area_struct *tmp;

		tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);

		if (tmp->vm_end > addr) {
			vma = tmp;
			if (tmp->vm_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else
			rb_node = rb_node->rb_right;
	}

	if (vma)
		vmacache_update(addr, vma);
	return vma;
}

EXPORT_SYMBOL(find_vma);

/*
 * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
 */
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;

	vma = find_vma(mm, addr);
	if (vma) {
		*pprev = vma->vm_prev;
	} else {
		struct rb_node *rb_node = mm->mm_rb.rb_node;
		*pprev = NULL;
		while (rb_node) {
			*pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
			rb_node = rb_node->rb_right;
		}
	}
	return vma;
}

/*
 * Verify that the stack growth is acceptable and
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */
static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
{
	struct mm_struct *mm = vma->vm_mm;
	struct rlimit *rlim = current->signal->rlim;
	unsigned long new_start, actual_size;

	/* address space limit tests */
	if (!may_expand_vm(mm, vma->vm_flags, grow))
		return -ENOMEM;

	/* Stack limit test */
	actual_size = size;
	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
		actual_size -= PAGE_SIZE;
	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
		return -ENOMEM;

	/* mlock limit tests */
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked;
		unsigned long limit;
		locked = mm->locked_vm + grow;
		limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
		limit >>= PAGE_SHIFT;	/* rlimit is in bytes, locked_vm in pages */
		if (locked > limit && !capable(CAP_IPC_LOCK))
			return -ENOMEM;
	}

	/* Check to ensure the stack will not grow into a hugetlb-only region */
	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
			vma->vm_end - size;
	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
		return -EFAULT;

	/*
	 * Overcommit..  This must be the final test, as it will
	 * update security statistics.
	 */
	if (security_vm_enough_memory_mm(mm, grow))
		return -ENOMEM;

	return 0;
}

#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
/*
 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
 * vma is the last one with address > vma->vm_end. Have to extend vma.
 */
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	/* Guard against wrapping around to address 0. */
	if (address < PAGE_ALIGN(address+4))
		address = PAGE_ALIGN(address+4);
	else
		return -ENOMEM;

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address > vma->vm_end) {
		unsigned long size, grow;

		size = address - vma->vm_start;
		grow = (address - vma->vm_end) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * vma_gap_update() doesn't support concurrent
				 * updates, but we only hold a shared mmap_sem
				 * lock here, so we need to protect against
				 * concurrent vma expansions.
				 * anon_vma_lock_write() doesn't help here, as
				 * we don't guarantee that all growable vmas
				 * in a mm share the same root anon vma.
				 * So, we reuse mm->page_table_lock to guard
				 * against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_end = address;
				anon_vma_interval_tree_post_update_vma(vma);
				if (vma->vm_next)
					vma_gap_update(vma->vm_next);
				else
					mm->highest_vm_end = address;
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	khugepaged_enter_vma_merge(vma, vma->vm_flags);
	validate_mm(mm);
	return error;
}
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */

/*
 * vma is the first one with address < vma->vm_start.  Have to extend vma.
 */
int expand_downwards(struct vm_area_struct *vma,
				   unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;

	address &= PAGE_MASK;
	error = security_mmap_addr(address);
	if (error)
		return error;

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address < vma->vm_start) {
		unsigned long size, grow;

		size = vma->vm_end - address;
		grow = (vma->vm_start - address) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (grow <= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * vma_gap_update() doesn't support concurrent
				 * updates, but we only hold a shared mmap_sem
				 * lock here, so we need to protect against
				 * concurrent vma expansions.
				 * anon_vma_lock_write() doesn't help here, as
				 * we don't guarantee that all growable vmas
				 * in a mm share the same root anon vma.
				 * So, we reuse mm->page_table_lock to guard
				 * against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_start = address;
				vma->vm_pgoff -= grow;
				anon_vma_interval_tree_post_update_vma(vma);
				vma_gap_update(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	khugepaged_enter_vma_merge(vma, vma->vm_flags);
	validate_mm(mm);
	return error;
}

/*
 * Note how expand_stack() refuses to expand the stack all the way to
 * abut the next virtual mapping, *unless* that mapping itself is also
 * a stack mapping. We want to leave room for a guard page, after all
 * (the guard page itself is not added here, that is done by the
 * actual page faulting logic)
 *
 * This matches the behavior of the guard page logic (see mm/memory.c:
 * check_stack_guard_page()), which only allows the guard page to be
 * removed under these circumstances.
 */
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	struct vm_area_struct *next;

	address &= PAGE_MASK;
	next = vma->vm_next;
	if (next && next->vm_start == address + PAGE_SIZE) {
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
	}
	return expand_upwards(vma, address);
}

struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	if (!prev || expand_stack(prev, addr))
		return NULL;
	if (prev->vm_flags & VM_LOCKED)
		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
	return prev;
}
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	struct vm_area_struct *prev;

	address &= PAGE_MASK;
	prev = vma->vm_prev;
	if (prev && prev->vm_end == address) {
		if (!(prev->vm_flags & VM_GROWSDOWN))
			return -ENOMEM;
	}
	return expand_downwards(vma, address);
}

struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(mm, addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	start = vma->vm_start;
	if (expand_stack(vma, addr))
		return NULL;
	if (vma->vm_flags & VM_LOCKED)
		populate_vma_page_range(vma, addr, start, NULL);
	return vma;
}
#endif

EXPORT_SYMBOL_GPL(find_extend_vma);
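/*
 * Illustrative sketch of a typical caller (the real callers live in the
 * fault paths, e.g. get_user_pages(), not in this file): a faulting
 * address just below a VM_GROWSDOWN vma is resolved by
 *
 *	vma = find_extend_vma(mm, fault_address);
 *	if (!vma)
 *		...the address is bogus, deliver SIGSEGV...
 *
 * which grows the stack vma down to cover fault_address if, and only
 * if, the rlimit, locked-memory and overcommit checks in
 * acct_stack_growth() all pass.
 */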
/*
 * Ok - we have the memory areas we should free on the vma list,
 * so release them, and do the vma updates.
 *
 * Called with the mm semaphore held.
 */
static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	unsigned long nr_accounted = 0;

	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	do {
		long nrpages = vma_pages(vma);

		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += nrpages;
		vm_stat_account(mm, vma->vm_flags, -nrpages);
		vma = remove_vma(vma);
	} while (vma);
	vm_unacct_memory(nr_accounted);
	validate_mm(mm);
}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end);
	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
				 next ? next->vm_start : USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);
}

/*
 * Create a list of VMAs touched by the unmap, removing them from the mm's
 * VMA list as we go.
 */
static void
detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, unsigned long end)
{
	struct vm_area_struct **insertion_point;
	struct vm_area_struct *tail_vma = NULL;

	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
	vma->vm_prev = NULL;
	do {
		vma_rb_erase(vma, &mm->mm_rb);
		mm->map_count--;
		tail_vma = vma;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
	*insertion_point = vma;
	if (vma) {
		vma->vm_prev = prev;
		vma_gap_update(vma);
	} else
		mm->highest_vm_end = prev ? prev->vm_end : 0;
	tail_vma->vm_next = NULL;

	/* Kill the cache */
	vmacache_invalidate(mm);
}

/*
 * __split_vma() bypasses sysctl_max_map_count checking.  We use this on the
 * munmap path where it doesn't make sense to fail.
 */
static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	int err;

	if (is_vm_hugetlb_page(vma) && (addr &
					~(huge_page_mask(hstate_vma(vma)))))
		return -EINVAL;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;

	INIT_LIST_HEAD(&new->anon_vma_chain);

	if (new_below)
		new->vm_end = addr;
	else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vma;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	if (new_below)
		err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
			((addr - new->vm_start) >> PAGE_SHIFT), new);
	else
		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);

	/* Success. */
	if (!err)
		return 0;

	/* Clean everything up if vma_adjust failed. */
	if (new->vm_ops && new->vm_ops->close)
		new->vm_ops->close(new);
	if (new->vm_file)
		fput(new->vm_file);
	unlink_anon_vmas(new);
 out_free_mpol:
	mpol_put(vma_policy(new));
 out_free_vma:
	kmem_cache_free(vm_area_cachep, new);
	return err;
}
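/*
 * Worked example of the new_below convention (addresses are made up):
 * splitting a vma covering [0x1000, 0x4000) at addr == 0x2000 with
 * new_below == 1 allocates the new vma for the lower part
 * [0x1000, 0x2000) and shrinks the original to [0x2000, 0x4000); with
 * new_below == 0 the new vma instead takes the upper part
 * [0x2000, 0x4000).  Either way vm_pgoff is fixed up so a file mapping
 * keeps the same linear file offset across the split.
 */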
/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(mm, vma, addr, new_below);
}

/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardinge <jeremy@goop.org>
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	unsigned long end;
	struct vm_area_struct *vma, *prev, *last;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
		return -EINVAL;

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma)
		return 0;
	prev = vma->vm_prev;
	/* we have  start < vma->vm_end  */

	/* if it doesn't overlap, we have nothing.. */
	end = start + len;
	if (vma->vm_start >= end)
		return 0;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 *
	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
	 * unmapped vm_area_struct will remain in use: so lower split_vma
	 * places tmp vma above, and higher split_vma places tmp vma below.
	 */
	if (start > vma->vm_start) {
		int error;

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
			return -ENOMEM;

		error = __split_vma(mm, vma, start, 0);
		if (error)
			return error;
		prev = vma;
	}

	/* Does it split the last one? */
	last = find_vma(mm, end);
	if (last && end > last->vm_start) {
		int error = __split_vma(mm, last, end, 1);
		if (error)
			return error;
	}
	vma = prev ? prev->vm_next : mm->mmap;

	/*
	 * unlock any mlock()ed ranges before detaching vmas
	 */
	if (mm->locked_vm) {
		struct vm_area_struct *tmp = vma;
		while (tmp && tmp->vm_start < end) {
			if (tmp->vm_flags & VM_LOCKED) {
				mm->locked_vm -= vma_pages(tmp);
				munlock_vma_pages_all(tmp);
			}
			tmp = tmp->vm_next;
		}
	}

	/*
	 * Remove the VMAs, and unmap the actual pages
	 */
	detach_vmas_to_be_unmapped(mm, vma, prev, end);
	unmap_region(mm, vma, prev, start, end);

	arch_unmap(mm, vma, start, end);

	/* Fix up all other VM information */
	remove_vma_list(mm, vma);

	return 0;
}

int vm_munmap(unsigned long start, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, start, len);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	profile_munmap(addr);
	return vm_munmap(addr, len);
}


/*
 * Emulation of deprecated remap_file_pages() syscall.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{

	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long populate = 0;
	unsigned long ret = -EINVAL;
	struct file *file;

	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.txt.\n",
		     current->comm, current->pid);

	if (prot)
		return ret;
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	if (start + size <= start)
		return ret;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return ret;

	down_write(&mm->mmap_sem);
	vma = find_vma(mm, start);

	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (start < vma->vm_start || start + size > vma->vm_end)
		goto out;

	if (pgoff == linear_page_index(vma, start)) {
		ret = 0;
		goto out;
	}

	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;

	flags &= MAP_NONBLOCK;
	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
	if (vma->vm_flags & VM_LOCKED) {
		flags |= MAP_LOCKED;
		/* drop PG_Mlocked flag for over-mapped range */
		munlock_vma_pages_range(vma, start, start + size);
	}

	file = get_file(vma->vm_file);
	ret = do_mmap_pgoff(vma->vm_file, start, size,
			prot, flags, pgoff, &populate);
	fput(file);
out:
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(ret, populate);
	if (!IS_ERR_VALUE(ret))
		ret = 0;
	return ret;
}

static inline void verify_mm_writelocked(struct mm_struct *mm)
{
#ifdef CONFIG_DEBUG_VM
	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
		WARN_ON(1);
		up_read(&mm->mmap_sem);
	}
#endif
}

/*
 * This is really a simplified "do_mmap".  It only handles
 * anonymous maps.  Eventually we may be able to do some
 * brk-specific accounting here.
 */
static unsigned long do_brk(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	unsigned long flags;
	struct rb_node **rb_link, *rb_parent;
	pgoff_t pgoff = addr >> PAGE_SHIFT;
	int error;

	len = PAGE_ALIGN(len);
	if (!len)
		return addr;

	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;

	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (offset_in_page(error))
		return error;

	error = mlock_future_check(mm, mm->def_flags, len);
	if (error)
		return error;

	/*
	 * mm->mmap_sem is required to protect against another thread
	 * changing the mappings in case we sleep.
	 */
	verify_mm_writelocked(mm);

	/*
	 * Clear old maps.  This also does some error checking for us.
	 */
	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
			      &rb_parent)) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
	}

	/* Check against address space limits *after* clearing old maps... */
	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	/* Can we just expand an old private anonymous mapping? */
	vma = vma_merge(mm, prev, addr, addr + len, flags,
			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

	/*
	 * create a vma struct for an anonymous mapping
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		vm_unacct_memory(len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_pgoff = pgoff;
	vma->vm_flags = flags;
	vma->vm_page_prot = vm_get_page_prot(flags);
	vma_link(mm, vma, prev, rb_link, rb_parent);
out:
	perf_event_mmap(vma);
	mm->total_vm += len >> PAGE_SHIFT;
	mm->data_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED)
		mm->locked_vm += (len >> PAGE_SHIFT);
	vma->vm_flags |= VM_SOFTDIRTY;
	return addr;
}

unsigned long vm_brk(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	unsigned long ret;
	bool populate;

	down_write(&mm->mmap_sem);
	ret = do_brk(addr, len);
	populate = ((mm->def_flags & VM_LOCKED) != 0);
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(addr, len);
	return ret;
}
EXPORT_SYMBOL(vm_brk);
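/*
 * Illustrative sketch of a vm_brk() caller (binary loaders are the real
 * users; bss_start and bss_end stand in for a loader's own variables):
 *
 *	addr = vm_brk(bss_start, bss_end - bss_start);
 *	if (addr & ~PAGE_MASK)
 *		...mapping failed, abort the exec...
 *
 * i.e. the same "page-aligned address on success, negative errno folded
 * into the same unsigned long on failure" convention as do_brk() itself.
 */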
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	unsigned long nr_accounted = 0;

	/* mm's last user has gone, and it's about to be pulled down */
	mmu_notifier_release(mm);

	if (mm->locked_vm) {
		vma = mm->mmap;
		while (vma) {
			if (vma->vm_flags & VM_LOCKED)
				munlock_vma_pages_all(vma);
			vma = vma->vm_next;
		}
	}

	arch_exit_mmap(mm);

	vma = mm->mmap;
	if (!vma)	/* Can happen if dup_mmap() received an OOM */
		return;

	lru_add_drain();
	flush_cache_mm(mm);
	tlb_gather_mmu(&tlb, mm, 0, -1);
	/* update_hiwater_rss(mm) here? but nobody should be looking */
	/* Use -1 here to ensure all VMAs in the mm are unmapped */
	unmap_vmas(&tlb, vma, 0, -1);

	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, 0, -1);

	/*
	 * Walk the list again, actually closing and freeing it,
	 * with preemption enabled, without holding any MM locks.
	 */
	while (vma) {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		vma = remove_vma(vma);
	}
	vm_unacct_memory(nr_accounted);
}

/* Insert vm structure into process list sorted by address
 * and into the inode's i_mmap tree.  If vm_file is non-NULL
 * then i_mmap_rwsem is taken here.
 */
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		return -ENOMEM;
	if ((vma->vm_flags & VM_ACCOUNT) &&
	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
		return -ENOMEM;

	/*
	 * The vm_pgoff of a purely anonymous vma should be irrelevant
	 * until its first write fault, when page's anon_vma and index
	 * are set.  But now set the vm_pgoff it will almost certainly
	 * end up with (unless mremap moves it elsewhere before that
	 * first write fault), so /proc/pid/maps tells a consistent story.
	 *
	 * By setting it to reflect the virtual start address of the
	 * vma, merges and splits can happen in a seamless way, just
	 * using the existing file pgoff checks and manipulations.
	 * Similarly in do_mmap_pgoff and in do_brk.
	 */
	if (vma_is_anonymous(vma)) {
		BUG_ON(vma->anon_vma);
		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
	}

	vma_link(mm, vma, prev, rb_link, rb_parent);
	return 0;
}

/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	bool faulted_in_anon_vma = true;

	/*
	 * If anonymous vma has not yet been faulted, update new pgoff
	 * to match new location, to increase its chance of merging.
	 */
	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
		return NULL;	/* should never get here */
	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			    vma->vm_userfaultfd_ctx);
	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen.  mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe. It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
			*vmap = vma = new_vma;
		}
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!new_vma)
			goto out;
		*new_vma = *vma;
		new_vma->vm_start = addr;
		new_vma->vm_end = addr + len;
		new_vma->vm_pgoff = pgoff;
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;
		INIT_LIST_HEAD(&new_vma->anon_vma_chain);
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		vma_link(mm, new_vma, prev, rb_link, rb_parent);
		*need_rmap_locks = false;
	}
	return new_vma;

out_free_mempol:
	mpol_put(vma_policy(new_vma));
out_free_vma:
	kmem_cache_free(vm_area_cachep, new_vma);
out:
	return NULL;
}

/*
 * Return true if the calling process may expand its vm space by the passed
 * number of pages
 */
bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
{
	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
		return false;

	if (is_data_mapping(flags) &&
	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
		if (ignore_rlimit_data)
			pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Will be forbidden soon.\n",
				     current->comm, current->pid,
				     (mm->data_vm + npages) << PAGE_SHIFT,
				     rlimit(RLIMIT_DATA));
		else
			return false;
	}

	return true;
}

void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
{
	mm->total_vm += npages;

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}
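/*
 * Worked example of the unit handling above (numbers are illustrative):
 * rlimit() returns bytes while mm->total_vm counts pages, so with 4 KiB
 * pages an RLIMIT_AS of 1 GiB becomes 1 GiB >> 12 == 262144 pages, and
 * a process whose total_vm is already 262000 pages may expand by at
 * most 144 more before may_expand_vm() starts returning false.
 */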
static int special_mapping_fault(struct vm_area_struct *vma,
				 struct vm_fault *vmf);

/*
 * Having a close hook prevents vma merging regardless of flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
}

static const char *special_mapping_name(struct vm_area_struct *vma)
{
	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
}

static const struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
	.name = special_mapping_name,
};

static const struct vm_operations_struct legacy_special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
};

static int special_mapping_fault(struct vm_area_struct *vma,
				 struct vm_fault *vmf)
{
	pgoff_t pgoff;
	struct page **pages;

	if (vma->vm_ops == &legacy_special_mapping_vmops) {
		pages = vma->vm_private_data;
	} else {
		struct vm_special_mapping *sm = vma->vm_private_data;

		if (sm->fault)
			return sm->fault(sm, vma, vmf);

		pages = sm->pages;
	}

	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
		pgoff--;

	if (*pages) {
		struct page *page = *pages;
		get_page(page);
		vmf->page = page;
		return 0;
	}

	return VM_FAULT_SIGBUS;
}

static struct vm_area_struct *__install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, void *priv,
	const struct vm_operations_struct *ops)
{
	int ret;
	struct vm_area_struct *vma;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (unlikely(vma == NULL))
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;

	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	vma->vm_ops = ops;
	vma->vm_private_data = priv;

	ret = insert_vm_struct(mm, vma);
	if (ret)
		goto out;

	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);

	perf_event_mmap(vma);

	return vma;

out:
	kmem_cache_free(vm_area_cachep, vma);
	return ERR_PTR(ret);
}
/*
 * Called with mm->mmap_sem held for writing.
 * Insert a new vma covering the given region, with the given flags.
 * Its pages are supplied by the given array of struct page *.
 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
 * The region past the last page supplied will always produce SIGBUS.
 * The array pointer and the pages it points to are assumed to stay alive
 * for as long as this mapping might exist.
 */
struct vm_area_struct *_install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	unsigned long vm_flags, const struct vm_special_mapping *spec)
{
	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
					&special_mapping_vmops);
}

int install_special_mapping(struct mm_struct *mm,
			    unsigned long addr, unsigned long len,
			    unsigned long vm_flags, struct page **pages)
{
	struct vm_area_struct *vma = __install_special_mapping(
		mm, addr, len, vm_flags, (void *)pages,
		&legacy_special_mapping_vmops);

	return PTR_ERR_OR_ZERO(vma);
}
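/*
 * Illustrative sketch of a caller (vdso-style setup code in the arch
 * trees is the real user; the "demo" names here are made up):
 *
 *	static struct page *demo_pages[2];	(one page plus NULL terminator)
 *	static const struct vm_special_mapping demo_mapping = {
 *		.name  = "[demo]",
 *		.pages = demo_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ|VM_MAYREAD, &demo_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * The resulting vma shows up as "[demo]" in /proc/pid/maps, faults its
 * pages in from demo_pages[], and never merges with its neighbours
 * thanks to the ->close hook above.
 */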
static DEFINE_MUTEX(mm_all_locks_mutex);

static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
		/*
		 * We can safely modify head.next after taking the
		 * anon_vma->root->rwsem. If some other vma in this mm shares
		 * the same anon_vma we won't take it again.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_node))
			BUG();
	}
}

static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
	}
}

/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_sem in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
 * mmap_sem until mm_drop_all_locks() returns.
 *
 * mmap_sem in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout. It's also needed in write mode to avoid new
 * anon_vmas to be associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags prevent taking the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in the following order, according to the comment at
 * the beginning of mm/rmap.c:
 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *   hugetlb mapping);
 * - all i_mmap_rwsem locks;
 * - all anon_vma->rwsem locks;
 *
 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we are protected from parallel mm_take_all_locks()
 * by mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	BUG_ON(down_read_trylock(&mm->mmap_sem));

	mutex_lock(&mm_all_locks_mutex);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				!is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}
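/*
 * Illustrative sketch of the protocol described above (mmu notifier
 * registration is the real user; error handling trimmed):
 *
 *	down_write(&mm->mmap_sem);
 *	ret = mm_take_all_locks(mm);
 *	if (ret)
 *		goto out;		(interrupted by a signal)
 *	...publish the new notifier or update global state...
 *	mm_drop_all_locks(mm);
 * out:
 *	up_write(&mm->mmap_sem);
 */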
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change to 0 from under
		 * us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}

static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}

/*
 * The mmap_sem cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;

	BUG_ON(down_read_trylock(&mm->mmap_sem));
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}

/*
 * initialise the percpu counter for VM
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
subsys_initcall(init_user_reserve);

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
subsys_initcall(init_admin_reserve);

/*
 * Reinitialise user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB. These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top. It may make sense to increase or even disable the
 * reserve depending on the existence of swap or variations in the recovery
 * tools. So, the admin may have changed them.
 *
 * If memory is added and the reserves have been eliminated or increased above
 * the default max, then we'll trust the admin.
 *
 * If memory is removed and there isn't enough free memory, then we
 * need to reset the reserves.
 *
 * Otherwise keep the reserve set by the admin.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long tmp, free_kbytes;

	switch (action) {
	case MEM_ONLINE:
		/* Default max is 128MB. Leave alone if modified by operator. */
		tmp = sysctl_user_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 17))
			init_user_reserve();

		/* Default max is 8MB. Leave alone if modified by operator. */
		tmp = sysctl_admin_reserve_kbytes;
		if (0 < tmp && tmp < (1UL << 13))
			init_admin_reserve();

		break;
	case MEM_OFFLINE:
		free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

		if (sysctl_user_reserve_kbytes > free_kbytes) {
			init_user_reserve();
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
		}

		if (sysctl_admin_reserve_kbytes > free_kbytes) {
			init_admin_reserve();
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block reserve_mem_nb = {
	.notifier_call = reserve_mem_notifier,
};

static int __meminit init_reserve_notifier(void)
{
	if (register_hotmemory_notifier(&reserve_mem_nb))
		pr_err("Failed registering memory add/remove notifier for admin reserve\n");

	return 0;
}
subsys_initcall(init_reserve_notifier);