// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/rbtree_augmented.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);
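/*
 * Illustrative note (not in the original source): because the flag above is
 * registered with core_param(), it can typically be flipped at boot with
 * "ignore_rlimit_data=1" on the kernel command line, or at runtime via
 * /sys/module/kernel/parameters/ignore_rlimit_data on kernels that expose
 * core parameters there.
 */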
/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
 * MAP_PRIVATE (with Enhanced PAN supported):
 *								r: (no) no
 *								w: (no) no
 *								x: (yes) yes
 */
pgprot_t protection_map[16] __ro_after_init = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return prot;
}
#endif

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));

	return arch_filter_pgprot(ret);
}
EXPORT_SYMBOL(vm_get_page_prot);

static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}
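/*
 * Illustrative example (not in the original source): vm_get_page_prot()
 * simply indexes protection_map[] with the low four vm_flags bits.  For a
 * shared, readable and writable mapping:
 *
 *	VM_READ | VM_WRITE | VM_SHARED == 0x1 | 0x2 | 0x8 == 0xb
 *
 * so protection_map[0xb] selects __S011 (shared, read/write, no exec);
 * the same flags without VM_SHARED (index 0x3) would select __P011.
 */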
/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_DENYWRITE)
		allow_write_access(file);
	if (vma->vm_flags & VM_SHARED)
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, file, mapping);
		i_mmap_unlock_write(mapping);
	}
}

/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	vm_area_free(vma);
	return next;
}

static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
		struct list_head *uf);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *next;
	unsigned long min_brk;
	bool populate;
	bool downgraded = false;
	LIST_HEAD(uf);

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the
	 * test of oldbrk with newbrk then it can escape the test and let the
	 * data segment grow beyond its set limit in the case where the limit
	 * is not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/*
	 * Always allow shrinking brk.
	 * __do_munmap() may downgrade mmap_lock to read.
	 */
	if (brk <= mm->brk) {
		int ret;

		/*
		 * mm->brk must be protected by write mmap_lock so update it
		 * before downgrading mmap_lock. When __do_munmap() fails,
		 * mm->brk will be restored from origbrk.
		 */
		mm->brk = brk;
		ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
		if (ret < 0) {
			mm->brk = origbrk;
			goto out;
		} else if (ret == 1) {
			downgraded = true;
		}
		goto success;
	}

	/* Check against existing mmap mappings. */
	next = find_vma(mm, oldbrk);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
		goto out;
	mm->brk = brk;

success:
	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
	if (downgraded)
		mmap_read_unlock(mm);
	else
		mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	mmap_write_unlock(mm);
	return origbrk;
}
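/*
 * Illustrative note (not in the original source): unlike most syscalls,
 * brk() does not return 0/-errno.  On success it returns the new break
 * (the value just stored in mm->brk); on any failure it returns the old
 * break (origbrk).  A libc wrapper is expected to compare the returned
 * value with the requested one, roughly:
 *
 *	if (syscall(SYS_brk, desired) < desired)
 *		return -1;	/* errno = ENOMEM in a typical wrapper */
 *
 * (Sketch only; exact wrapper behaviour is libc-specific.)
 */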
static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
{
	unsigned long gap, prev_end;

	/*
	 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
	 * allow two stack_guard_gaps between them here, and when choosing
	 * an unmapped area; whereas when expanding we only require one.
	 * That's a little inconsistent, but keeps the code here simpler.
	 */
	gap = vm_start_gap(vma);
	if (vma->vm_prev) {
		prev_end = vm_end_gap(vma->vm_prev);
		if (gap > prev_end)
			gap -= prev_end;
		else
			gap = 0;
	}
	return gap;
}

#ifdef CONFIG_DEBUG_VM_RB
static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
	unsigned long max = vma_compute_gap(vma), subtree_gap;
	if (vma->vm_rb.rb_left) {
		subtree_gap = rb_entry(vma->vm_rb.rb_left,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	if (vma->vm_rb.rb_right) {
		subtree_gap = rb_entry(vma->vm_rb.rb_right,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	return max;
}

static int browse_rb(struct mm_struct *mm)
{
	struct rb_root *root = &mm->mm_rb;
	int i = 0, j, bug = 0;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		if (vma->vm_start < prev) {
			pr_emerg("vm_start %lx < prev %lx\n",
				  vma->vm_start, prev);
			bug = 1;
		}
		if (vma->vm_start < pend) {
			pr_emerg("vm_start %lx < pend %lx\n",
				  vma->vm_start, pend);
			bug = 1;
		}
		if (vma->vm_start > vma->vm_end) {
			pr_emerg("vm_start %lx > vm_end %lx\n",
				  vma->vm_start, vma->vm_end);
			bug = 1;
		}
		spin_lock(&mm->page_table_lock);
		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
			pr_emerg("free gap %lx, correct %lx\n",
			       vma->rb_subtree_gap,
			       vma_compute_subtree_gap(vma));
			bug = 1;
		}
		spin_unlock(&mm->page_table_lock);
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd))
		j++;
	if (i != j) {
		pr_emerg("backwards %d, forwards %d\n", j, i);
		bug = 1;
	}
	return bug ? -1 : i;
}
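/*
 * Illustrative example (not in the original source) of the augmented value
 * verified above.  With three mappings and the gaps between them:
 *
 *	[ A )   gap 2MB   [ B )   gap 64KB   [ C )
 *
 * each node's vma_compute_gap() is the free space immediately below its
 * vm_start, and rb_subtree_gap is the maximum of that value over the node's
 * whole rbtree subtree.  A search for a 1MB hole can therefore skip any
 * subtree whose rb_subtree_gap < 1MB without visiting it.
 */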
static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
{
	struct rb_node *nd;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		VM_BUG_ON_VMA(vma != ignore &&
			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
			vma);
	}
}

static void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	unsigned long highest_address = 0;
	struct vm_area_struct *vma = mm->mmap;

	while (vma) {
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;

		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}

		highest_address = vm_end_gap(vma);
		vma = vma->vm_next;
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
		bug = 1;
	}
	if (highest_address != mm->highest_vm_end) {
		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
			  mm->highest_vm_end, highest_address);
		bug = 1;
	}
	i = browse_rb(mm);
	if (i != mm->map_count) {
		if (i != -1)
			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
#define validate_mm(mm) do { } while (0)
#endif

RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
			 struct vm_area_struct, vm_rb,
			 unsigned long, rb_subtree_gap, vma_compute_gap)

/*
 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
 * vma->vm_prev->vm_end values changed, without modifying the vma's position
 * in the rbtree.
 */
static void vma_gap_update(struct vm_area_struct *vma)
{
	/*
	 * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
	 * a callback function that does exactly what we want.
	 */
	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}

static inline void vma_rb_insert(struct vm_area_struct *vma,
				 struct rb_root *root)
{
	/* All rb_subtree_gap values must be consistent prior to insertion */
	validate_mm_rb(root, NULL);

	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
{
	/*
	 * Note rb_erase_augmented is a fairly large inline function,
	 * so make sure we instantiate it only once with our desired
	 * augmented rbtree callbacks.
	 */
	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}
static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
						struct rb_root *root,
						struct vm_area_struct *ignore)
{
	/*
	 * All rb_subtree_gap values must be consistent prior to erase,
	 * with the possible exception of
	 *
	 * a. the "next" vma being erased if next->vm_start was reduced in
	 *    __vma_adjust() -> __vma_unlink()
	 * b. the vma being erased in detach_vmas_to_be_unmapped() ->
	 *    vma_rb_erase()
	 */
	validate_mm_rb(root, ignore);

	__vma_rb_erase(vma, root);
}

static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
					 struct rb_root *root)
{
	vma_rb_erase_ignore(vma, root, vma);
}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

static int find_vma_links(struct mm_struct *mm, unsigned long addr,
		unsigned long end, struct vm_area_struct **pprev,
		struct rb_node ***rb_link, struct rb_node **rb_parent)
{
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;

	while (*__rb_link) {
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			/* Fail if an existing vma overlaps the area */
			if (vma_tmp->vm_start < end)
				return -ENOMEM;
			__rb_link = &__rb_parent->rb_left;
		} else {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	return 0;
}

/*
 * vma_next() - Get the next VMA.
 * @mm: The mm_struct.
 * @vma: The current vma.
 *
 * If @vma is NULL, return the first vma in the mm.
 *
 * Returns: The next VMA after @vma.
 */
static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
					      struct vm_area_struct *vma)
{
	if (!vma)
		return mm->mmap;

	return vma->vm_next;
}
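/*
 * Illustrative example (not in the original source) of the find_vma_links()
 * contract above.  For an address space containing [0x1000,0x2000) and
 * [0x4000,0x5000):
 *
 *	find_vma_links(mm, 0x2000, 0x4000, &prev, &rb_link, &rb_parent)
 *
 * succeeds with *pprev set to the [0x1000,0x2000) vma and *rb_link pointing
 * at the empty rbtree slot where a vma for that hole would be linked,
 * whereas asking for [0x1800,0x2800) returns -ENOMEM because an existing
 * vma overlaps the range.
 */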
/*
 * munmap_vma_range() - munmap VMAs that overlap a range.
 * @mm: The mm struct
 * @start: The start of the range.
 * @len: The length of the range.
 * @pprev: pointer to the pointer that will be set to previous vm_area_struct
 * @rb_link: the rb_node
 * @rb_parent: the parent rb_node
 *
 * Find all the vm_area_struct that overlap from @start to
 * @end and munmap them.  Set @pprev to the previous vm_area_struct.
 *
 * Returns: -ENOMEM on munmap failure or 0 on success.
 */
static inline int
munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len,
		 struct vm_area_struct **pprev, struct rb_node ***link,
		 struct rb_node **parent, struct list_head *uf)
{

	while (find_vma_links(mm, start, start + len, pprev, link, parent))
		if (do_munmap(mm, start, len, uf))
			return -ENOMEM;

	return 0;
}

static unsigned long count_vma_pages_range(struct mm_struct *mm,
		unsigned long addr, unsigned long end)
{
	unsigned long nr_pages = 0;
	struct vm_area_struct *vma;

	/* Find first overlapping mapping */
	vma = find_vma_intersection(mm, addr, end);
	if (!vma)
		return 0;

	nr_pages = (min(end, vma->vm_end) -
		max(addr, vma->vm_start)) >> PAGE_SHIFT;

	/* Iterate over the rest of the overlaps */
	for (vma = vma->vm_next; vma; vma = vma->vm_next) {
		unsigned long overlap_len;

		if (vma->vm_start > end)
			break;

		overlap_len = min(end, vma->vm_end) - vma->vm_start;
		nr_pages += overlap_len >> PAGE_SHIFT;
	}

	return nr_pages;
}

void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	/* Update tracking information for the gap following the new vma. */
	if (vma->vm_next)
		vma_gap_update(vma->vm_next);
	else
		mm->highest_vm_end = vm_end_gap(vma);

	/*
	 * vma->vm_prev wasn't known when we followed the rbtree to find the
	 * correct insertion point for that vma. As a result, we could not
	 * update the vma vm_rb parents rb_subtree_gap values on the way down.
	 * So, we first insert the vma with a zero rb_subtree_gap value
	 * (to be consistent with what we did on the way down), and then
	 * immediately update the gap to the correct value. Finally we
	 * rebalance the rbtree after all augmented values have been set.
	 */
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	vma->rb_subtree_gap = 0;
	vma_gap_update(vma);
	vma_rb_insert(vma, &mm->mm_rb);
}

static void __vma_link_file(struct vm_area_struct *vma)
{
	struct file *file;

	file = vma->vm_file;
	if (file) {
		struct address_space *mapping = file->f_mapping;

		if (vma->vm_flags & VM_DENYWRITE)
			put_write_access(file_inode(file));
		if (vma->vm_flags & VM_SHARED)
			mapping_allow_writable(mapping);

		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}
}

static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
}

static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	struct address_space *mapping = NULL;

	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;
		i_mmap_lock_write(mapping);
	}

	__vma_link(mm, vma, prev, rb_link, rb_parent);
	__vma_link_file(vma);

	if (mapping)
		i_mmap_unlock_write(mapping);

	mm->map_count++;
	validate_mm(mm);
}
/*
 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
 * mm's list and rbtree.  It has already been inserted into the interval tree.
 */
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		BUG();
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	mm->map_count++;
}

static __always_inline void __vma_unlink(struct mm_struct *mm,
						struct vm_area_struct *vma,
						struct vm_area_struct *ignore)
{
	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
	__vma_unlink_list(mm, vma);
	/* Kill the cache */
	vmacache_invalidate(mm);
}

/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary.  The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
	struct address_space *mapping = NULL;
	struct rb_root_cached *root = NULL;
	struct anon_vma *anon_vma = NULL;
	struct file *file = vma->vm_file;
	bool start_changed = false, end_changed = false;
	long adjust_next = 0;
	int remove_next = 0;

	if (next && !insert) {
		struct vm_area_struct *exporter = NULL, *importer = NULL;

		if (end >= next->vm_end) {
			/*
			 * vma expands, overlapping all the next, and
			 * perhaps the one after too (mprotect case 6).
			 * The only other cases that get here are
			 * case 1, case 7 and case 8.
			 */
			if (next == expand) {
				/*
				 * The only case where we don't expand "vma"
				 * and we expand "next" instead is case 8.
				 */
				VM_WARN_ON(end != next->vm_end);
				/*
				 * remove_next == 3 means we're
				 * removing "vma" and that to do so we
				 * swapped "vma" and "next".
				 */
				remove_next = 3;
				VM_WARN_ON(file != next->vm_file);
				swap(vma, next);
			} else {
				VM_WARN_ON(expand != vma);
				/*
				 * case 1, 6, 7, remove_next == 2 is case 6,
				 * remove_next == 1 is case 1 or 7.
				 */
				remove_next = 1 + (end > next->vm_end);
				VM_WARN_ON(remove_next == 2 &&
					   end != next->vm_next->vm_end);
				/* trim end to next, for case 6 first pass */
				end = next->vm_end;
			}

			exporter = next;
			importer = vma;

			/*
			 * If next doesn't have anon_vma, import from vma after
			 * next, if the vma overlaps with it.
			 */
			if (remove_next == 2 && !next->anon_vma)
				exporter = next->vm_next;

		} else if (end > next->vm_start) {
			/*
			 * vma expands, overlapping part of the next:
			 * mprotect case 5 shifting the boundary up.
			 */
			adjust_next = (end - next->vm_start);
			exporter = next;
			importer = vma;
			VM_WARN_ON(expand != importer);
		} else if (end < vma->vm_end) {
			/*
			 * vma shrinks, and !insert tells it's not
			 * split_vma inserting another: so it must be
			 * mprotect case 4 shifting the boundary down.
			 */
			adjust_next = -(vma->vm_end - end);
			exporter = vma;
			importer = next;
			VM_WARN_ON(expand != importer);
		}

		/*
		 * Easily overlooked: when mprotect shifts the boundary,
		 * make sure the expanding vma has anon_vma set if the
		 * shrinking vma had, to cover any anon pages imported.
		 */
		if (exporter && exporter->anon_vma && !importer->anon_vma) {
			int error;

			importer->anon_vma = exporter->anon_vma;
			error = anon_vma_clone(importer, exporter);
			if (error)
				return error;
		}
	}
again:
	vma_adjust_trans_huge(orig_vma, start, end, adjust_next);

	if (file) {
		mapping = file->f_mapping;
		root = &mapping->i_mmap;
		uprobe_munmap(vma, vma->vm_start, vma->vm_end);

		if (adjust_next)
			uprobe_munmap(next, next->vm_start, next->vm_end);

		i_mmap_lock_write(mapping);
		if (insert) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(insert);
		}
	}

	anon_vma = vma->anon_vma;
	if (!anon_vma && adjust_next)
		anon_vma = next->anon_vma;
	if (anon_vma) {
		VM_WARN_ON(adjust_next && next->anon_vma &&
			   anon_vma != next->anon_vma);
		anon_vma_lock_write(anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_pre_update_vma(next);
	}

	if (file) {
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, root);
		if (adjust_next)
			vma_interval_tree_remove(next, root);
	}

	if (start != vma->vm_start) {
		vma->vm_start = start;
		start_changed = true;
	}
	if (end != vma->vm_end) {
		vma->vm_end = end;
		end_changed = true;
	}
	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next;
		next->vm_pgoff += adjust_next >> PAGE_SHIFT;
	}

	if (file) {
		if (adjust_next)
			vma_interval_tree_insert(next, root);
		vma_interval_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}

	if (remove_next) {
		/*
		 * vma_merge has merged next into vma, and needs
		 * us to remove next before dropping the locks.
		 */
		if (remove_next != 3)
			__vma_unlink(mm, next, next);
		else
			/*
			 * vma is not before next if they've been
			 * swapped.
			 *
			 * pre-swap() next->vm_start was reduced so
			 * tell validate_mm_rb to ignore pre-swap()
			 * "next" (which is stored in post-swap()
			 * "vma").
			 */
			__vma_unlink(mm, next, vma);
		if (file)
			__remove_shared_vm_struct(next, file, mapping);
	} else if (insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		__insert_vm_struct(mm, insert);
	} else {
		if (start_changed)
			vma_gap_update(vma);
		if (end_changed) {
			if (!next)
				mm->highest_vm_end = vm_end_gap(vma);
			else if (!adjust_next)
				vma_gap_update(next);
		}
	}

	if (anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_post_update_vma(next);
		anon_vma_unlock_write(anon_vma);
	}

	if (file) {
		i_mmap_unlock_write(mapping);
		uprobe_mmap(vma);

		if (adjust_next)
			uprobe_mmap(next);
	}

	if (remove_next) {
		if (file) {
			uprobe_munmap(next, next->vm_start, next->vm_end);
			fput(file);
		}
		if (next->anon_vma)
			anon_vma_merge(vma, next);
		mm->map_count--;
		mpol_put(vma_policy(next));
		vm_area_free(next);
		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we must remove another next too. It would clutter
		 * up the code too much to do both in one go.
		 */
		if (remove_next != 3) {
			/*
			 * If "next" was removed and vma->vm_end was
			 * expanded (up) over it, in turn
			 * "next->vm_prev->vm_end" changed and the
			 * "vma->vm_next" gap must be updated.
			 */
			next = vma->vm_next;
		} else {
			/*
			 * For the scope of the comment "next" and
			 * "vma" considered pre-swap(): if "vma" was
			 * removed, next->vm_start was expanded (down)
			 * over it and the "next" gap must be updated.
			 * Because of the swap() the post-swap() "vma"
			 * actually points to pre-swap() "next"
			 * (whereas post-swap() "next" is now a
			 * dangling pointer).
			 */
			next = vma;
		}
		if (remove_next == 2) {
			remove_next = 1;
			end = next->vm_end;
			goto again;
		}
		else if (next)
			vma_gap_update(next);
		else {
			/*
			 * If remove_next == 2 we obviously can't
			 * reach this path.
			 *
			 * If remove_next == 3 we can't reach this
			 * path because pre-swap() next is always not
			 * NULL. pre-swap() "next" is not being
			 * removed and its next->vm_end is not altered
			 * (and furthermore "end" already matches
			 * next->vm_end in remove_next == 3).
			 *
			 * We reach this only in the remove_next == 1
			 * case if the "next" vma that was removed was
			 * the highest vma of the mm. However in such
			 * case next->vm_end == "end" and the extended
			 * "vma" has vma->vm_end == next->vm_end so
			 * mm->highest_vm_end doesn't need any update
			 * in remove_next == 1 case.
			 */
			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
		}
	}
	if (insert && file)
		uprobe_mmap(insert);

	validate_mm(mm);

	return 0;
}
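/*
 * Illustrative example (not in the original source): in the simplest merge,
 * mprotect case 1 of the vma_merge() comment below, turning
 *
 *	prev [0x1000,0x2000) PROT_READ | next [0x2000,0x3000) -> PROT_READ
 *
 * into one vma is performed by __vma_adjust(prev, 0x1000, 0x3000,
 * prev->vm_pgoff, NULL, prev): "prev" is expanded over "next"
 * (remove_next == 1) and "next" is unlinked and freed.
 */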
/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
 */
static inline int is_mergeable_vma(struct vm_area_struct *vma,
				struct file *file, unsigned long vm_flags,
				struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	/*
	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
	 * match the flags but dirty bit -- the caller should mark
	 * merged VMA as dirty. If dirty bit won't be excluded from
	 * comparison, we increase pressure on the memory system forcing
	 * the kernel to generate new VMAs when old one could be
	 * extended instead.
	 */
	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
		return 0;
	if (vma->vm_file != file)
		return 0;
	if (vma->vm_ops && vma->vm_ops->close)
		return 0;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
		return 0;
	return 1;
}

static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					struct anon_vma *anon_vma2,
					struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMAs cloned from
	 * parents; this improves scalability by reducing contention on the
	 * shared anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return 1;
	return anon_vma1 == anon_vma2;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
		     struct anon_vma *anon_vma, struct file *file,
		     pgoff_t vm_pgoff,
		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
		    struct anon_vma *anon_vma, struct file *file,
		    pgoff_t vm_pgoff,
		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		pgoff_t vm_pglen;
		vm_pglen = vma_pages(vma);
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return 1;
	}
	return 0;
}
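/*
 * Illustrative example (not in the original source) of the vm_pgoff check
 * in can_vma_merge_after(): an existing file mapping of two pages,
 *
 *	vma: [0x10000,0x12000), vm_pgoff == 0	(vma_pages() == 2)
 *
 * can be merged with a new mapping of the same file at [0x12000,...) only
 * if the new mapping's pgoff is 2, i.e. its file offsets continue seamlessly
 * where the old mapping left off (0 + 2 == 2).
 */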
/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where AAAA is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
 *
 *     AAAA             AAAA                   AAAA
 *    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPNNNNNN
 *    cannot merge    might become       might become
 *                    PPNNNNNNNNNN       PPPPPPPPPPNN
 *    mmap, brk or    case 4 below       case 5 below
 *    mremap move:
 *                        AAAA               AAAA
 *                    PPPP    NNNN       PPPPNNNNXXXX
 *                    might become       might become
 *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
 *                    PPPPPPPPNNNN 2 or  PPPPPPPPXXXX 7 or
 *                    PPPPNNNNNNNN 3     PPPPXXXXXXXX 8
 *
 * It is important for case 8 that the vma NNNN overlapping the
 * region AAAA is never going to be extended over XXXX. Instead XXXX must
 * be extended in region AAAA and NNNN must be removed. This way in
 * all cases where vma_merge succeeds, the moment vma_adjust drops the
 * rmap_locks, the properties of the merged vma will be already
 * correct for the whole merged range. Some of those properties like
 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
 * be correct for the whole merged range immediately after the
 * rmap_locks are released. Otherwise if XXXX would be removed and
 * NNNN would be extended over the XXXX range, remove_migration_ptes
 * or other rmap walkers (if working on addresses beyond the "end"
 * parameter) may establish ptes with the wrong permissions of NNNN
 * instead of the right permissions of XXXX.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy,
			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;
	int err;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	next = vma_next(mm, prev);
	area = next;
	if (area && area->vm_end == end)		/* cases 6, 7, 8 */
		next = next->vm_next;

	/* verify some invariant that must be enforced by the caller */
	VM_WARN_ON(prev && addr <= prev->vm_start);
	VM_WARN_ON(area && end > area->vm_end);
	VM_WARN_ON(addr >= end);

	/*
	 * Can it merge with the predecessor?
	 */
	if (prev && prev->vm_end == addr &&
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
					    anon_vma, file, pgoff,
					    vm_userfaultfd_ctx)) {
		/*
		 * OK, it can.  Can we now merge in the successor as well?
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags,
						     anon_vma, file,
						     pgoff+pglen,
						     vm_userfaultfd_ctx) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma, NULL)) {
							/* cases 1, 6 */
			err = __vma_adjust(prev, prev->vm_start,
					 next->vm_end, prev->vm_pgoff, NULL,
					 prev);
		} else					/* cases 2, 5, 7 */
			err = __vma_adjust(prev, prev->vm_start,
					 end, prev->vm_pgoff, NULL, prev);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(prev, vm_flags);
		return prev;
	}

	/*
	 * Can this new request be merged in front of next?
	 */
	if (next && end == next->vm_start &&
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					     anon_vma, file, pgoff+pglen,
					     vm_userfaultfd_ctx)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			err = __vma_adjust(prev, prev->vm_start,
					 addr, prev->vm_pgoff, NULL, next);
		else {					/* cases 3, 8 */
			err = __vma_adjust(area, addr, next->vm_end,
					 next->vm_pgoff - pglen, NULL, next);
			/*
			 * In case 3 area is already equal to next and
			 * this is a noop, but in case 8 "area" has
			 * been removed and next was expanded over it.
			 */
			area = next;
		}
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(area, vm_flags);
		return area;
	}

	return NULL;
}

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mm_sem held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that READ_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mm_sem.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}
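/*
 * Illustrative example (not in the original source) of the flag test in
 * anon_vma_compatible(): with VM_ACCESS_FLAGS covering VM_READ, VM_WRITE
 * and VM_EXEC, two adjacent anonymous vmas that differ only in, say,
 * PROT_READ vs PROT_READ|PROT_WRITE still pass the check, because
 *
 *	(a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY) == 0
 *
 * whereas a difference in, e.g., VM_SHARED makes the XOR survive the mask
 * and the vmas are not considered for anon_vma reuse.
 */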
/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = NULL;

	/* Try next first. */
	if (vma->vm_next) {
		anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next);
		if (anon_vma)
			return anon_vma;
	}

	/* Then try prev. */
	if (vma->vm_prev)
		anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma);

	/*
	 * We might reach here with anon_vma == NULL if we can't find
	 * any reusable anon_vma.
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return anon_vma;
}

/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}

static inline int mlock_future_check(struct mm_struct *mm,
				     unsigned long flags,
				     unsigned long len)
{
	unsigned long locked, lock_limit;

	/* mlock MCL_FUTURE? */
	if (flags & VM_LOCKED) {
		locked = len >> PAGE_SHIFT;
		locked += mm->locked_vm;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		lock_limit >>= PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return -EAGAIN;
	}
	return 0;
}

static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISBLK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISSOCK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	/* Special "we do even unsigned file positions" case */
	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
		return 0;

	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
	return ULONG_MAX;
}

static inline bool file_mmap_ok(struct file *file, struct inode *inode,
				unsigned long pgoff, unsigned long len)
{
	u64 maxsize = file_mmap_size_max(file, inode);

	if (maxsize && len > maxsize)
		return false;
	maxsize -= len;
	if (pgoff > maxsize >> PAGE_SHIFT)
		return false;
	return true;
}
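/*
 * Illustrative example (not in the original source) of the overflow check
 * in file_mmap_ok(): for a regular file, maxsize == MAX_LFS_FILESIZE.
 * Mapping len bytes at page offset pgoff is allowed only when
 * pgoff <= (maxsize - len) >> PAGE_SHIFT, i.e. when the last byte of the
 * mapping, (pgoff << PAGE_SHIFT) + len, still lies within the maximum file
 * size, so a huge pgoff cannot wrap the file-offset arithmetic.
 */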
/*
 * The caller must write-lock current->mm->mmap_lock.
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, unsigned long pgoff,
			unsigned long *populate, struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;
	int pkey = 0;

	*populate = 0;

	if (!len)
		return -EINVAL;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we don't add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && path_noexec(&file->f_path)))
			prot |= PROT_EXEC;

	/* force arch specific MAP_FIXED handling in get_unmapped_area */
	if (flags & MAP_FIXED_NOREPLACE)
		flags |= MAP_FIXED;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/* Obtain the address to map to. We verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (flags & MAP_FIXED_NOREPLACE) {
		if (find_vma_intersection(mm, addr, addr + len))
			return -EEXIST;
	}

	if (prot == PROT_EXEC) {
		pkey = execute_only_pkey(mm);
		if (pkey < 0)
			pkey = 0;
	}

	/* Do simple checking here so the lower-level routines won't have
	 * to. We assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	if (flags & MAP_LOCKED)
		if (!can_do_mlock())
			return -EPERM;

	if (mlock_future_check(mm, vm_flags, len))
		return -EAGAIN;

	if (file) {
		struct inode *inode = file_inode(file);
		unsigned long flags_mask;

		if (!file_mmap_ok(file, inode, pgoff, len))
			return -EOVERFLOW;

		flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;

		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/*
			 * Force use of MAP_SHARED_VALIDATE with non-legacy
			 * flags. E.g. MAP_SYNC is dangerous to use with
			 * MAP_SHARED as you don't know which consistency model
			 * you will get. We silently ignore unsupported flags
			 * with MAP_SHARED to preserve backward compatibility.
			 */
			flags &= LEGACY_MAP_MASK;
			fallthrough;
		case MAP_SHARED_VALIDATE:
			if (flags & ~flags_mask)
				return -EOPNOTSUPP;
			if (prot & PROT_WRITE) {
				if (!(file->f_mode & FMODE_WRITE))
					return -EACCES;
				if (IS_SWAPFILE(file->f_mapping->host))
					return -ETXTBSY;
			}

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure there are no mandatory locks on the file.
			 */
			if (locks_verify_locked(file))
				return -EAGAIN;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
			fallthrough;
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (path_noexec(&file->f_path)) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!file->f_op->mmap)
				return -ENODEV;
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if (flags & MAP_NORESERVE) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}

unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		file = fget(fd);
		if (!file)
			return -EBADF;
		if (is_file_hugepages(file)) {
			len = ALIGN(len, huge_page_size(hstate_file(file)));
		} else if (unlikely(flags & MAP_HUGETLB)) {
			retval = -EINVAL;
			goto out_fput;
		}
	} else if (flags & MAP_HUGETLB) {
		struct ucounts *ucounts = NULL;
		struct hstate *hs;

		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (!hs)
			return -EINVAL;

		len = ALIGN(len, huge_page_size(hs));
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called
		 * A dummy user value is used because we are not locking
		 * memory so no accounting is necessary
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				&ucounts, HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	flags &= ~MAP_DENYWRITE;

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
	if (file)
		fput(file);
	return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
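/*
 * Illustrative userspace sketch (not in the original source): a typical
 * mmap() call reaches ksys_mmap_pgoff() with the byte offset already
 * converted to a page offset by the libc/arch wrapper.  For example,
 *
 *	void *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * corresponds to ksys_mmap_pgoff(0, 8192, PROT_READ|PROT_WRITE,
 * MAP_PRIVATE|MAP_ANONYMOUS, -1, 0), with pgoff == offset >> PAGE_SHIFT.
 */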
#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	vm_flags_t vm_flags = vma->vm_flags;
	const struct vm_operations_struct *vm_ops = vma->vm_ops;

	/* If it was private or non-writable, the write bit is already clear */
	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
		return 1;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
		return 0;

	/* Do we need to track softdirty? */
	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
		return 1;

	/* Specialty mapping? */
	if (vm_flags & VM_PFNMAP)
		return 0;

	/* Can the mapping track the dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_can_writeback(vma->vm_file->f_mapping);
}

/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return 0;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}
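/*
 * Illustrative example (not in the original source) of the mask test above:
 * the expression equals VM_WRITE iff the mapping is writable (VM_WRITE set)
 * AND private (VM_SHARED clear) AND reserved (VM_NORESERVE clear).  So a
 * private read/write anonymous mapping is charged against overcommit,
 * while a MAP_SHARED or MAP_NORESERVE mapping is not charged here.
 */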
unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev, *merge;
	int error;
	struct rb_node **rb_link, *rb_parent;
	unsigned long charged = 0;

	/* Check against address space limit. */
	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
		unsigned long nr_pages;

		/*
		 * MAP_FIXED may remove pages of mappings that intersect with
		 * the requested mapping. Account for the pages it would unmap.
		 */
		nr_pages = count_vma_pages_range(mm, addr, addr + len);

		if (!may_expand_vm(mm, vm_flags,
					(len >> PAGE_SHIFT) - nr_pages))
			return -ENOMEM;
	}

	/* Clear old maps, set up prev, rb_link, rb_parent, and uf */
	if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
		return -ENOMEM;
	/*
	 * Private writable mapping: check memory availability
	 */
	if (accountable_mapping(file, vm_flags)) {
		charged = len >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return -ENOMEM;
		vm_flags |= VM_ACCOUNT;
	}

	/*
	 * Can we just expand an old mapping?
	 */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. The address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
	 */
	vma = vm_area_alloc(mm);
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}

	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
	vma->vm_pgoff = pgoff;

	if (file) {
		if (vm_flags & VM_DENYWRITE) {
			error = deny_write_access(file);
			if (error)
				goto free_vma;
		}
		if (vm_flags & VM_SHARED) {
			error = mapping_map_writable(file->f_mapping);
			if (error)
				goto allow_write_and_free_vma;
		}

		/* ->mmap() can change vma->vm_file, but must guarantee that
		 * vma_link() below can deny write-access if VM_DENYWRITE is set
		 * and map writably if VM_SHARED is set. This usually means the
		 * new file must not have been exposed to user-space, yet.
		 */
		vma->vm_file = get_file(file);
		error = call_mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;

		/* Can addr have changed??
		 *
		 * Answer: Yes, several device drivers can do it in their
		 *         f_op->mmap method. -DaveM
		 * Bug: If addr is changed, prev, rb_link, rb_parent should
		 *      be updated for vma_link()
		 */
		WARN_ON_ONCE(addr != vma->vm_start);

		addr = vma->vm_start;

		/* If vm_flags changed after call_mmap(), we should try merge
		 * vma again as we may succeed this time.
		 */
		if (unlikely(vm_flags != vma->vm_flags && prev)) {
			merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
				vma->vm_flags, NULL, vma->vm_file,
				vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX);
			if (merge) {
				/* ->mmap() can change vma->vm_file and fput
				 * the original file. So fput the vma->vm_file
				 * here or we would add an extra fput for file
				 * and cause general protection fault
				 * ultimately.
				 */
				fput(vma->vm_file);
				vm_area_free(vma);
				vma = merge;
				/* Update vm_flags to pick up the change. */
				vm_flags = vma->vm_flags;
				goto unmap_writable;
			}
		}

		vm_flags = vma->vm_flags;
	} else if (vm_flags & VM_SHARED) {
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	} else {
		vma_set_anonymous(vma);
	}

	/* Allow architectures to sanity-check the vm_flags */
	if (!arch_validate_flags(vma->vm_flags)) {
		error = -EINVAL;
		if (file)
			goto unmap_and_free_vma;
		else
			goto free_vma;
	}

	vma_link(mm, vma, prev, rb_link, rb_parent);
	/* Once vma denies write, undo our temporary denial count */
	if (file) {
unmap_writable:
		if (vm_flags & VM_SHARED)
			mapping_unmap_writable(file->f_mapping);
		if (vm_flags & VM_DENYWRITE)
			allow_write_access(file);
	}
	file = vma->vm_file;
out:
	perf_event_mmap(vma);

	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
	if (vm_flags & VM_LOCKED) {
		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
					is_vm_hugetlb_page(vma) ||
					vma == get_gate_vma(current->mm))
			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
		else
			mm->locked_vm += (len >> PAGE_SHIFT);
	}

	if (file)
		uprobe_mmap(vma);

	/*
	 * A new (or expanded) vma always gets soft dirty status.
	 * Otherwise the user-space soft-dirty page tracker won't
	 * be able to distinguish the situation where a vma area is
	 * unmapped and a new one is mapped in its place (which must
	 * be treated as a completely new data area).
	 */
	vma->vm_flags |= VM_SOFTDIRTY;

	vma_set_page_prot(vma);

	return addr;

unmap_and_free_vma:
	fput(vma->vm_file);
	vma->vm_file = NULL;

	/* Undo any partial mapping done by a device driver. */
	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
	charged = 0;
	if (vm_flags & VM_SHARED)
		mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
	if (vm_flags & VM_DENYWRITE)
		allow_write_access(file);
free_vma:
	vm_area_free(vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);
	return error;
}
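/*
 * Illustrative example (not in the original source): callers such as the
 * arch get_unmapped_area() implementations drive the two gap searches below
 * through struct vm_unmapped_area_info, roughly:
 *
 *	struct vm_unmapped_area_info info = {
 *		.flags		= 0,	/* or VM_UNMAPPED_AREA_TOPDOWN */
 *		.length		= len,
 *		.low_limit	= mm->mmap_base,
 *		.high_limit	= TASK_SIZE,
 *		.align_mask	= 0,
 *		.align_offset	= 0,
 *	};
 *	addr = vm_unmapped_area(&info);
 *
 * (Field values are arch-specific; this only shows the shape of the API.)
 */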
That is, 1926 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; 1927 * - gap_end = vma->vm_start >= info->low_limit + length; 1928 * - gap_end - gap_start >= length 1929 */ 1930 1931 struct mm_struct *mm = current->mm; 1932 struct vm_area_struct *vma; 1933 unsigned long length, low_limit, high_limit, gap_start, gap_end; 1934 1935 /* Adjust search length to account for worst case alignment overhead */ 1936 length = info->length + info->align_mask; 1937 if (length < info->length) 1938 return -ENOMEM; 1939 1940 /* Adjust search limits by the desired length */ 1941 if (info->high_limit < length) 1942 return -ENOMEM; 1943 high_limit = info->high_limit - length; 1944 1945 if (info->low_limit > high_limit) 1946 return -ENOMEM; 1947 low_limit = info->low_limit + length; 1948 1949 /* Check if rbtree root looks promising */ 1950 if (RB_EMPTY_ROOT(&mm->mm_rb)) 1951 goto check_highest; 1952 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); 1953 if (vma->rb_subtree_gap < length) 1954 goto check_highest; 1955 1956 while (true) { 1957 /* Visit left subtree if it looks promising */ 1958 gap_end = vm_start_gap(vma); 1959 if (gap_end >= low_limit && vma->vm_rb.rb_left) { 1960 struct vm_area_struct *left = 1961 rb_entry(vma->vm_rb.rb_left, 1962 struct vm_area_struct, vm_rb); 1963 if (left->rb_subtree_gap >= length) { 1964 vma = left; 1965 continue; 1966 } 1967 } 1968 1969 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; 1970 check_current: 1971 /* Check if current node has a suitable gap */ 1972 if (gap_start > high_limit) 1973 return -ENOMEM; 1974 if (gap_end >= low_limit && 1975 gap_end > gap_start && gap_end - gap_start >= length) 1976 goto found; 1977 1978 /* Visit right subtree if it looks promising */ 1979 if (vma->vm_rb.rb_right) { 1980 struct vm_area_struct *right = 1981 rb_entry(vma->vm_rb.rb_right, 1982 struct vm_area_struct, vm_rb); 1983 if (right->rb_subtree_gap >= length) { 1984 vma = right; 1985 continue; 1986 } 1987 } 1988 1989 /* Go back up the rbtree to find next candidate node */ 1990 while (true) { 1991 struct rb_node *prev = &vma->vm_rb; 1992 if (!rb_parent(prev)) 1993 goto check_highest; 1994 vma = rb_entry(rb_parent(prev), 1995 struct vm_area_struct, vm_rb); 1996 if (prev == vma->vm_rb.rb_left) { 1997 gap_start = vm_end_gap(vma->vm_prev); 1998 gap_end = vm_start_gap(vma); 1999 goto check_current; 2000 } 2001 } 2002 } 2003 2004 check_highest: 2005 /* Check highest gap, which does not precede any rbtree node */ 2006 gap_start = mm->highest_vm_end; 2007 gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */ 2008 if (gap_start > high_limit) 2009 return -ENOMEM; 2010 2011 found: 2012 /* We found a suitable gap. Clip it with the original low_limit. 
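* Then the gap is adjusted to the requested alignment: the expression
* below rounds gap_start up so that (gap_start & align_mask) ==
* (align_offset & align_mask). For example (editor's illustration):
* with align_mask == 0x1fffff (2MB - 1) and align_offset == 0, a
* gap_start of 0x1234000 is rounded up to 0x1400000.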
*/ 2013 if (gap_start < info->low_limit) 2014 gap_start = info->low_limit; 2015 2016 /* Adjust gap address to the desired alignment */ 2017 gap_start += (info->align_offset - gap_start) & info->align_mask; 2018 2019 VM_BUG_ON(gap_start + info->length > info->high_limit); 2020 VM_BUG_ON(gap_start + info->length > gap_end); 2021 return gap_start; 2022 } 2023 2024 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) 2025 { 2026 struct mm_struct *mm = current->mm; 2027 struct vm_area_struct *vma; 2028 unsigned long length, low_limit, high_limit, gap_start, gap_end; 2029 2030 /* Adjust search length to account for worst case alignment overhead */ 2031 length = info->length + info->align_mask; 2032 if (length < info->length) 2033 return -ENOMEM; 2034 2035 /* 2036 * Adjust search limits by the desired length. 2037 * See implementation comment at top of unmapped_area(). 2038 */ 2039 gap_end = info->high_limit; 2040 if (gap_end < length) 2041 return -ENOMEM; 2042 high_limit = gap_end - length; 2043 2044 if (info->low_limit > high_limit) 2045 return -ENOMEM; 2046 low_limit = info->low_limit + length; 2047 2048 /* Check highest gap, which does not precede any rbtree node */ 2049 gap_start = mm->highest_vm_end; 2050 if (gap_start <= high_limit) 2051 goto found_highest; 2052 2053 /* Check if rbtree root looks promising */ 2054 if (RB_EMPTY_ROOT(&mm->mm_rb)) 2055 return -ENOMEM; 2056 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); 2057 if (vma->rb_subtree_gap < length) 2058 return -ENOMEM; 2059 2060 while (true) { 2061 /* Visit right subtree if it looks promising */ 2062 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; 2063 if (gap_start <= high_limit && vma->vm_rb.rb_right) { 2064 struct vm_area_struct *right = 2065 rb_entry(vma->vm_rb.rb_right, 2066 struct vm_area_struct, vm_rb); 2067 if (right->rb_subtree_gap >= length) { 2068 vma = right; 2069 continue; 2070 } 2071 } 2072 2073 check_current: 2074 /* Check if current node has a suitable gap */ 2075 gap_end = vm_start_gap(vma); 2076 if (gap_end < low_limit) 2077 return -ENOMEM; 2078 if (gap_start <= high_limit && 2079 gap_end > gap_start && gap_end - gap_start >= length) 2080 goto found; 2081 2082 /* Visit left subtree if it looks promising */ 2083 if (vma->vm_rb.rb_left) { 2084 struct vm_area_struct *left = 2085 rb_entry(vma->vm_rb.rb_left, 2086 struct vm_area_struct, vm_rb); 2087 if (left->rb_subtree_gap >= length) { 2088 vma = left; 2089 continue; 2090 } 2091 } 2092 2093 /* Go back up the rbtree to find next candidate node */ 2094 while (true) { 2095 struct rb_node *prev = &vma->vm_rb; 2096 if (!rb_parent(prev)) 2097 return -ENOMEM; 2098 vma = rb_entry(rb_parent(prev), 2099 struct vm_area_struct, vm_rb); 2100 if (prev == vma->vm_rb.rb_right) { 2101 gap_start = vma->vm_prev ? 2102 vm_end_gap(vma->vm_prev) : 0; 2103 goto check_current; 2104 } 2105 } 2106 } 2107 2108 found: 2109 /* We found a suitable gap. Clip it with the original high_limit. */ 2110 if (gap_end > info->high_limit) 2111 gap_end = info->high_limit; 2112 2113 found_highest: 2114 /* Compute highest gap address at the desired alignment */ 2115 gap_end -= info->length; 2116 gap_end -= (gap_end - info->align_offset) & info->align_mask; 2117 2118 VM_BUG_ON(gap_end < info->low_limit); 2119 VM_BUG_ON(gap_end < gap_start); 2120 return gap_end; 2121 } 2122 2123 /* 2124 * Search for an unmapped address range. 
2125 * 2126 * We are looking for a range that: 2127 * - does not intersect with any VMA; 2128 * - is contained within the [low_limit, high_limit) interval; 2129 * - is at least the desired size. 2130 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) 2131 */ 2132 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) 2133 { 2134 unsigned long addr; 2135 2136 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) 2137 addr = unmapped_area_topdown(info); 2138 else 2139 addr = unmapped_area(info); 2140 2141 trace_vm_unmapped_area(addr, info); 2142 return addr; 2143 } 2144 2145 #ifndef arch_get_mmap_end 2146 #define arch_get_mmap_end(addr) (TASK_SIZE) 2147 #endif 2148 2149 #ifndef arch_get_mmap_base 2150 #define arch_get_mmap_base(addr, base) (base) 2151 #endif 2152 2153 /* Get an address range which is currently unmapped. 2154 * For shmat() with addr=0. 2155 * 2156 * Ugly calling convention alert: 2157 * Return value with the low bits set means error value, 2158 * ie 2159 * if (ret & ~PAGE_MASK) 2160 * error = ret; 2161 * 2162 * This function "knows" that -ENOMEM has the bits set. 2163 */ 2164 #ifndef HAVE_ARCH_UNMAPPED_AREA 2165 unsigned long 2166 arch_get_unmapped_area(struct file *filp, unsigned long addr, 2167 unsigned long len, unsigned long pgoff, unsigned long flags) 2168 { 2169 struct mm_struct *mm = current->mm; 2170 struct vm_area_struct *vma, *prev; 2171 struct vm_unmapped_area_info info; 2172 const unsigned long mmap_end = arch_get_mmap_end(addr); 2173 2174 if (len > mmap_end - mmap_min_addr) 2175 return -ENOMEM; 2176 2177 if (flags & MAP_FIXED) 2178 return addr; 2179 2180 if (addr) { 2181 addr = PAGE_ALIGN(addr); 2182 vma = find_vma_prev(mm, addr, &prev); 2183 if (mmap_end - len >= addr && addr >= mmap_min_addr && 2184 (!vma || addr + len <= vm_start_gap(vma)) && 2185 (!prev || addr >= vm_end_gap(prev))) 2186 return addr; 2187 } 2188 2189 info.flags = 0; 2190 info.length = len; 2191 info.low_limit = mm->mmap_base; 2192 info.high_limit = mmap_end; 2193 info.align_mask = 0; 2194 info.align_offset = 0; 2195 return vm_unmapped_area(&info); 2196 } 2197 #endif 2198 2199 /* 2200 * This mmap-allocator allocates new areas top-down from below the 2201 * stack's low limit (the base): 2202 */ 2203 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 2204 unsigned long 2205 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 2206 unsigned long len, unsigned long pgoff, 2207 unsigned long flags) 2208 { 2209 struct vm_area_struct *vma, *prev; 2210 struct mm_struct *mm = current->mm; 2211 struct vm_unmapped_area_info info; 2212 const unsigned long mmap_end = arch_get_mmap_end(addr); 2213 2214 /* requested length too big for entire address space */ 2215 if (len > mmap_end - mmap_min_addr) 2216 return -ENOMEM; 2217 2218 if (flags & MAP_FIXED) 2219 return addr; 2220 2221 /* requesting a specific address */ 2222 if (addr) { 2223 addr = PAGE_ALIGN(addr); 2224 vma = find_vma_prev(mm, addr, &prev); 2225 if (mmap_end - len >= addr && addr >= mmap_min_addr && 2226 (!vma || addr + len <= vm_start_gap(vma)) && 2227 (!prev || addr >= vm_end_gap(prev))) 2228 return addr; 2229 } 2230 2231 info.flags = VM_UNMAPPED_AREA_TOPDOWN; 2232 info.length = len; 2233 info.low_limit = max(PAGE_SIZE, mmap_min_addr); 2234 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); 2235 info.align_mask = 0; 2236 info.align_offset = 0; 2237 addr = vm_unmapped_area(&info); 2238 2239 /* 2240 * A failed mmap() very likely causes application failure, 2241 * so fall back to the bottom-up function here. 
This scenario 2242 * can happen with large stack limits and large mmap() 2243 * allocations. 2244 */ 2245 if (offset_in_page(addr)) { 2246 VM_BUG_ON(addr != -ENOMEM); 2247 info.flags = 0; 2248 info.low_limit = TASK_UNMAPPED_BASE; 2249 info.high_limit = mmap_end; 2250 addr = vm_unmapped_area(&info); 2251 } 2252 2253 return addr; 2254 } 2255 #endif 2256 2257 unsigned long 2258 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, 2259 unsigned long pgoff, unsigned long flags) 2260 { 2261 unsigned long (*get_area)(struct file *, unsigned long, 2262 unsigned long, unsigned long, unsigned long); 2263 2264 unsigned long error = arch_mmap_check(addr, len, flags); 2265 if (error) 2266 return error; 2267 2268 /* Careful about overflows.. */ 2269 if (len > TASK_SIZE) 2270 return -ENOMEM; 2271 2272 get_area = current->mm->get_unmapped_area; 2273 if (file) { 2274 if (file->f_op->get_unmapped_area) 2275 get_area = file->f_op->get_unmapped_area; 2276 } else if (flags & MAP_SHARED) { 2277 /* 2278 * mmap_region() will call shmem_zero_setup() to create a file, 2279 * so use shmem's get_unmapped_area in case it can be huge. 2280 * do_mmap() will clear pgoff, so match alignment. 2281 */ 2282 pgoff = 0; 2283 get_area = shmem_get_unmapped_area; 2284 } 2285 2286 addr = get_area(file, addr, len, pgoff, flags); 2287 if (IS_ERR_VALUE(addr)) 2288 return addr; 2289 2290 if (addr > TASK_SIZE - len) 2291 return -ENOMEM; 2292 if (offset_in_page(addr)) 2293 return -EINVAL; 2294 2295 error = security_mmap_addr(addr); 2296 return error ? error : addr; 2297 } 2298 2299 EXPORT_SYMBOL(get_unmapped_area); 2300 2301 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 2302 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) 2303 { 2304 struct rb_node *rb_node; 2305 struct vm_area_struct *vma; 2306 2307 /* Check the cache first. */ 2308 vma = vmacache_find(mm, addr); 2309 if (likely(vma)) 2310 return vma; 2311 2312 rb_node = mm->mm_rb.rb_node; 2313 2314 while (rb_node) { 2315 struct vm_area_struct *tmp; 2316 2317 tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); 2318 2319 if (tmp->vm_end > addr) { 2320 vma = tmp; 2321 if (tmp->vm_start <= addr) 2322 break; 2323 rb_node = rb_node->rb_left; 2324 } else 2325 rb_node = rb_node->rb_right; 2326 } 2327 2328 if (vma) 2329 vmacache_update(addr, vma); 2330 return vma; 2331 } 2332 2333 EXPORT_SYMBOL(find_vma); 2334 2335 /* 2336 * Same as find_vma, but also return a pointer to the previous VMA in *pprev. 2337 */ 2338 struct vm_area_struct * 2339 find_vma_prev(struct mm_struct *mm, unsigned long addr, 2340 struct vm_area_struct **pprev) 2341 { 2342 struct vm_area_struct *vma; 2343 2344 vma = find_vma(mm, addr); 2345 if (vma) { 2346 *pprev = vma->vm_prev; 2347 } else { 2348 struct rb_node *rb_node = rb_last(&mm->mm_rb); 2349 2350 *pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL; 2351 } 2352 return vma; 2353 } 2354 2355 /* 2356 * Verify that the stack growth is acceptable and 2357 * update accounting. This is shared with both the 2358 * grow-up and grow-down cases. 
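* For example (editor's illustration): with RLIMIT_STACK set to 8MB,
* any growth that would push the stack vma's total size past 8MB fails
* the stack limit test below with -ENOMEM.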
2359 */ 2360 static int acct_stack_growth(struct vm_area_struct *vma, 2361 unsigned long size, unsigned long grow) 2362 { 2363 struct mm_struct *mm = vma->vm_mm; 2364 unsigned long new_start; 2365 2366 /* address space limit tests */ 2367 if (!may_expand_vm(mm, vma->vm_flags, grow)) 2368 return -ENOMEM; 2369 2370 /* Stack limit test */ 2371 if (size > rlimit(RLIMIT_STACK)) 2372 return -ENOMEM; 2373 2374 /* mlock limit tests */ 2375 if (vma->vm_flags & VM_LOCKED) { 2376 unsigned long locked; 2377 unsigned long limit; 2378 locked = mm->locked_vm + grow; 2379 limit = rlimit(RLIMIT_MEMLOCK); 2380 limit >>= PAGE_SHIFT; 2381 if (locked > limit && !capable(CAP_IPC_LOCK)) 2382 return -ENOMEM; 2383 } 2384 2385 /* Check to ensure the stack will not grow into a hugetlb-only region */ 2386 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : 2387 vma->vm_end - size; 2388 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) 2389 return -EFAULT; 2390 2391 /* 2392 * Overcommit.. This must be the final test, as it will 2393 * update security statistics. 2394 */ 2395 if (security_vm_enough_memory_mm(mm, grow)) 2396 return -ENOMEM; 2397 2398 return 0; 2399 } 2400 2401 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) 2402 /* 2403 * PA-RISC uses this for its stack; IA64 for its Register Backing Store. 2404 * vma is the last one with address > vma->vm_end. Have to extend vma. 2405 */ 2406 int expand_upwards(struct vm_area_struct *vma, unsigned long address) 2407 { 2408 struct mm_struct *mm = vma->vm_mm; 2409 struct vm_area_struct *next; 2410 unsigned long gap_addr; 2411 int error = 0; 2412 2413 if (!(vma->vm_flags & VM_GROWSUP)) 2414 return -EFAULT; 2415 2416 /* Guard against exceeding limits of the address space. */ 2417 address &= PAGE_MASK; 2418 if (address >= (TASK_SIZE & PAGE_MASK)) 2419 return -ENOMEM; 2420 address += PAGE_SIZE; 2421 2422 /* Enforce stack_guard_gap */ 2423 gap_addr = address + stack_guard_gap; 2424 2425 /* Guard against overflow */ 2426 if (gap_addr < address || gap_addr > TASK_SIZE) 2427 gap_addr = TASK_SIZE; 2428 2429 next = vma->vm_next; 2430 if (next && next->vm_start < gap_addr && vma_is_accessible(next)) { 2431 if (!(next->vm_flags & VM_GROWSUP)) 2432 return -ENOMEM; 2433 /* Check that both stack segments have the same anon_vma? */ 2434 } 2435 2436 /* We must make sure the anon_vma is allocated. */ 2437 if (unlikely(anon_vma_prepare(vma))) 2438 return -ENOMEM; 2439 2440 /* 2441 * vma->vm_start/vm_end cannot change under us because the caller 2442 * is required to hold the mmap_lock in read mode. We need the 2443 * anon_vma lock to serialize against concurrent expand_stacks. 2444 */ 2445 anon_vma_lock_write(vma->anon_vma); 2446 2447 /* Somebody else might have raced and expanded it already */ 2448 if (address > vma->vm_end) { 2449 unsigned long size, grow; 2450 2451 size = address - vma->vm_start; 2452 grow = (address - vma->vm_end) >> PAGE_SHIFT; 2453 2454 error = -ENOMEM; 2455 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { 2456 error = acct_stack_growth(vma, size, grow); 2457 if (!error) { 2458 /* 2459 * vma_gap_update() doesn't support concurrent 2460 * updates, but we only hold a shared mmap_lock 2461 * lock here, so we need to protect against 2462 * concurrent vma expansions. 2463 * anon_vma_lock_write() doesn't help here, as 2464 * we don't guarantee that all growable vmas 2465 * in a mm share the same root anon vma. 2466 * So, we reuse mm->page_table_lock to guard 2467 * against concurrent vma expansions. 
2468 */ 2469 spin_lock(&mm->page_table_lock); 2470 if (vma->vm_flags & VM_LOCKED) 2471 mm->locked_vm += grow; 2472 vm_stat_account(mm, vma->vm_flags, grow); 2473 anon_vma_interval_tree_pre_update_vma(vma); 2474 vma->vm_end = address; 2475 anon_vma_interval_tree_post_update_vma(vma); 2476 if (vma->vm_next) 2477 vma_gap_update(vma->vm_next); 2478 else 2479 mm->highest_vm_end = vm_end_gap(vma); 2480 spin_unlock(&mm->page_table_lock); 2481 2482 perf_event_mmap(vma); 2483 } 2484 } 2485 } 2486 anon_vma_unlock_write(vma->anon_vma); 2487 khugepaged_enter_vma_merge(vma, vma->vm_flags); 2488 validate_mm(mm); 2489 return error; 2490 } 2491 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ 2492 2493 /* 2494 * vma is the first one with address < vma->vm_start. Have to extend vma. 2495 */ 2496 int expand_downwards(struct vm_area_struct *vma, 2497 unsigned long address) 2498 { 2499 struct mm_struct *mm = vma->vm_mm; 2500 struct vm_area_struct *prev; 2501 int error = 0; 2502 2503 address &= PAGE_MASK; 2504 if (address < mmap_min_addr) 2505 return -EPERM; 2506 2507 /* Enforce stack_guard_gap */ 2508 prev = vma->vm_prev; 2509 /* Check that both stack segments have the same anon_vma? */ 2510 if (prev && !(prev->vm_flags & VM_GROWSDOWN) && 2511 vma_is_accessible(prev)) { 2512 if (address - prev->vm_end < stack_guard_gap) 2513 return -ENOMEM; 2514 } 2515 2516 /* We must make sure the anon_vma is allocated. */ 2517 if (unlikely(anon_vma_prepare(vma))) 2518 return -ENOMEM; 2519 2520 /* 2521 * vma->vm_start/vm_end cannot change under us because the caller 2522 * is required to hold the mmap_lock in read mode. We need the 2523 * anon_vma lock to serialize against concurrent expand_stacks. 2524 */ 2525 anon_vma_lock_write(vma->anon_vma); 2526 2527 /* Somebody else might have raced and expanded it already */ 2528 if (address < vma->vm_start) { 2529 unsigned long size, grow; 2530 2531 size = vma->vm_end - address; 2532 grow = (vma->vm_start - address) >> PAGE_SHIFT; 2533 2534 error = -ENOMEM; 2535 if (grow <= vma->vm_pgoff) { 2536 error = acct_stack_growth(vma, size, grow); 2537 if (!error) { 2538 /* 2539 * vma_gap_update() doesn't support concurrent 2540 * updates, but we only hold a shared mmap_lock 2541 * lock here, so we need to protect against 2542 * concurrent vma expansions. 2543 * anon_vma_lock_write() doesn't help here, as 2544 * we don't guarantee that all growable vmas 2545 * in a mm share the same root anon vma. 2546 * So, we reuse mm->page_table_lock to guard 2547 * against concurrent vma expansions. 2548 */ 2549 spin_lock(&mm->page_table_lock); 2550 if (vma->vm_flags & VM_LOCKED) 2551 mm->locked_vm += grow; 2552 vm_stat_account(mm, vma->vm_flags, grow); 2553 anon_vma_interval_tree_pre_update_vma(vma); 2554 vma->vm_start = address; 2555 vma->vm_pgoff -= grow; 2556 anon_vma_interval_tree_post_update_vma(vma); 2557 vma_gap_update(vma); 2558 spin_unlock(&mm->page_table_lock); 2559 2560 perf_event_mmap(vma); 2561 } 2562 } 2563 } 2564 anon_vma_unlock_write(vma->anon_vma); 2565 khugepaged_enter_vma_merge(vma, vma->vm_flags); 2566 validate_mm(mm); 2567 return error; 2568 } 2569 2570 /* enforced gap between the expanding stack and other mappings. 
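(Editor's note) The default below is 256 pages, i.e. 1MB with 4KB pages.
It can be tuned with the stack_guard_gap= boot parameter parsed
underneath; for example, booting with stack_guard_gap=512 doubles the
gap to 2MB on 4KB pages.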
*/
2571 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
2572
2573 static int __init cmdline_parse_stack_guard_gap(char *p)
2574 {
2575 unsigned long val;
2576 char *endptr;
2577
2578 val = simple_strtoul(p, &endptr, 10);
2579 if (!*endptr)
2580 stack_guard_gap = val << PAGE_SHIFT;
2581
2582 return 0;
2583 }
2584 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
2585
2586 #ifdef CONFIG_STACK_GROWSUP
2587 int expand_stack(struct vm_area_struct *vma, unsigned long address)
2588 {
2589 return expand_upwards(vma, address);
2590 }
2591
2592 struct vm_area_struct *
2593 find_extend_vma(struct mm_struct *mm, unsigned long addr)
2594 {
2595 struct vm_area_struct *vma, *prev;
2596
2597 addr &= PAGE_MASK;
2598 vma = find_vma_prev(mm, addr, &prev);
2599 if (vma && (vma->vm_start <= addr))
2600 return vma;
2601 /* don't alter vm_end if the coredump is running */
2602 if (!prev || expand_stack(prev, addr))
2603 return NULL;
2604 if (prev->vm_flags & VM_LOCKED)
2605 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2606 return prev;
2607 }
2608 #else
2609 int expand_stack(struct vm_area_struct *vma, unsigned long address)
2610 {
2611 return expand_downwards(vma, address);
2612 }
2613
2614 struct vm_area_struct *
2615 find_extend_vma(struct mm_struct *mm, unsigned long addr)
2616 {
2617 struct vm_area_struct *vma;
2618 unsigned long start;
2619
2620 addr &= PAGE_MASK;
2621 vma = find_vma(mm, addr);
2622 if (!vma)
2623 return NULL;
2624 if (vma->vm_start <= addr)
2625 return vma;
2626 if (!(vma->vm_flags & VM_GROWSDOWN))
2627 return NULL;
2628 start = vma->vm_start;
2629 if (expand_stack(vma, addr))
2630 return NULL;
2631 if (vma->vm_flags & VM_LOCKED)
2632 populate_vma_page_range(vma, addr, start, NULL);
2633 return vma;
2634 }
2635 #endif
2636
2637 EXPORT_SYMBOL_GPL(find_extend_vma);
2638
2639 /*
2640 * Ok - we have the memory areas we should free on the vma list,
2641 * so release them, and do the vma updates.
2642 *
2643 * Called with the mm semaphore held.
2644 */
2645 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
2646 {
2647 unsigned long nr_accounted = 0;
2648
2649 /* Update high watermark before we lower total_vm */
2650 update_hiwater_vm(mm);
2651 do {
2652 long nrpages = vma_pages(vma);
2653
2654 if (vma->vm_flags & VM_ACCOUNT)
2655 nr_accounted += nrpages;
2656 vm_stat_account(mm, vma->vm_flags, -nrpages);
2657 vma = remove_vma(vma);
2658 } while (vma);
2659 vm_unacct_memory(nr_accounted);
2660 validate_mm(mm);
2661 }
2662
2663 /*
2664 * Get rid of page table information in the indicated region.
2665 *
2666 * Called with the mm semaphore held.
2667 */
2668 static void unmap_region(struct mm_struct *mm,
2669 struct vm_area_struct *vma, struct vm_area_struct *prev,
2670 unsigned long start, unsigned long end)
2671 {
2672 struct vm_area_struct *next = vma_next(mm, prev);
2673 struct mmu_gather tlb;
2674
2675 lru_add_drain();
2676 tlb_gather_mmu(&tlb, mm);
2677 update_hiwater_rss(mm);
2678 unmap_vmas(&tlb, vma, start, end);
2679 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2680 next ? next->vm_start : USER_PGTABLES_CEILING);
2681 tlb_finish_mmu(&tlb);
2682 }
2683
2684 /*
2685 * Create a list of vmas touched by the unmap, removing them from the mm's
2686 * vma list as we go.
2687 */ 2688 static bool 2689 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, 2690 struct vm_area_struct *prev, unsigned long end) 2691 { 2692 struct vm_area_struct **insertion_point; 2693 struct vm_area_struct *tail_vma = NULL; 2694 2695 insertion_point = (prev ? &prev->vm_next : &mm->mmap); 2696 vma->vm_prev = NULL; 2697 do { 2698 vma_rb_erase(vma, &mm->mm_rb); 2699 mm->map_count--; 2700 tail_vma = vma; 2701 vma = vma->vm_next; 2702 } while (vma && vma->vm_start < end); 2703 *insertion_point = vma; 2704 if (vma) { 2705 vma->vm_prev = prev; 2706 vma_gap_update(vma); 2707 } else 2708 mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; 2709 tail_vma->vm_next = NULL; 2710 2711 /* Kill the cache */ 2712 vmacache_invalidate(mm); 2713 2714 /* 2715 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or 2716 * VM_GROWSUP VMA. Such VMAs can change their size under 2717 * down_read(mmap_lock) and collide with the VMA we are about to unmap. 2718 */ 2719 if (vma && (vma->vm_flags & VM_GROWSDOWN)) 2720 return false; 2721 if (prev && (prev->vm_flags & VM_GROWSUP)) 2722 return false; 2723 return true; 2724 } 2725 2726 /* 2727 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it 2728 * has already been checked or doesn't make sense to fail. 2729 */ 2730 int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, 2731 unsigned long addr, int new_below) 2732 { 2733 struct vm_area_struct *new; 2734 int err; 2735 2736 if (vma->vm_ops && vma->vm_ops->may_split) { 2737 err = vma->vm_ops->may_split(vma, addr); 2738 if (err) 2739 return err; 2740 } 2741 2742 new = vm_area_dup(vma); 2743 if (!new) 2744 return -ENOMEM; 2745 2746 if (new_below) 2747 new->vm_end = addr; 2748 else { 2749 new->vm_start = addr; 2750 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); 2751 } 2752 2753 err = vma_dup_policy(vma, new); 2754 if (err) 2755 goto out_free_vma; 2756 2757 err = anon_vma_clone(new, vma); 2758 if (err) 2759 goto out_free_mpol; 2760 2761 if (new->vm_file) 2762 get_file(new->vm_file); 2763 2764 if (new->vm_ops && new->vm_ops->open) 2765 new->vm_ops->open(new); 2766 2767 if (new_below) 2768 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + 2769 ((addr - new->vm_start) >> PAGE_SHIFT), new); 2770 else 2771 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); 2772 2773 /* Success. */ 2774 if (!err) 2775 return 0; 2776 2777 /* Clean everything up if vma_adjust failed. */ 2778 if (new->vm_ops && new->vm_ops->close) 2779 new->vm_ops->close(new); 2780 if (new->vm_file) 2781 fput(new->vm_file); 2782 unlink_anon_vmas(new); 2783 out_free_mpol: 2784 mpol_put(vma_policy(new)); 2785 out_free_vma: 2786 vm_area_free(new); 2787 return err; 2788 } 2789 2790 /* 2791 * Split a vma into two pieces at address 'addr', a new vma is allocated 2792 * either for the first part or the tail. 
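* For example (editor's illustration): splitting a vma covering
* [0x1000, 0x5000) at addr == 0x3000 with new_below == 1 makes the new
* vma cover [0x1000, 0x3000) and shrinks the original to
* [0x3000, 0x5000); with new_below == 0 the new vma covers the tail
* [0x3000, 0x5000) instead.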
2793 */
2794 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2795 unsigned long addr, int new_below)
2796 {
2797 if (mm->map_count >= sysctl_max_map_count)
2798 return -ENOMEM;
2799
2800 return __split_vma(mm, vma, addr, new_below);
2801 }
2802
2803 static inline void
2804 unlock_range(struct vm_area_struct *start, unsigned long limit)
2805 {
2806 struct mm_struct *mm = start->vm_mm;
2807 struct vm_area_struct *tmp = start;
2808
2809 while (tmp && tmp->vm_start < limit) {
2810 if (tmp->vm_flags & VM_LOCKED) {
2811 mm->locked_vm -= vma_pages(tmp);
2812 munlock_vma_pages_all(tmp);
2813 }
2814
2815 tmp = tmp->vm_next;
2816 }
2817 }
2818
2819 /* Munmap is split into 2 main parts -- this part, which finds
2820 * what needs doing, and the areas themselves, which do the
2821 * work. This now handles partial unmappings.
2822 * Jeremy Fitzhardinge <jeremy@goop.org>
2823 */
2824 int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2825 struct list_head *uf, bool downgrade)
2826 {
2827 unsigned long end;
2828 struct vm_area_struct *vma, *prev, *last;
2829
2830 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
2831 return -EINVAL;
2832
2833 len = PAGE_ALIGN(len);
2834 end = start + len;
2835 if (len == 0)
2836 return -EINVAL;
2837
2838 /*
2839 * arch_unmap() might do unmaps itself. It must be called
2840 * and finish any rbtree manipulation before this code
2841 * runs and also starts to manipulate the rbtree.
2842 */
2843 arch_unmap(mm, start, end);
2844
2845 /* Find the first overlapping VMA where start < vma->vm_end */
2846 vma = find_vma_intersection(mm, start, end);
2847 if (!vma)
2848 return 0;
2849 prev = vma->vm_prev;
2850
2851 /*
2852 * If we need to split any vma, do it now to save pain later.
2853 *
2854 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2855 * unmapped vm_area_struct will remain in use: so lower split_vma
2856 * places tmp vma above, and higher split_vma places tmp vma below.
2857 */
2858 if (start > vma->vm_start) {
2859 int error;
2860
2861 /*
2862 * Make sure that map_count on return from munmap() will
2863 * not exceed its limit; but let map_count go just above
2864 * its limit temporarily, to help free resources as expected.
2865 */
2866 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2867 return -ENOMEM;
2868
2869 error = __split_vma(mm, vma, start, 0);
2870 if (error)
2871 return error;
2872 prev = vma;
2873 }
2874
2875 /* Does it split the last one? */
2876 last = find_vma(mm, end);
2877 if (last && end > last->vm_start) {
2878 int error = __split_vma(mm, last, end, 1);
2879 if (error)
2880 return error;
2881 }
2882 vma = vma_next(mm, prev);
2883
2884 if (unlikely(uf)) {
2885 /*
2886 * If userfaultfd_unmap_prep returns an error, the vmas
2887 * will remain split, but userland will get a
2888 * highly unexpected error anyway. This is no
2889 * different from the case where the first of the two
2890 * __split_vma calls fails, but we don't undo the first
2891 * split, even though we could. This failure is unlikely
2892 * enough that it's not worth optimizing for.
2893 */
2894 int error = userfaultfd_unmap_prep(vma, start, end, uf);
2895 if (error)
2896 return error;
2897 }
2898
2899 /*
2900 * unlock any mlock()ed ranges before detaching vmas
2901 */
2902 if (mm->locked_vm)
2903 unlock_range(vma, end);
2904
2905 /* Detach vmas from rbtree */
2906 if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
2907 downgrade = false;
2908
2909 if (downgrade)
2910 mmap_write_downgrade(mm);
2911
2912 unmap_region(mm, vma, prev, start, end);
2913
2914 /* Fix up all other VM information */
2915 remove_vma_list(mm, vma);
2916
2917 return downgrade ? 1 : 0;
2918 }
2919
2920 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2921 struct list_head *uf)
2922 {
2923 return __do_munmap(mm, start, len, uf, false);
2924 }
2925
2926 static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
2927 {
2928 int ret;
2929 struct mm_struct *mm = current->mm;
2930 LIST_HEAD(uf);
2931
2932 if (mmap_write_lock_killable(mm))
2933 return -EINTR;
2934
2935 ret = __do_munmap(mm, start, len, &uf, downgrade);
2936 /*
2937 * Returning 1 indicates that mmap_lock was downgraded.
2938 * But 1 is not a legal return value for vm_munmap() and munmap(),
2939 * so reset it to 0 before returning.
2940 */
2941 if (ret == 1) {
2942 mmap_read_unlock(mm);
2943 ret = 0;
2944 } else
2945 mmap_write_unlock(mm);
2946
2947 userfaultfd_unmap_complete(mm, &uf);
2948 return ret;
2949 }
2950
2951 int vm_munmap(unsigned long start, size_t len)
2952 {
2953 return __vm_munmap(start, len, false);
2954 }
2955 EXPORT_SYMBOL(vm_munmap);
2956
2957 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2958 {
2959 addr = untagged_addr(addr);
2960 profile_munmap(addr);
2961 return __vm_munmap(addr, len, true);
2962 }
2963
2964
2965 /*
2966 * Emulation of deprecated remap_file_pages() syscall.
2967 */
2968 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2969 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
2970 {
2971
2972 struct mm_struct *mm = current->mm;
2973 struct vm_area_struct *vma;
2974 unsigned long populate = 0;
2975 unsigned long ret = -EINVAL;
2976 struct file *file;
2977
2978 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
2979 current->comm, current->pid);
2980
2981 if (prot)
2982 return ret;
2983 start = start & PAGE_MASK;
2984 size = size & PAGE_MASK;
2985
2986 if (start + size <= start)
2987 return ret;
2988
2989 /* Does pgoff wrap? */
2990 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
2991 return ret;
2992
2993 if (mmap_write_lock_killable(mm))
2994 return -EINTR;
2995
2996 vma = find_vma(mm, start);
2997
2998 if (!vma || !(vma->vm_flags & VM_SHARED))
2999 goto out;
3000
3001 if (start < vma->vm_start)
3002 goto out;
3003
3004 if (start + size > vma->vm_end) {
3005 struct vm_area_struct *next;
3006
3007 for (next = vma->vm_next; next; next = next->vm_next) {
3008 /* hole between vmas ? */
3009 if (next->vm_start != next->vm_prev->vm_end)
3010 goto out;
3011
3012 if (next->vm_file != vma->vm_file)
3013 goto out;
3014
3015 if (next->vm_flags != vma->vm_flags)
3016 goto out;
3017
3018 if (start + size <= next->vm_end)
3019 break;
3020 }
3021
3022 if (!next)
3023 goto out;
3024 }
3025
3026 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
3027 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
3028 prot |= vma->vm_flags & VM_EXEC ?
PROT_EXEC : 0; 3029 3030 flags &= MAP_NONBLOCK; 3031 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; 3032 if (vma->vm_flags & VM_LOCKED) 3033 flags |= MAP_LOCKED; 3034 3035 file = get_file(vma->vm_file); 3036 ret = do_mmap(vma->vm_file, start, size, 3037 prot, flags, pgoff, &populate, NULL); 3038 fput(file); 3039 out: 3040 mmap_write_unlock(mm); 3041 if (populate) 3042 mm_populate(ret, populate); 3043 if (!IS_ERR_VALUE(ret)) 3044 ret = 0; 3045 return ret; 3046 } 3047 3048 /* 3049 * this is really a simplified "do_mmap". it only handles 3050 * anonymous maps. eventually we may be able to do some 3051 * brk-specific accounting here. 3052 */ 3053 static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf) 3054 { 3055 struct mm_struct *mm = current->mm; 3056 struct vm_area_struct *vma, *prev; 3057 struct rb_node **rb_link, *rb_parent; 3058 pgoff_t pgoff = addr >> PAGE_SHIFT; 3059 int error; 3060 unsigned long mapped_addr; 3061 3062 /* Until we need other flags, refuse anything except VM_EXEC. */ 3063 if ((flags & (~VM_EXEC)) != 0) 3064 return -EINVAL; 3065 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 3066 3067 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); 3068 if (IS_ERR_VALUE(mapped_addr)) 3069 return mapped_addr; 3070 3071 error = mlock_future_check(mm, mm->def_flags, len); 3072 if (error) 3073 return error; 3074 3075 /* Clear old maps, set up prev, rb_link, rb_parent, and uf */ 3076 if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf)) 3077 return -ENOMEM; 3078 3079 /* Check against address space limits *after* clearing old maps... */ 3080 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) 3081 return -ENOMEM; 3082 3083 if (mm->map_count > sysctl_max_map_count) 3084 return -ENOMEM; 3085 3086 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) 3087 return -ENOMEM; 3088 3089 /* Can we just expand an old private anonymous mapping? 
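* (For example, repeated brk() growth typically extends one anonymous
* vma rather than allocating a new one each time; editor's note.)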
*/
3090 vma = vma_merge(mm, prev, addr, addr + len, flags,
3091 NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
3092 if (vma)
3093 goto out;
3094
3095 /*
3096 * create a vma struct for an anonymous mapping
3097 */
3098 vma = vm_area_alloc(mm);
3099 if (!vma) {
3100 vm_unacct_memory(len >> PAGE_SHIFT);
3101 return -ENOMEM;
3102 }
3103
3104 vma_set_anonymous(vma);
3105 vma->vm_start = addr;
3106 vma->vm_end = addr + len;
3107 vma->vm_pgoff = pgoff;
3108 vma->vm_flags = flags;
3109 vma->vm_page_prot = vm_get_page_prot(flags);
3110 vma_link(mm, vma, prev, rb_link, rb_parent);
3111 out:
3112 perf_event_mmap(vma);
3113 mm->total_vm += len >> PAGE_SHIFT;
3114 mm->data_vm += len >> PAGE_SHIFT;
3115 if (flags & VM_LOCKED)
3116 mm->locked_vm += (len >> PAGE_SHIFT);
3117 vma->vm_flags |= VM_SOFTDIRTY;
3118 return 0;
3119 }
3120
3121 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3122 {
3123 struct mm_struct *mm = current->mm;
3124 unsigned long len;
3125 int ret;
3126 bool populate;
3127 LIST_HEAD(uf);
3128
3129 len = PAGE_ALIGN(request);
3130 if (len < request)
3131 return -ENOMEM;
3132 if (!len)
3133 return 0;
3134
3135 if (mmap_write_lock_killable(mm))
3136 return -EINTR;
3137
3138 ret = do_brk_flags(addr, len, flags, &uf);
3139 populate = ((mm->def_flags & VM_LOCKED) != 0);
3140 mmap_write_unlock(mm);
3141 userfaultfd_unmap_complete(mm, &uf);
3142 if (populate && !ret)
3143 mm_populate(addr, len);
3144 return ret;
3145 }
3146 EXPORT_SYMBOL(vm_brk_flags);
3147
3148 int vm_brk(unsigned long addr, unsigned long len)
3149 {
3150 return vm_brk_flags(addr, len, 0);
3151 }
3152 EXPORT_SYMBOL(vm_brk);
3153
3154 /* Release all mmaps. */
3155 void exit_mmap(struct mm_struct *mm)
3156 {
3157 struct mmu_gather tlb;
3158 struct vm_area_struct *vma;
3159 unsigned long nr_accounted = 0;
3160
3161 /* mm's last user has gone, and it's about to be pulled down */
3162 mmu_notifier_release(mm);
3163
3164 if (unlikely(mm_is_oom_victim(mm))) {
3165 /*
3166 * Manually reap the mm to free as much memory as possible.
3167 * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
3168 * this mm from further consideration. Taking mm->mmap_lock for
3169 * write after setting MMF_OOM_SKIP will guarantee that the oom
3170 * reaper will not run on this mm again after mmap_lock is
3171 * dropped.
3172 *
3173 * Nothing can be holding mm->mmap_lock here and the above call
3174 * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
3175 * __oom_reap_task_mm() will not block.
3176 *
3177 * This needs to be done before calling munlock_vma_pages_all(),
3178 * which clears VM_LOCKED, otherwise the oom reaper cannot
3179 * reliably test it.
3180 */
3181 (void)__oom_reap_task_mm(mm);
3182
3183 set_bit(MMF_OOM_SKIP, &mm->flags);
3184 mmap_write_lock(mm);
3185 mmap_write_unlock(mm);
3186 }
3187
3188 if (mm->locked_vm)
3189 unlock_range(mm->mmap, ULONG_MAX);
3190
3191 arch_exit_mmap(mm);
3192
3193 vma = mm->mmap;
3194 if (!vma) /* Can happen if dup_mmap() received an OOM */
3195 return;
3196
3197 lru_add_drain();
3198 flush_cache_mm(mm);
3199 tlb_gather_mmu_fullmm(&tlb, mm);
3200 /* update_hiwater_rss(mm) here? but nobody should be looking */
3201 /* Use -1 here to ensure all VMAs in the mm are unmapped */
3202 unmap_vmas(&tlb, vma, 0, -1);
3203 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
3204 tlb_finish_mmu(&tlb);
3205
3206 /*
3207 * Walk the list again, actually closing and freeing it,
3208 * with preemption enabled, without holding any MM locks.
3209 */
3210 while (vma) {
3211 if (vma->vm_flags & VM_ACCOUNT)
3212 nr_accounted += vma_pages(vma);
3213 vma = remove_vma(vma);
3214 cond_resched();
3215 }
3216 vm_unacct_memory(nr_accounted);
3217 }
3218
3219 /* Insert vm structure into process list sorted by address
3220 * and into the inode's i_mmap tree. If vm_file is non-NULL
3221 * then i_mmap_rwsem is taken here.
3222 */
3223 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3224 {
3225 struct vm_area_struct *prev;
3226 struct rb_node **rb_link, *rb_parent;
3227
3228 if (find_vma_links(mm, vma->vm_start, vma->vm_end,
3229 &prev, &rb_link, &rb_parent))
3230 return -ENOMEM;
3231 if ((vma->vm_flags & VM_ACCOUNT) &&
3232 security_vm_enough_memory_mm(mm, vma_pages(vma)))
3233 return -ENOMEM;
3234
3235 /*
3236 * The vm_pgoff of a purely anonymous vma should be irrelevant
3237 * until its first write fault, when the page's anon_vma and index
3238 * are set. But now set the vm_pgoff it will almost certainly
3239 * end up with (unless mremap moves it elsewhere before that
3240 * first write fault), so /proc/pid/maps tells a consistent story.
3241 *
3242 * By setting it to reflect the virtual start address of the
3243 * vma, merges and splits can happen in a seamless way, just
3244 * using the existing file pgoff checks and manipulations.
3245 * Similarly in do_mmap and in do_brk_flags.
3246 */
3247 if (vma_is_anonymous(vma)) {
3248 BUG_ON(vma->anon_vma);
3249 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3250 }
3251
3252 vma_link(mm, vma, prev, rb_link, rb_parent);
3253 return 0;
3254 }
3255
3256 /*
3257 * Copy the vma structure to a new location in the same mm,
3258 * prior to moving page table entries, to effect an mremap move.
3259 */
3260 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
3261 unsigned long addr, unsigned long len, pgoff_t pgoff,
3262 bool *need_rmap_locks)
3263 {
3264 struct vm_area_struct *vma = *vmap;
3265 unsigned long vma_start = vma->vm_start;
3266 struct mm_struct *mm = vma->vm_mm;
3267 struct vm_area_struct *new_vma, *prev;
3268 struct rb_node **rb_link, *rb_parent;
3269 bool faulted_in_anon_vma = true;
3270
3271 /*
3272 * If anonymous vma has not yet been faulted, update new pgoff
3273 * to match new location, to increase its chance of merging.
3274 */
3275 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3276 pgoff = addr >> PAGE_SHIFT;
3277 faulted_in_anon_vma = false;
3278 }
3279
3280 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
3281 return NULL; /* should never get here */
3282 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
3283 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
3284 vma->vm_userfaultfd_ctx);
3285 if (new_vma) {
3286 /*
3287 * Source vma may have been merged into new_vma
3288 */
3289 if (unlikely(vma_start >= new_vma->vm_start &&
3290 vma_start < new_vma->vm_end)) {
3291 /*
3292 * The only way we can get a vma_merge with
3293 * self during an mremap is if the vma hasn't
3294 * been faulted in yet and we were allowed to
3295 * reset the dst vma->vm_pgoff to the
3296 * destination address of the mremap to allow
3297 * the merge to happen. mremap must change the
3298 * vm_pgoff linearity between src and dst vmas
3299 * (in turn preventing a vma_merge) to be
3300 * safe. It is only safe to keep the vm_pgoff
3301 * linear if there are no pages mapped yet.
3302 */
3303 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
3304 *vmap = vma = new_vma;
3305 }
3306 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3307 } else {
3308 new_vma = vm_area_dup(vma);
3309 if (!new_vma)
3310 goto out;
3311 new_vma->vm_start = addr;
3312 new_vma->vm_end = addr + len;
3313 new_vma->vm_pgoff = pgoff;
3314 if (vma_dup_policy(vma, new_vma))
3315 goto out_free_vma;
3316 if (anon_vma_clone(new_vma, vma))
3317 goto out_free_mempol;
3318 if (new_vma->vm_file)
3319 get_file(new_vma->vm_file);
3320 if (new_vma->vm_ops && new_vma->vm_ops->open)
3321 new_vma->vm_ops->open(new_vma);
3322 vma_link(mm, new_vma, prev, rb_link, rb_parent);
3323 *need_rmap_locks = false;
3324 }
3325 return new_vma;
3326
3327 out_free_mempol:
3328 mpol_put(vma_policy(new_vma));
3329 out_free_vma:
3330 vm_area_free(new_vma);
3331 out:
3332 return NULL;
3333 }
3334
3335 /*
3336 * Return true if the calling process may expand its vm space by the passed
3337 * number of pages
3338 */
3339 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3340 {
3341 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3342 return false;
3343
3344 if (is_data_mapping(flags) &&
3345 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3346 /* Workaround for Valgrind */
3347 if (rlimit(RLIMIT_DATA) == 0 &&
3348 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3349 return true;
3350
3351 pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Update limits%s.\n",
3352 current->comm, current->pid,
3353 (mm->data_vm + npages) << PAGE_SHIFT,
3354 rlimit(RLIMIT_DATA),
3355 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3356
3357 if (!ignore_rlimit_data)
3358 return false;
3359 }
3360
3361 return true;
3362 }
3363
3364 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
3365 {
3366 mm->total_vm += npages;
3367
3368 if (is_exec_mapping(flags))
3369 mm->exec_vm += npages;
3370 else if (is_stack_mapping(flags))
3371 mm->stack_vm += npages;
3372 else if (is_data_mapping(flags))
3373 mm->data_vm += npages;
3374 }
3375
3376 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3377
3378 /*
3379 * Having a close hook prevents vma merging regardless of flags.
3380 */
3381 static void special_mapping_close(struct vm_area_struct *vma)
3382 {
3383 }
3384
3385 static const char *special_mapping_name(struct vm_area_struct *vma)
3386 {
3387 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3388 }
3389
3390 static int special_mapping_mremap(struct vm_area_struct *new_vma)
3391 {
3392 struct vm_special_mapping *sm = new_vma->vm_private_data;
3393
3394 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3395 return -EFAULT;
3396
3397 if (sm->mremap)
3398 return sm->mremap(sm, new_vma);
3399
3400 return 0;
3401 }
3402
3403 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
3404 {
3405 /*
3406 * Forbid splitting special mappings - the kernel has expectations
3407 * about the number of pages in the mapping. Together with VM_DONTEXPAND
3408 * the size of the vma should stay the same over the special mapping's
3409 * lifetime.
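* (Splitting, e.g. via an munmap() of part of the range, would
* otherwise leave a truncated special mapping behind; editor's note.)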
3410 */ 3411 return -EINVAL; 3412 } 3413 3414 static const struct vm_operations_struct special_mapping_vmops = { 3415 .close = special_mapping_close, 3416 .fault = special_mapping_fault, 3417 .mremap = special_mapping_mremap, 3418 .name = special_mapping_name, 3419 /* vDSO code relies that VVAR can't be accessed remotely */ 3420 .access = NULL, 3421 .may_split = special_mapping_split, 3422 }; 3423 3424 static const struct vm_operations_struct legacy_special_mapping_vmops = { 3425 .close = special_mapping_close, 3426 .fault = special_mapping_fault, 3427 }; 3428 3429 static vm_fault_t special_mapping_fault(struct vm_fault *vmf) 3430 { 3431 struct vm_area_struct *vma = vmf->vma; 3432 pgoff_t pgoff; 3433 struct page **pages; 3434 3435 if (vma->vm_ops == &legacy_special_mapping_vmops) { 3436 pages = vma->vm_private_data; 3437 } else { 3438 struct vm_special_mapping *sm = vma->vm_private_data; 3439 3440 if (sm->fault) 3441 return sm->fault(sm, vmf->vma, vmf); 3442 3443 pages = sm->pages; 3444 } 3445 3446 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) 3447 pgoff--; 3448 3449 if (*pages) { 3450 struct page *page = *pages; 3451 get_page(page); 3452 vmf->page = page; 3453 return 0; 3454 } 3455 3456 return VM_FAULT_SIGBUS; 3457 } 3458 3459 static struct vm_area_struct *__install_special_mapping( 3460 struct mm_struct *mm, 3461 unsigned long addr, unsigned long len, 3462 unsigned long vm_flags, void *priv, 3463 const struct vm_operations_struct *ops) 3464 { 3465 int ret; 3466 struct vm_area_struct *vma; 3467 3468 vma = vm_area_alloc(mm); 3469 if (unlikely(vma == NULL)) 3470 return ERR_PTR(-ENOMEM); 3471 3472 vma->vm_start = addr; 3473 vma->vm_end = addr + len; 3474 3475 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; 3476 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 3477 3478 vma->vm_ops = ops; 3479 vma->vm_private_data = priv; 3480 3481 ret = insert_vm_struct(mm, vma); 3482 if (ret) 3483 goto out; 3484 3485 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); 3486 3487 perf_event_mmap(vma); 3488 3489 return vma; 3490 3491 out: 3492 vm_area_free(vma); 3493 return ERR_PTR(ret); 3494 } 3495 3496 bool vma_is_special_mapping(const struct vm_area_struct *vma, 3497 const struct vm_special_mapping *sm) 3498 { 3499 return vma->vm_private_data == sm && 3500 (vma->vm_ops == &special_mapping_vmops || 3501 vma->vm_ops == &legacy_special_mapping_vmops); 3502 } 3503 3504 /* 3505 * Called with mm->mmap_lock held for writing. 3506 * Insert a new vma covering the given region, with the given flags. 3507 * Its pages are supplied by the given array of struct page *. 3508 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. 3509 * The region past the last page supplied will always produce SIGBUS. 3510 * The array pointer and the pages it points to are assumed to stay alive 3511 * for as long as this mapping might exist. 
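*
* A minimal usage sketch (editor's example; the identifiers are
* hypothetical, and real callers such as the vDSO setup code differ in
* detail). demo_pages[1] is left NULL so the array is null-terminated:
*
*	static struct page *demo_pages[2];
*	static const struct vm_special_mapping demo_spec = {
*		.name	= "[demo]",
*		.pages	= demo_pages,
*	};
*
*	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
*				       VM_READ|VM_MAYREAD, &demo_spec);
*	if (IS_ERR(vma))
*		return PTR_ERR(vma);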
3512 */
3513 struct vm_area_struct *_install_special_mapping(
3514 struct mm_struct *mm,
3515 unsigned long addr, unsigned long len,
3516 unsigned long vm_flags, const struct vm_special_mapping *spec)
3517 {
3518 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
3519 &special_mapping_vmops);
3520 }
3521
3522 int install_special_mapping(struct mm_struct *mm,
3523 unsigned long addr, unsigned long len,
3524 unsigned long vm_flags, struct page **pages)
3525 {
3526 struct vm_area_struct *vma = __install_special_mapping(
3527 mm, addr, len, vm_flags, (void *)pages,
3528 &legacy_special_mapping_vmops);
3529
3530 return PTR_ERR_OR_ZERO(vma);
3531 }
3532
3533 static DEFINE_MUTEX(mm_all_locks_mutex);
3534
3535 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3536 {
3537 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3538 /*
3539 * The LSB of head.next can't change from under us
3540 * because we hold the mm_all_locks_mutex.
3541 */
3542 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
3543 /*
3544 * We can safely modify head.next after taking the
3545 * anon_vma->root->rwsem. If some other vma in this mm shares
3546 * the same anon_vma we won't take it again.
3547 *
3548 * No need of atomic instructions here, head.next
3549 * can't change from under us thanks to the
3550 * anon_vma->root->rwsem.
3551 */
3552 if (__test_and_set_bit(0, (unsigned long *)
3553 &anon_vma->root->rb_root.rb_root.rb_node))
3554 BUG();
3555 }
3556 }
3557
3558 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3559 {
3560 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3561 /*
3562 * AS_MM_ALL_LOCKS can't change from under us because
3563 * we hold the mm_all_locks_mutex.
3564 *
3565 * Operations on ->flags have to be atomic because
3566 * even if AS_MM_ALL_LOCKS is stable thanks to the
3567 * mm_all_locks_mutex, there may be other cpus
3568 * changing other bitflags in parallel to us.
3569 */
3570 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3571 BUG();
3572 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
3573 }
3574 }
3575
3576 /*
3577 * This operation locks against the VM for all pte/vma/mm related
3578 * operations that could ever happen on a certain mm. This includes
3579 * vmtruncate, try_to_unmap, and all page faults.
3580 *
3581 * The caller must take the mmap_lock in write mode before calling
3582 * mm_take_all_locks(). The caller isn't allowed to release the
3583 * mmap_lock until mm_drop_all_locks() returns.
3584 *
3585 * mmap_lock in write mode is required in order to block all operations
3586 * that could modify pagetables and free pages without need of
3587 * altering the vma layout. It's also needed in write mode to prevent
3588 * new anon_vmas from being associated with existing vmas.
3589 *
3590 * A single task can't take more than one mm_take_all_locks() in a row
3591 * or it would deadlock.
3592 *
3593 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3594 * mapping->flags avoid taking the same lock twice, if more than one
3595 * vma in this mm is backed by the same anon_vma or address_space.
3596 *
3597 * We take locks in the following order, according to the comment at the
3598 * beginning of mm/rmap.c:
3599 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
3600 * hugetlb mapping);
3601 * - all i_mmap_rwsem locks;
3602 * - all anon_vma->rwsem locks.
3603 *
3604 * We can take all locks within these types in any order, because the VM code
3605 * doesn't nest them, and we are protected from parallel mm_take_all_locks()
3606 * by mm_all_locks_mutex.
3607 *
3608 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
3609 * that may have to take thousands of locks.
3610 *
3611 * mm_take_all_locks() can fail if it's interrupted by signals.
3612 */
3613 int mm_take_all_locks(struct mm_struct *mm)
3614 {
3615 struct vm_area_struct *vma;
3616 struct anon_vma_chain *avc;
3617
3618 BUG_ON(mmap_read_trylock(mm));
3619
3620 mutex_lock(&mm_all_locks_mutex);
3621
3622 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3623 if (signal_pending(current))
3624 goto out_unlock;
3625 if (vma->vm_file && vma->vm_file->f_mapping &&
3626 is_vm_hugetlb_page(vma))
3627 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3628 }
3629
3630 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3631 if (signal_pending(current))
3632 goto out_unlock;
3633 if (vma->vm_file && vma->vm_file->f_mapping &&
3634 !is_vm_hugetlb_page(vma))
3635 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3636 }
3637
3638 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3639 if (signal_pending(current))
3640 goto out_unlock;
3641 if (vma->anon_vma)
3642 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3643 vm_lock_anon_vma(mm, avc->anon_vma);
3644 }
3645
3646 return 0;
3647
3648 out_unlock:
3649 mm_drop_all_locks(mm);
3650 return -EINTR;
3651 }
3652
3653 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3654 {
3655 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3656 /*
3657 * The LSB of head.next can't change to 0 from under
3658 * us because we hold the mm_all_locks_mutex.
3659 *
3660 * We must however clear the bitflag before unlocking
3661 * the vma so the users using the anon_vma->rb_root will
3662 * never see our bitflag.
3663 *
3664 * No need of atomic instructions here, head.next
3665 * can't change from under us until we release the
3666 * anon_vma->root->rwsem.
3667 */
3668 if (!__test_and_clear_bit(0, (unsigned long *)
3669 &anon_vma->root->rb_root.rb_root.rb_node))
3670 BUG();
3671 anon_vma_unlock_write(anon_vma);
3672 }
3673 }
3674
3675 static void vm_unlock_mapping(struct address_space *mapping)
3676 {
3677 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3678 /*
3679 * AS_MM_ALL_LOCKS can't change to 0 from under us
3680 * because we hold the mm_all_locks_mutex.
3681 */
3682 i_mmap_unlock_write(mapping);
3683 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3684 &mapping->flags))
3685 BUG();
3686 }
3687 }
3688
3689 /*
3690 * The mmap_lock cannot be released by the caller until
3691 * mm_drop_all_locks() returns.
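*
* Typical pairing (editor's sketch; this mirrors how the mmu notifier
* registration code uses these helpers):
*
*	mmap_write_lock(mm);
*	ret = mm_take_all_locks(mm);	(fails only with -EINTR)
*	if (!ret) {
*		... publish state that page faults and rmap walks
*		    must not race with ...
*		mm_drop_all_locks(mm);
*	}
*	mmap_write_unlock(mm);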
3692 */
3693 void mm_drop_all_locks(struct mm_struct *mm)
3694 {
3695 struct vm_area_struct *vma;
3696 struct anon_vma_chain *avc;
3697
3698 BUG_ON(mmap_read_trylock(mm));
3699 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3700
3701 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3702 if (vma->anon_vma)
3703 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3704 vm_unlock_anon_vma(avc->anon_vma);
3705 if (vma->vm_file && vma->vm_file->f_mapping)
3706 vm_unlock_mapping(vma->vm_file->f_mapping);
3707 }
3708
3709 mutex_unlock(&mm_all_locks_mutex);
3710 }
3711
3712 /*
3713 * initialise the percpu counter for VM
3714 */
3715 void __init mmap_init(void)
3716 {
3717 int ret;
3718
3719 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3720 VM_BUG_ON(ret);
3721 }
3722
3723 /*
3724 * Initialise sysctl_user_reserve_kbytes.
3725 *
3726 * This is intended to prevent a user from starting a single memory hogging
3727 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
3728 * mode.
3729 *
3730 * The default value is min(3% of free memory, 128MB).
3731 * 128MB is enough to recover with sshd/login, bash, and top/kill.
3732 */
3733 static int init_user_reserve(void)
3734 {
3735 unsigned long free_kbytes;
3736
3737 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3738
3739 sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3740 return 0;
3741 }
3742 subsys_initcall(init_user_reserve);
3743
3744 /*
3745 * Initialise sysctl_admin_reserve_kbytes.
3746 *
3747 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
3748 * to log in and kill a memory hogging process.
3749 *
3750 * Systems with more than 256MB will reserve 8MB, enough to recover
3751 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
3752 * only reserve 3% of free pages by default.
3753 */
3754 static int init_admin_reserve(void)
3755 {
3756 unsigned long free_kbytes;
3757
3758 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3759
3760 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3761 return 0;
3762 }
3763 subsys_initcall(init_admin_reserve);
3764
3765 /*
3766 * Reinitialise user and admin reserves if memory is added or removed.
3767 *
3768 * The default user reserve max is 128MB, and the default max for the
3769 * admin reserve is 8MB. These are usually, but not always, enough to
3770 * enable recovery from a memory hogging process using login/sshd, a shell,
3771 * and tools like top. It may make sense to increase or even disable the
3772 * reserve depending on the existence of swap or variations in the recovery
3773 * tools. So, the admin may have changed them.
3774 *
3775 * If memory is added and the reserves have been eliminated or increased above
3776 * the default max, then we'll trust the admin.
3777 *
3778 * If memory is removed and there isn't enough free memory, then we
3779 * need to reset the reserves.
3780 *
3781 * Otherwise keep the reserve set by the admin.
3782 */
3783 static int reserve_mem_notifier(struct notifier_block *nb,
3784 unsigned long action, void *data)
3785 {
3786 unsigned long tmp, free_kbytes;
3787
3788 switch (action) {
3789 case MEM_ONLINE:
3790 /* Default max is 128MB. Leave alone if modified by operator. */
3791 tmp = sysctl_user_reserve_kbytes;
3792 if (0 < tmp && tmp < (1UL << 17))
3793 init_user_reserve();
3794
3795 /* Default max is 8MB. Leave alone if modified by operator.
*/ 3796 tmp = sysctl_admin_reserve_kbytes; 3797 if (0 < tmp && tmp < (1UL << 13)) 3798 init_admin_reserve(); 3799 3800 break; 3801 case MEM_OFFLINE: 3802 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); 3803 3804 if (sysctl_user_reserve_kbytes > free_kbytes) { 3805 init_user_reserve(); 3806 pr_info("vm.user_reserve_kbytes reset to %lu\n", 3807 sysctl_user_reserve_kbytes); 3808 } 3809 3810 if (sysctl_admin_reserve_kbytes > free_kbytes) { 3811 init_admin_reserve(); 3812 pr_info("vm.admin_reserve_kbytes reset to %lu\n", 3813 sysctl_admin_reserve_kbytes); 3814 } 3815 break; 3816 default: 3817 break; 3818 } 3819 return NOTIFY_OK; 3820 } 3821 3822 static struct notifier_block reserve_mem_nb = { 3823 .notifier_call = reserve_mem_notifier, 3824 }; 3825 3826 static int __meminit init_reserve_notifier(void) 3827 { 3828 if (register_hotmemory_notifier(&reserve_mem_nb)) 3829 pr_err("Failed registering memory add/remove notifier for admin reserve\n"); 3830 3831 return 0; 3832 } 3833 subsys_initcall(init_reserve_notifier); 3834