// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		struct vm_area_struct *next, unsigned long start,
		unsigned long end, unsigned long tree_end, bool mm_wr_locked);

static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
				      struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

/*
 * Close a vm structure and free it.
 */
static void remove_vma(struct vm_area_struct *vma, bool unreachable)
{
	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	if (unreachable)
		__vm_area_free(vma);
	else
		vm_area_free(vma);
}

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * check_brk_limits() - Use platform specific check of range & verify mlock
 * limits.
 * @addr: The address to check
 * @len: The size of increase.
 *
 * Return: 0 on success.
 */
static int check_brk_limits(unsigned long addr, unsigned long len)
{
	unsigned long mapped_addr;

	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (IS_ERR_VALUE(mapped_addr))
		return mapped_addr;

	return mlock_future_ok(current->mm, current->mm->def_flags, len)
		? 0 : -EAGAIN;
}
static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		unsigned long addr, unsigned long request, unsigned long flags);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *brkvma, *next = NULL;
	unsigned long min_brk;
	bool populate = false;
	LIST_HEAD(uf);
	struct vma_iterator vmi;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit in the case where the limit is
	 * not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		/* Search one past newbrk */
		vma_iter_init(&vmi, mm, newbrk);
		brkvma = vma_find(&vmi, oldbrk);
		if (!brkvma || brkvma->vm_start >= oldbrk)
			goto out; /* mapping intersects with an existing non-brk vma. */
		/*
		 * mm->brk must be protected by write mmap_lock.
		 * do_vma_munmap() will drop the lock on success, so update it
		 * before calling do_vma_munmap().
		 */
		mm->brk = brk;
		if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
			goto out;

		goto success_unlocked;
	}

	if (check_brk_limits(oldbrk, newbrk - oldbrk))
		goto out;

	/*
	 * Only check if the next VMA is within the stack_guard_gap of the
	 * expansion area
	 */
	vma_iter_init(&vmi, mm, oldbrk);
	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	brkvma = vma_prev_limit(&vmi, mm->start_brk);
	/* Ok, looks good - let it rip. */
	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
		goto out;

	mm->brk = brk;
	if (mm->def_flags & VM_LOCKED)
		populate = true;

success:
	mmap_write_unlock(mm);
success_unlocked:
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	mm->brk = origbrk;
	mmap_write_unlock(mm);
	return origbrk;
}

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
static void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}
#endif
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}

#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
#define validate_mm(mm) do { } while (0)
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

static unsigned long count_vma_pages_range(struct mm_struct *mm,
		unsigned long addr, unsigned long end)
{
	VMA_ITERATOR(vmi, mm, addr);
	struct vm_area_struct *vma;
	unsigned long nr_pages = 0;

	for_each_vma_range(vmi, vma, end) {
		unsigned long vm_start = max(addr, vma->vm_start);
		unsigned long vm_end = min(end, vma->vm_end);

		nr_pages += PHYS_PFN(vm_end - vm_start);
	}

	return nr_pages;
}

static void __vma_link_file(struct vm_area_struct *vma,
			    struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_allow_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

static void vma_link_file(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping;

	if (file) {
		mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__vma_link_file(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, mm, 0);

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	vma_start_write(vma);
	vma_iter_store(&vmi, vma);
	vma_link_file(vma);
	mm->map_count++;
	validate_mm(mm);
	return 0;
}

/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @next: The next vma if it is to be adjusted
 * @remove: The first vma to be removed
 * @remove2: The second vma to be removed
 */
static inline void init_multi_vma_prep(struct vma_prepare *vp,
		struct vm_area_struct *vma, struct vm_area_struct *next,
		struct vm_area_struct *remove, struct vm_area_struct *remove2)
{
	memset(vp, 0, sizeof(struct vma_prepare));
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;
	vp->remove = remove;
	vp->remove2 = remove2;
	vp->adj_next = next;
	if (!vp->anon_vma && next)
		vp->anon_vma = next->anon_vma;

	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;

}

/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
static inline void init_vma_prep(struct vma_prepare *vp,
				 struct vm_area_struct *vma)
{
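	/* Common single-VMA case: nothing adjacent is adjusted and nothing is removed. */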
	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}


/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering
 * @vp: The initialized vma_prepare struct
 */
static inline void vma_prepare(struct vma_prepare *vp)
{
	if (vp->file) {
		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

		if (vp->adj_next)
			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
				      vp->adj_next->vm_end);

		i_mmap_lock_write(vp->mapping);
		if (vp->insert && vp->insert->vm_file) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(vp->insert,
					vp->insert->vm_file->f_mapping);
		}
	}

	if (vp->anon_vma) {
		anon_vma_lock_write(vp->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
	}

	if (vp->file) {
		flush_dcache_mmap_lock(vp->mapping);
		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
		if (vp->adj_next)
			vma_interval_tree_remove(vp->adj_next,
						 &vp->mapping->i_mmap);
	}

}

/*
 * vma_complete() - Helper function for handling the unlocking after altering VMAs,
 * or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
static inline void vma_complete(struct vma_prepare *vp,
				struct vma_iterator *vmi, struct mm_struct *mm)
{
	if (vp->file) {
		if (vp->adj_next)
			vma_interval_tree_insert(vp->adj_next,
						 &vp->mapping->i_mmap);
		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
		flush_dcache_mmap_unlock(vp->mapping);
	}

	if (vp->remove && vp->file) {
		__remove_shared_vm_struct(vp->remove, vp->mapping);
		if (vp->remove2)
			__remove_shared_vm_struct(vp->remove2, vp->mapping);
	} else if (vp->insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
534 */ 535 vma_iter_store(vmi, vp->insert); 536 mm->map_count++; 537 } 538 539 if (vp->anon_vma) { 540 anon_vma_interval_tree_post_update_vma(vp->vma); 541 if (vp->adj_next) 542 anon_vma_interval_tree_post_update_vma(vp->adj_next); 543 anon_vma_unlock_write(vp->anon_vma); 544 } 545 546 if (vp->file) { 547 i_mmap_unlock_write(vp->mapping); 548 uprobe_mmap(vp->vma); 549 550 if (vp->adj_next) 551 uprobe_mmap(vp->adj_next); 552 } 553 554 if (vp->remove) { 555 again: 556 vma_mark_detached(vp->remove, true); 557 if (vp->file) { 558 uprobe_munmap(vp->remove, vp->remove->vm_start, 559 vp->remove->vm_end); 560 fput(vp->file); 561 } 562 if (vp->remove->anon_vma) 563 anon_vma_merge(vp->vma, vp->remove); 564 mm->map_count--; 565 mpol_put(vma_policy(vp->remove)); 566 if (!vp->remove2) 567 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end); 568 vm_area_free(vp->remove); 569 570 /* 571 * In mprotect's case 6 (see comments on vma_merge), 572 * we are removing both mid and next vmas 573 */ 574 if (vp->remove2) { 575 vp->remove = vp->remove2; 576 vp->remove2 = NULL; 577 goto again; 578 } 579 } 580 if (vp->insert && vp->file) 581 uprobe_mmap(vp->insert); 582 validate_mm(mm); 583 } 584 585 /* 586 * dup_anon_vma() - Helper function to duplicate anon_vma 587 * @dst: The destination VMA 588 * @src: The source VMA 589 * @dup: Pointer to the destination VMA when successful. 590 * 591 * Returns: 0 on success. 592 */ 593 static inline int dup_anon_vma(struct vm_area_struct *dst, 594 struct vm_area_struct *src, struct vm_area_struct **dup) 595 { 596 /* 597 * Easily overlooked: when mprotect shifts the boundary, make sure the 598 * expanding vma has anon_vma set if the shrinking vma had, to cover any 599 * anon pages imported. 600 */ 601 if (src->anon_vma && !dst->anon_vma) { 602 int ret; 603 604 vma_assert_write_locked(dst); 605 dst->anon_vma = src->anon_vma; 606 ret = anon_vma_clone(dst, src); 607 if (ret) 608 return ret; 609 610 *dup = dst; 611 } 612 613 return 0; 614 } 615 616 /* 617 * vma_expand - Expand an existing VMA 618 * 619 * @vmi: The vma iterator 620 * @vma: The vma to expand 621 * @start: The start of the vma 622 * @end: The exclusive end of the vma 623 * @pgoff: The page offset of vma 624 * @next: The current of next vma. 625 * 626 * Expand @vma to @start and @end. Can expand off the start and end. Will 627 * expand over @next if it's different from @vma and @end == @next->vm_end. 628 * Checking if the @vma can expand and merge with @next needs to be handled by 629 * the caller. 630 * 631 * Returns: 0 on success 632 */ 633 int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, 634 unsigned long start, unsigned long end, pgoff_t pgoff, 635 struct vm_area_struct *next) 636 { 637 struct vm_area_struct *anon_dup = NULL; 638 bool remove_next = false; 639 struct vma_prepare vp; 640 641 vma_start_write(vma); 642 if (next && (vma != next) && (end == next->vm_end)) { 643 int ret; 644 645 remove_next = true; 646 vma_start_write(next); 647 ret = dup_anon_vma(vma, next, &anon_dup); 648 if (ret) 649 return ret; 650 } 651 652 init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL); 653 /* Not merging but overwriting any part of next is not handled. 
	VM_WARN_ON(next && !vp.remove &&
		   next != vma && end > next->vm_start);
	/* Only handles expanding */
	VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);

	/* Note: vma iterator must be pointing to 'start' */
	vma_iter_config(vmi, start, end);
	if (vma_iter_prealloc(vmi, vma))
		goto nomem;

	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);
	vma_set_range(vma, start, end, pgoff);
	vma_iter_store(vmi, vma);

	vma_complete(&vp, vmi, vma->vm_mm);
	return 0;

nomem:
	if (anon_dup)
		unlink_anon_vmas(anon_dup);
	return -ENOMEM;
}

/*
 * vma_shrink() - Reduce an existing VMAs memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

	if (vma->vm_start < start)
		vma_iter_config(vmi, vma->vm_start, start);
	else
		vma_iter_config(vmi, end, vma->vm_end);

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;

	vma_start_write(vma);

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);

	vma_iter_clear(vmi);
	vma_set_range(vma, start, end, pgoff);
	vma_complete(&vp, vmi, vma->vm_mm);
	return 0;
}

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those if the caller indicates
 * the current vma may be removed as part of the merge.
 */
static inline bool is_mergeable_vma(struct vm_area_struct *vma,
		struct file *file, unsigned long vm_flags,
		struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name, bool may_remove_vma)
{
	/*
	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
	 * match the flags but dirty bit -- the caller should mark
	 * merged VMA as dirty. If dirty bit won't be excluded from
	 * comparison, we increase pressure on the memory system forcing
	 * the kernel to generate new VMAs when old one could be
	 * extended instead.
	 */
	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
		return false;
	if (vma->vm_file != file)
		return false;
	if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
		return false;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
		return false;
	if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
		return false;
	return true;
}

static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
		struct anon_vma *anon_vma2, struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMA cloned from
	 * parents. This can improve scalability caused by anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return true;
	return anon_vma1 == anon_vma2;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
static bool
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return true;
	}
	return false;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
static bool
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		pgoff_t vm_pglen;
		vm_pglen = vma_pages(vma);
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return true;
	}
	return false;
}

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
 * figure out whether that can be merged with its predecessor or its
 * successor.  Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where **** is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
 * at the same address as **** and is of the same or larger span, and
 * NNNN the next vma after ****:
 *
 *     ****             ****                   ****
 *    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPCCCCCC
 *    cannot merge    might become       might become
 *                    PPNNNNNNNNNN       PPPPPPPPPPCC
 *    mmap, brk or    case 4 below       case 5 below
 *    mremap move:
 *                        ****               ****
 *                    PPPP    NNNN       PPPPCCCCNNNN
 *                    might become       might become
 *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
 *                    PPPPPPPPNNNN 2 or  PPPPPPPPNNNN 7 or
 *                    PPPPNNNNNNNN 3     PPPPNNNNNNNN 8
 *
 * It is important for case 8 that the vma CCCC overlapping the
 * region **** is never going to be extended over NNNN.  Instead NNNN must
 * be extended in region **** and CCCC must be removed.  This way in
 * all cases where vma_merge succeeds, the moment vma_merge drops the
 * rmap_locks, the properties of the merged vma will be already
 * correct for the whole merged range.  Some of those properties like
 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
 * be correct for the whole merged range immediately after the
 * rmap_locks are released.  Otherwise if NNNN would be removed and
 * CCCC would be extended over the NNNN range, remove_migration_ptes
 * or other rmap walkers (if working on addresses beyond the "end"
 * parameter) may establish ptes with the wrong permissions of CCCC
 * instead of the right permissions of NNNN.
 *
 * In the code below:
 * PPPP is represented by *prev
 * CCCC is represented by *curr or not represented at all (NULL)
 * NNNN is represented by *next or not represented at all (NULL)
 * **** is not represented - it will be merged and the vma containing the
 *      area is returned, or the function will return NULL
 */
static struct vm_area_struct
*vma_merge(struct vma_iterator *vmi, struct vm_area_struct *prev,
	   struct vm_area_struct *src, unsigned long addr, unsigned long end,
	   unsigned long vm_flags, pgoff_t pgoff, struct mempolicy *policy,
	   struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
	   struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = src->vm_mm;
	struct anon_vma *anon_vma = src->anon_vma;
	struct file *file = src->vm_file;
	struct vm_area_struct *curr, *next, *res;
	struct vm_area_struct *vma, *adjust, *remove, *remove2;
	struct vm_area_struct *anon_dup = NULL;
	struct vma_prepare vp;
	pgoff_t vma_pgoff;
	int err = 0;
	bool merge_prev = false;
	bool merge_next = false;
	bool vma_expanded = false;
	unsigned long vma_start = addr;
	unsigned long vma_end = end;
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	long adj_start = 0;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	/* Does the input range span an existing VMA? (cases 5 - 8) */
	curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);

	if (!curr ||			/* cases 1 - 4 */
	    end == curr->vm_end)	/* cases 6 - 8, adjacent VMA */
		next = vma_lookup(mm, end);
	else
		next = NULL;		/* case 5 */

	if (prev) {
		vma_start = prev->vm_start;
		vma_pgoff = prev->vm_pgoff;

		/* Can we merge the predecessor? */
		if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
		    && can_vma_merge_after(prev, vm_flags, anon_vma, file,
					   pgoff, vm_userfaultfd_ctx, anon_name)) {
			merge_prev = true;
			vma_prev(vmi);
		}
	}

	/* Can we merge the successor? */
	if (next && mpol_equal(policy, vma_policy(next)) &&
	    can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
				 vm_userfaultfd_ctx, anon_name)) {
		merge_next = true;
	}

	/* Verify some invariant that must be enforced by the caller. */
	VM_WARN_ON(prev && addr <= prev->vm_start);
	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
	VM_WARN_ON(addr >= end);

	if (!merge_prev && !merge_next)
		return NULL;		/* Not mergeable. */

	if (merge_prev)
		vma_start_write(prev);

	res = vma = prev;
	remove = remove2 = adjust = NULL;

	/* Can we merge both the predecessor and the successor? */
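	/* Both neighbours are mergeable: case 1, or case 6 when curr must also be removed. */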
	if (merge_prev && merge_next &&
	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
		vma_start_write(next);
		remove = next;				/* case 1 */
		vma_end = next->vm_end;
		err = dup_anon_vma(prev, next, &anon_dup);
		if (curr) {				/* case 6 */
			vma_start_write(curr);
			remove = curr;
			remove2 = next;
			/*
			 * Note that the dup_anon_vma below cannot overwrite err
			 * since the first caller would do nothing unless next
			 * has an anon_vma.
			 */
			if (!next->anon_vma)
				err = dup_anon_vma(prev, curr, &anon_dup);
		}
	} else if (merge_prev) {			/* case 2 */
		if (curr) {
			vma_start_write(curr);
			if (end == curr->vm_end) {	/* case 7 */
				/*
				 * can_vma_merge_after() assumed we would not be
				 * removing prev vma, so it skipped the check
				 * for vm_ops->close, but we are removing curr
				 */
				if (curr->vm_ops && curr->vm_ops->close)
					err = -EINVAL;
				remove = curr;
			} else {			/* case 5 */
				adjust = curr;
				adj_start = (end - curr->vm_start);
			}
			if (!err)
				err = dup_anon_vma(prev, curr, &anon_dup);
		}
	} else { /* merge_next */
		vma_start_write(next);
		res = next;
		if (prev && addr < prev->vm_end) {	/* case 4 */
			vma_start_write(prev);
			vma_end = addr;
			adjust = next;
			adj_start = -(prev->vm_end - addr);
			err = dup_anon_vma(next, prev, &anon_dup);
		} else {
			/*
			 * Note that cases 3 and 8 are the ONLY ones where prev
			 * is permitted to be (but is not necessarily) NULL.
			 */
			vma = next;			/* case 3 */
			vma_start = addr;
			vma_end = next->vm_end;
			vma_pgoff = next->vm_pgoff - pglen;
			if (curr) {			/* case 8 */
				vma_pgoff = curr->vm_pgoff;
				vma_start_write(curr);
				remove = curr;
				err = dup_anon_vma(next, curr, &anon_dup);
			}
		}
	}

	/* Error in anon_vma clone. */
	if (err)
		goto anon_vma_fail;

	if (vma_start < vma->vm_start || vma_end > vma->vm_end)
		vma_expanded = true;

	if (vma_expanded) {
		vma_iter_config(vmi, vma_start, vma_end);
	} else {
		vma_iter_config(vmi, adjust->vm_start + adj_start,
				adjust->vm_end);
	}

	if (vma_iter_prealloc(vmi, vma))
		goto prealloc_fail;

	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
		   vp.anon_vma != adjust->anon_vma);

	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
	vma_set_range(vma, vma_start, vma_end, vma_pgoff);

	if (vma_expanded)
		vma_iter_store(vmi, vma);

	if (adj_start) {
		adjust->vm_start += adj_start;
		adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
		if (adj_start < 0) {
			WARN_ON(vma_expanded);
			vma_iter_store(vmi, next);
		}
	}

	vma_complete(&vp, vmi, mm);
	khugepaged_enter_vma(res, vm_flags);
	return res;

prealloc_fail:
	if (anon_dup)
		unlink_anon_vmas(anon_dup);

anon_vma_fail:
	vma_iter_set(vmi, addr);
	vma_iter_load(vmi);
	return NULL;
}

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_lock held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that READ_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mmap_lock.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = NULL;
	struct vm_area_struct *prev, *next;
	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);

	/* Try next first. */
	next = vma_iter_load(&vmi);
	if (next) {
		anon_vma = reusable_anon_vma(next, vma, next);
		if (anon_vma)
			return anon_vma;
	}

	prev = vma_prev(&vmi);
	VM_BUG_ON_VMA(prev != vma, vma);
	prev = vma_prev(&vmi);
	/* Try prev next. */
	if (prev)
		anon_vma = reusable_anon_vma(prev, prev, vma);

	/*
	 * We might reach here with anon_vma == NULL if we can't find
	 * any reusable anon_vma.
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return anon_vma;
}

/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}

bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
		     unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISBLK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISSOCK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	/* Special "we do even unsigned file positions" case */
	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
		return 0;

	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
	return ULONG_MAX;
}

static inline bool file_mmap_ok(struct file *file, struct inode *inode,
				unsigned long pgoff, unsigned long len)
{
	u64 maxsize = file_mmap_size_max(file, inode);

	if (maxsize && len > maxsize)
		return false;
	maxsize -= len;
	if (pgoff > maxsize >> PAGE_SHIFT)
		return false;
	return true;
}

/*
 * The caller must write-lock current->mm->mmap_lock.
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, vm_flags_t vm_flags,
			unsigned long pgoff, unsigned long *populate,
			struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	int pkey = 0;

	*populate = 0;

	if (!len)
		return -EINVAL;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we don't add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && path_noexec(&file->f_path)))
			prot |= PROT_EXEC;

	/* force arch specific MAP_FIXED handling in get_unmapped_area */
	if (flags & MAP_FIXED_NOREPLACE)
		flags |= MAP_FIXED;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/*
	 * addr is returned from get_unmapped_area,
	 * There are two cases:
	 * 1> MAP_FIXED == false
	 *	unallocated memory, no need to check sealing.
	 * 2> MAP_FIXED == true
	 *	sealing is checked inside mmap_region when
	 *	do_vmi_munmap is called.
1266 */ 1267 1268 if (prot == PROT_EXEC) { 1269 pkey = execute_only_pkey(mm); 1270 if (pkey < 0) 1271 pkey = 0; 1272 } 1273 1274 /* Do simple checking here so the lower-level routines won't have 1275 * to. we assume access permissions have been handled by the open 1276 * of the memory object, so we don't do any here. 1277 */ 1278 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | 1279 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; 1280 1281 /* Obtain the address to map to. we verify (or select) it and ensure 1282 * that it represents a valid section of the address space. 1283 */ 1284 addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags); 1285 if (IS_ERR_VALUE(addr)) 1286 return addr; 1287 1288 if (flags & MAP_FIXED_NOREPLACE) { 1289 if (find_vma_intersection(mm, addr, addr + len)) 1290 return -EEXIST; 1291 } 1292 1293 if (flags & MAP_LOCKED) 1294 if (!can_do_mlock()) 1295 return -EPERM; 1296 1297 if (!mlock_future_ok(mm, vm_flags, len)) 1298 return -EAGAIN; 1299 1300 if (file) { 1301 struct inode *inode = file_inode(file); 1302 unsigned long flags_mask; 1303 1304 if (!file_mmap_ok(file, inode, pgoff, len)) 1305 return -EOVERFLOW; 1306 1307 flags_mask = LEGACY_MAP_MASK; 1308 if (file->f_op->fop_flags & FOP_MMAP_SYNC) 1309 flags_mask |= MAP_SYNC; 1310 1311 switch (flags & MAP_TYPE) { 1312 case MAP_SHARED: 1313 /* 1314 * Force use of MAP_SHARED_VALIDATE with non-legacy 1315 * flags. E.g. MAP_SYNC is dangerous to use with 1316 * MAP_SHARED as you don't know which consistency model 1317 * you will get. We silently ignore unsupported flags 1318 * with MAP_SHARED to preserve backward compatibility. 1319 */ 1320 flags &= LEGACY_MAP_MASK; 1321 fallthrough; 1322 case MAP_SHARED_VALIDATE: 1323 if (flags & ~flags_mask) 1324 return -EOPNOTSUPP; 1325 if (prot & PROT_WRITE) { 1326 if (!(file->f_mode & FMODE_WRITE)) 1327 return -EACCES; 1328 if (IS_SWAPFILE(file->f_mapping->host)) 1329 return -ETXTBSY; 1330 } 1331 1332 /* 1333 * Make sure we don't allow writing to an append-only 1334 * file.. 1335 */ 1336 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) 1337 return -EACCES; 1338 1339 vm_flags |= VM_SHARED | VM_MAYSHARE; 1340 if (!(file->f_mode & FMODE_WRITE)) 1341 vm_flags &= ~(VM_MAYWRITE | VM_SHARED); 1342 fallthrough; 1343 case MAP_PRIVATE: 1344 if (!(file->f_mode & FMODE_READ)) 1345 return -EACCES; 1346 if (path_noexec(&file->f_path)) { 1347 if (vm_flags & VM_EXEC) 1348 return -EPERM; 1349 vm_flags &= ~VM_MAYEXEC; 1350 } 1351 1352 if (!file->f_op->mmap) 1353 return -ENODEV; 1354 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) 1355 return -EINVAL; 1356 break; 1357 1358 default: 1359 return -EINVAL; 1360 } 1361 } else { 1362 switch (flags & MAP_TYPE) { 1363 case MAP_SHARED: 1364 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) 1365 return -EINVAL; 1366 /* 1367 * Ignore pgoff. 1368 */ 1369 pgoff = 0; 1370 vm_flags |= VM_SHARED | VM_MAYSHARE; 1371 break; 1372 case MAP_DROPPABLE: 1373 if (VM_DROPPABLE == VM_NONE) 1374 return -ENOTSUPP; 1375 /* 1376 * A locked or stack area makes no sense to be droppable. 1377 * 1378 * Also, since droppable pages can just go away at any time 1379 * it makes no sense to copy them on fork or dump them. 1380 * 1381 * And don't attempt to combine with hugetlb for now. 1382 */ 1383 if (flags & (MAP_LOCKED | MAP_HUGETLB)) 1384 return -EINVAL; 1385 if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) 1386 return -EINVAL; 1387 1388 vm_flags |= VM_DROPPABLE; 1389 1390 /* 1391 * If the pages can be dropped, then it doesn't make 1392 * sense to reserve them. 
1393 */ 1394 vm_flags |= VM_NORESERVE; 1395 1396 /* 1397 * Likewise, they're volatile enough that they 1398 * shouldn't survive forks or coredumps. 1399 */ 1400 vm_flags |= VM_WIPEONFORK | VM_DONTDUMP; 1401 fallthrough; 1402 case MAP_PRIVATE: 1403 /* 1404 * Set pgoff according to addr for anon_vma. 1405 */ 1406 pgoff = addr >> PAGE_SHIFT; 1407 break; 1408 default: 1409 return -EINVAL; 1410 } 1411 } 1412 1413 /* 1414 * Set 'VM_NORESERVE' if we should not account for the 1415 * memory use of this mapping. 1416 */ 1417 if (flags & MAP_NORESERVE) { 1418 /* We honor MAP_NORESERVE if allowed to overcommit */ 1419 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) 1420 vm_flags |= VM_NORESERVE; 1421 1422 /* hugetlb applies strict overcommit unless MAP_NORESERVE */ 1423 if (file && is_file_hugepages(file)) 1424 vm_flags |= VM_NORESERVE; 1425 } 1426 1427 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); 1428 if (!IS_ERR_VALUE(addr) && 1429 ((vm_flags & VM_LOCKED) || 1430 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) 1431 *populate = len; 1432 return addr; 1433 } 1434 1435 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, 1436 unsigned long prot, unsigned long flags, 1437 unsigned long fd, unsigned long pgoff) 1438 { 1439 struct file *file = NULL; 1440 unsigned long retval; 1441 1442 if (!(flags & MAP_ANONYMOUS)) { 1443 audit_mmap_fd(fd, flags); 1444 file = fget(fd); 1445 if (!file) 1446 return -EBADF; 1447 if (is_file_hugepages(file)) { 1448 len = ALIGN(len, huge_page_size(hstate_file(file))); 1449 } else if (unlikely(flags & MAP_HUGETLB)) { 1450 retval = -EINVAL; 1451 goto out_fput; 1452 } 1453 } else if (flags & MAP_HUGETLB) { 1454 struct hstate *hs; 1455 1456 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); 1457 if (!hs) 1458 return -EINVAL; 1459 1460 len = ALIGN(len, huge_page_size(hs)); 1461 /* 1462 * VM_NORESERVE is used because the reservations will be 1463 * taken when vm_ops->mmap() is called 1464 */ 1465 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, 1466 VM_NORESERVE, 1467 HUGETLB_ANONHUGE_INODE, 1468 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); 1469 if (IS_ERR(file)) 1470 return PTR_ERR(file); 1471 } 1472 1473 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); 1474 out_fput: 1475 if (file) 1476 fput(file); 1477 return retval; 1478 } 1479 1480 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, 1481 unsigned long, prot, unsigned long, flags, 1482 unsigned long, fd, unsigned long, pgoff) 1483 { 1484 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); 1485 } 1486 1487 #ifdef __ARCH_WANT_SYS_OLD_MMAP 1488 struct mmap_arg_struct { 1489 unsigned long addr; 1490 unsigned long len; 1491 unsigned long prot; 1492 unsigned long flags; 1493 unsigned long fd; 1494 unsigned long offset; 1495 }; 1496 1497 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) 1498 { 1499 struct mmap_arg_struct a; 1500 1501 if (copy_from_user(&a, arg, sizeof(a))) 1502 return -EFAULT; 1503 if (offset_in_page(a.offset)) 1504 return -EINVAL; 1505 1506 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, 1507 a.offset >> PAGE_SHIFT); 1508 } 1509 #endif /* __ARCH_WANT_SYS_OLD_MMAP */ 1510 1511 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) 1512 { 1513 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); 1514 } 1515 1516 static bool vma_is_shared_writable(struct vm_area_struct *vma) 1517 { 1518 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == 1519 (VM_WRITE | VM_SHARED); 1520 
}

static bool vma_fs_can_writeback(struct vm_area_struct *vma)
{
	/* No managed pages to writeback. */
	if (vma->vm_flags & VM_PFNMAP)
		return false;

	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_can_writeback(vma->vm_file->f_mapping);
}

/*
 * Does this VMA require the underlying folios to have their dirty state
 * tracked?
 */
bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
{
	/* Only shared, writable VMAs require dirty tracking. */
	if (!vma_is_shared_writable(vma))
		return false;

	/* Does the filesystem need to be notified? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/*
	 * Even if the filesystem doesn't indicate a need for writenotify, if it
	 * can writeback, dirty tracking is still required.
	 */
	return vma_fs_can_writeback(vma);
}

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	/* If it was private or non-writable, the write bit is already clear */
	if (!vma_is_shared_writable(vma))
		return false;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
		return false;

	/*
	 * Do we need to track softdirty? hugetlb does not support softdirty
	 * tracking yet.
	 */
	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
		return true;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_wp(vma))
		return true;

	/* Can the mapping track the dirty pages? */
	return vma_fs_can_writeback(vma);
}

/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return false;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

/**
 * unmapped_area() - Find an area between the low_limit and the high_limit with
 * the correct alignment and offset, all from @info. Note: current->mm is used
 * for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;
	VMA_ITERATOR(vmi, current->mm, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask + info->start_gap;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	/*
	 * Adjust for the gap first so it doesn't interfere with the
	 * later alignment. The first step is the minimum needed to
	 * fulfill the start gap, the next step is the minimum to align
	 * that. It is the minimum needed to fulfill both.
	 */
	gap = vma_iter_addr(&vmi) + info->start_gap;
	gap += (info->align_offset - gap) & info->align_mask;
	tmp = vma_next(&vmi);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap + length - 1) {
			low_limit = tmp->vm_end;
			vma_iter_reset(&vmi);
			goto retry;
		}
	} else {
		tmp = vma_prev(&vmi);
		if (tmp && vm_end_gap(tmp) > gap) {
			low_limit = vm_end_gap(tmp);
			vma_iter_reset(&vmi);
			goto retry;
		}
	}

	return gap;
}

/**
 * unmapped_area_topdown() - Find an area between the low_limit and the
 * high_limit with the correct alignment and offset at the highest available
 * address, all from @info. Note: current->mm is used for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap, gap_end;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;
	VMA_ITERATOR(vmi, current->mm, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask + info->start_gap;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	gap = vma_iter_end(&vmi) - info->length;
	gap -= (gap - info->align_offset) & info->align_mask;
	gap_end = vma_iter_end(&vmi);
	tmp = vma_next(&vmi);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap_end) {
			high_limit = vm_start_gap(tmp);
			vma_iter_reset(&vmi);
			goto retry;
		}
	} else {
		tmp = vma_prev(&vmi);
		if (tmp && vm_end_gap(tmp) > gap) {
			high_limit = tmp->vm_start;
			vma_iter_reset(&vmi);
			goto retry;
		}
	}

	return gap;
}

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */
unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long addr;

	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		addr = unmapped_area_topdown(info);
	else
		addr = unmapped_area(info);

	trace_vm_unmapped_area(addr, info);
	return addr;
}

/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct vm_unmapped_area_info info = {};
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = mmap_end;
	return vm_unmapped_area(&info);
}

#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff,
		       unsigned long flags)
{
	return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
}
#endif

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info = {};
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	/* requested length too big for entire address space */
	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
1831 */ 1832 if (offset_in_page(addr)) { 1833 VM_BUG_ON(addr != -ENOMEM); 1834 info.flags = 0; 1835 info.low_limit = TASK_UNMAPPED_BASE; 1836 info.high_limit = mmap_end; 1837 addr = vm_unmapped_area(&info); 1838 } 1839 1840 return addr; 1841 } 1842 1843 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 1844 unsigned long 1845 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 1846 unsigned long len, unsigned long pgoff, 1847 unsigned long flags) 1848 { 1849 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); 1850 } 1851 #endif 1852 1853 #ifndef HAVE_ARCH_UNMAPPED_AREA_VMFLAGS 1854 unsigned long 1855 arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len, 1856 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) 1857 { 1858 return arch_get_unmapped_area(filp, addr, len, pgoff, flags); 1859 } 1860 1861 unsigned long 1862 arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr, 1863 unsigned long len, unsigned long pgoff, 1864 unsigned long flags, vm_flags_t vm_flags) 1865 { 1866 return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); 1867 } 1868 #endif 1869 1870 unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp, 1871 unsigned long addr, unsigned long len, 1872 unsigned long pgoff, unsigned long flags, 1873 vm_flags_t vm_flags) 1874 { 1875 if (test_bit(MMF_TOPDOWN, &mm->flags)) 1876 return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff, 1877 flags, vm_flags); 1878 return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, vm_flags); 1879 } 1880 1881 unsigned long 1882 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, 1883 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) 1884 { 1885 unsigned long (*get_area)(struct file *, unsigned long, 1886 unsigned long, unsigned long, unsigned long) 1887 = NULL; 1888 1889 unsigned long error = arch_mmap_check(addr, len, flags); 1890 if (error) 1891 return error; 1892 1893 /* Careful about overflows.. */ 1894 if (len > TASK_SIZE) 1895 return -ENOMEM; 1896 1897 if (file) { 1898 if (file->f_op->get_unmapped_area) 1899 get_area = file->f_op->get_unmapped_area; 1900 } else if (flags & MAP_SHARED) { 1901 /* 1902 * mmap_region() will call shmem_zero_setup() to create a file, 1903 * so use shmem's get_unmapped_area in case it can be huge. 1904 */ 1905 get_area = shmem_get_unmapped_area; 1906 } 1907 1908 /* Always treat pgoff as zero for anonymous memory. */ 1909 if (!file) 1910 pgoff = 0; 1911 1912 if (get_area) { 1913 addr = get_area(file, addr, len, pgoff, flags); 1914 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 1915 /* Ensures that larger anonymous mappings are THP aligned. */ 1916 addr = thp_get_unmapped_area_vmflags(file, addr, len, 1917 pgoff, flags, vm_flags); 1918 } else { 1919 addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len, 1920 pgoff, flags, vm_flags); 1921 } 1922 if (IS_ERR_VALUE(addr)) 1923 return addr; 1924 1925 if (addr > TASK_SIZE - len) 1926 return -ENOMEM; 1927 if (offset_in_page(addr)) 1928 return -EINVAL; 1929 1930 error = security_mmap_addr(addr); 1931 return error ? 
error : addr; 1932 } 1933 1934 unsigned long 1935 mm_get_unmapped_area(struct mm_struct *mm, struct file *file, 1936 unsigned long addr, unsigned long len, 1937 unsigned long pgoff, unsigned long flags) 1938 { 1939 if (test_bit(MMF_TOPDOWN, &mm->flags)) 1940 return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags); 1941 return arch_get_unmapped_area(file, addr, len, pgoff, flags); 1942 } 1943 EXPORT_SYMBOL(mm_get_unmapped_area); 1944 1945 /** 1946 * find_vma_intersection() - Look up the first VMA which intersects the interval 1947 * @mm: The process address space. 1948 * @start_addr: The inclusive start user address. 1949 * @end_addr: The exclusive end user address. 1950 * 1951 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes 1952 * start_addr < end_addr. 1953 */ 1954 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, 1955 unsigned long start_addr, 1956 unsigned long end_addr) 1957 { 1958 unsigned long index = start_addr; 1959 1960 mmap_assert_locked(mm); 1961 return mt_find(&mm->mm_mt, &index, end_addr - 1); 1962 } 1963 EXPORT_SYMBOL(find_vma_intersection); 1964 1965 /** 1966 * find_vma() - Find the VMA for a given address, or the next VMA. 1967 * @mm: The mm_struct to check 1968 * @addr: The address 1969 * 1970 * Returns: The VMA associated with addr, or the next VMA. 1971 * May return %NULL in the case of no VMA at addr or above. 1972 */ 1973 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) 1974 { 1975 unsigned long index = addr; 1976 1977 mmap_assert_locked(mm); 1978 return mt_find(&mm->mm_mt, &index, ULONG_MAX); 1979 } 1980 EXPORT_SYMBOL(find_vma); 1981 1982 /** 1983 * find_vma_prev() - Find the VMA for a given address, or the next vma and 1984 * set %pprev to the previous VMA, if any. 1985 * @mm: The mm_struct to check 1986 * @addr: The address 1987 * @pprev: The pointer to set to the previous VMA 1988 * 1989 * Note that RCU lock is missing here since the external mmap_lock() is used 1990 * instead. 1991 * 1992 * Returns: The VMA associated with @addr, or the next vma. 1993 * May return %NULL in the case of no vma at addr or above. 1994 */ 1995 struct vm_area_struct * 1996 find_vma_prev(struct mm_struct *mm, unsigned long addr, 1997 struct vm_area_struct **pprev) 1998 { 1999 struct vm_area_struct *vma; 2000 VMA_ITERATOR(vmi, mm, addr); 2001 2002 vma = vma_iter_load(&vmi); 2003 *pprev = vma_prev(&vmi); 2004 if (!vma) 2005 vma = vma_next(&vmi); 2006 return vma; 2007 } 2008 2009 /* 2010 * Verify that the stack growth is acceptable and 2011 * update accounting. This is shared with both the 2012 * grow-up and grow-down cases. 2013 */ 2014 static int acct_stack_growth(struct vm_area_struct *vma, 2015 unsigned long size, unsigned long grow) 2016 { 2017 struct mm_struct *mm = vma->vm_mm; 2018 unsigned long new_start; 2019 2020 /* address space limit tests */ 2021 if (!may_expand_vm(mm, vma->vm_flags, grow)) 2022 return -ENOMEM; 2023 2024 /* Stack limit test */ 2025 if (size > rlimit(RLIMIT_STACK)) 2026 return -ENOMEM; 2027 2028 /* mlock limit tests */ 2029 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) 2030 return -ENOMEM; 2031 2032 /* Check to ensure the stack will not grow into a hugetlb-only region */ 2033 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : 2034 vma->vm_end - size; 2035 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) 2036 return -EFAULT; 2037 2038 /* 2039 * Overcommit.. This must be the final test, as it will 2040 * update security statistics. 
2041 */ 2042 if (security_vm_enough_memory_mm(mm, grow)) 2043 return -ENOMEM; 2044 2045 return 0; 2046 } 2047 2048 #if defined(CONFIG_STACK_GROWSUP) 2049 /* 2050 * PA-RISC uses this for its stack. 2051 * vma is the last one with address > vma->vm_end. Have to extend vma. 2052 */ 2053 static int expand_upwards(struct vm_area_struct *vma, unsigned long address) 2054 { 2055 struct mm_struct *mm = vma->vm_mm; 2056 struct vm_area_struct *next; 2057 unsigned long gap_addr; 2058 int error = 0; 2059 VMA_ITERATOR(vmi, mm, vma->vm_start); 2060 2061 if (!(vma->vm_flags & VM_GROWSUP)) 2062 return -EFAULT; 2063 2064 /* Guard against exceeding limits of the address space. */ 2065 address &= PAGE_MASK; 2066 if (address >= (TASK_SIZE & PAGE_MASK)) 2067 return -ENOMEM; 2068 address += PAGE_SIZE; 2069 2070 /* Enforce stack_guard_gap */ 2071 gap_addr = address + stack_guard_gap; 2072 2073 /* Guard against overflow */ 2074 if (gap_addr < address || gap_addr > TASK_SIZE) 2075 gap_addr = TASK_SIZE; 2076 2077 next = find_vma_intersection(mm, vma->vm_end, gap_addr); 2078 if (next && vma_is_accessible(next)) { 2079 if (!(next->vm_flags & VM_GROWSUP)) 2080 return -ENOMEM; 2081 /* Check that both stack segments have the same anon_vma? */ 2082 } 2083 2084 if (next) 2085 vma_iter_prev_range_limit(&vmi, address); 2086 2087 vma_iter_config(&vmi, vma->vm_start, address); 2088 if (vma_iter_prealloc(&vmi, vma)) 2089 return -ENOMEM; 2090 2091 /* We must make sure the anon_vma is allocated. */ 2092 if (unlikely(anon_vma_prepare(vma))) { 2093 vma_iter_free(&vmi); 2094 return -ENOMEM; 2095 } 2096 2097 /* Lock the VMA before expanding to prevent concurrent page faults */ 2098 vma_start_write(vma); 2099 /* 2100 * vma->vm_start/vm_end cannot change under us because the caller 2101 * is required to hold the mmap_lock in read mode. We need the 2102 * anon_vma lock to serialize against concurrent expand_stacks. 2103 */ 2104 anon_vma_lock_write(vma->anon_vma); 2105 2106 /* Somebody else might have raced and expanded it already */ 2107 if (address > vma->vm_end) { 2108 unsigned long size, grow; 2109 2110 size = address - vma->vm_start; 2111 grow = (address - vma->vm_end) >> PAGE_SHIFT; 2112 2113 error = -ENOMEM; 2114 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { 2115 error = acct_stack_growth(vma, size, grow); 2116 if (!error) { 2117 /* 2118 * We only hold a shared mmap_lock lock here, so 2119 * we need to protect against concurrent vma 2120 * expansions. anon_vma_lock_write() doesn't 2121 * help here, as we don't guarantee that all 2122 * growable vmas in a mm share the same root 2123 * anon vma. So, we reuse mm->page_table_lock 2124 * to guard against concurrent vma expansions. 2125 */ 2126 spin_lock(&mm->page_table_lock); 2127 if (vma->vm_flags & VM_LOCKED) 2128 mm->locked_vm += grow; 2129 vm_stat_account(mm, vma->vm_flags, grow); 2130 anon_vma_interval_tree_pre_update_vma(vma); 2131 vma->vm_end = address; 2132 /* Overwrite old entry in mtree. */ 2133 vma_iter_store(&vmi, vma); 2134 anon_vma_interval_tree_post_update_vma(vma); 2135 spin_unlock(&mm->page_table_lock); 2136 2137 perf_event_mmap(vma); 2138 } 2139 } 2140 } 2141 anon_vma_unlock_write(vma->anon_vma); 2142 vma_iter_free(&vmi); 2143 validate_mm(mm); 2144 return error; 2145 } 2146 #endif /* CONFIG_STACK_GROWSUP */ 2147 2148 /* 2149 * vma is the first one with address < vma->vm_start. Have to extend vma. 2150 * mmap_lock held for writing. 
2151 */ 2152 int expand_downwards(struct vm_area_struct *vma, unsigned long address) 2153 { 2154 struct mm_struct *mm = vma->vm_mm; 2155 struct vm_area_struct *prev; 2156 int error = 0; 2157 VMA_ITERATOR(vmi, mm, vma->vm_start); 2158 2159 if (!(vma->vm_flags & VM_GROWSDOWN)) 2160 return -EFAULT; 2161 2162 address &= PAGE_MASK; 2163 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) 2164 return -EPERM; 2165 2166 /* Enforce stack_guard_gap */ 2167 prev = vma_prev(&vmi); 2168 /* Check that both stack segments have the same anon_vma? */ 2169 if (prev) { 2170 if (!(prev->vm_flags & VM_GROWSDOWN) && 2171 vma_is_accessible(prev) && 2172 (address - prev->vm_end < stack_guard_gap)) 2173 return -ENOMEM; 2174 } 2175 2176 if (prev) 2177 vma_iter_next_range_limit(&vmi, vma->vm_start); 2178 2179 vma_iter_config(&vmi, address, vma->vm_end); 2180 if (vma_iter_prealloc(&vmi, vma)) 2181 return -ENOMEM; 2182 2183 /* We must make sure the anon_vma is allocated. */ 2184 if (unlikely(anon_vma_prepare(vma))) { 2185 vma_iter_free(&vmi); 2186 return -ENOMEM; 2187 } 2188 2189 /* Lock the VMA before expanding to prevent concurrent page faults */ 2190 vma_start_write(vma); 2191 /* 2192 * vma->vm_start/vm_end cannot change under us because the caller 2193 * is required to hold the mmap_lock in read mode. We need the 2194 * anon_vma lock to serialize against concurrent expand_stacks. 2195 */ 2196 anon_vma_lock_write(vma->anon_vma); 2197 2198 /* Somebody else might have raced and expanded it already */ 2199 if (address < vma->vm_start) { 2200 unsigned long size, grow; 2201 2202 size = vma->vm_end - address; 2203 grow = (vma->vm_start - address) >> PAGE_SHIFT; 2204 2205 error = -ENOMEM; 2206 if (grow <= vma->vm_pgoff) { 2207 error = acct_stack_growth(vma, size, grow); 2208 if (!error) { 2209 /* 2210 * We only hold a shared mmap_lock lock here, so 2211 * we need to protect against concurrent vma 2212 * expansions. anon_vma_lock_write() doesn't 2213 * help here, as we don't guarantee that all 2214 * growable vmas in a mm share the same root 2215 * anon vma. So, we reuse mm->page_table_lock 2216 * to guard against concurrent vma expansions. 2217 */ 2218 spin_lock(&mm->page_table_lock); 2219 if (vma->vm_flags & VM_LOCKED) 2220 mm->locked_vm += grow; 2221 vm_stat_account(mm, vma->vm_flags, grow); 2222 anon_vma_interval_tree_pre_update_vma(vma); 2223 vma->vm_start = address; 2224 vma->vm_pgoff -= grow; 2225 /* Overwrite old entry in mtree. */ 2226 vma_iter_store(&vmi, vma); 2227 anon_vma_interval_tree_post_update_vma(vma); 2228 spin_unlock(&mm->page_table_lock); 2229 2230 perf_event_mmap(vma); 2231 } 2232 } 2233 } 2234 anon_vma_unlock_write(vma->anon_vma); 2235 vma_iter_free(&vmi); 2236 validate_mm(mm); 2237 return error; 2238 } 2239 2240 /* enforced gap between the expanding stack and other mappings. 
*/ 2241 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; 2242 2243 static int __init cmdline_parse_stack_guard_gap(char *p) 2244 { 2245 unsigned long val; 2246 char *endptr; 2247 2248 val = simple_strtoul(p, &endptr, 10); 2249 if (!*endptr) 2250 stack_guard_gap = val << PAGE_SHIFT; 2251 2252 return 1; 2253 } 2254 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); 2255 2256 #ifdef CONFIG_STACK_GROWSUP 2257 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) 2258 { 2259 return expand_upwards(vma, address); 2260 } 2261 2262 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) 2263 { 2264 struct vm_area_struct *vma, *prev; 2265 2266 addr &= PAGE_MASK; 2267 vma = find_vma_prev(mm, addr, &prev); 2268 if (vma && (vma->vm_start <= addr)) 2269 return vma; 2270 if (!prev) 2271 return NULL; 2272 if (expand_stack_locked(prev, addr)) 2273 return NULL; 2274 if (prev->vm_flags & VM_LOCKED) 2275 populate_vma_page_range(prev, addr, prev->vm_end, NULL); 2276 return prev; 2277 } 2278 #else 2279 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) 2280 { 2281 return expand_downwards(vma, address); 2282 } 2283 2284 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) 2285 { 2286 struct vm_area_struct *vma; 2287 unsigned long start; 2288 2289 addr &= PAGE_MASK; 2290 vma = find_vma(mm, addr); 2291 if (!vma) 2292 return NULL; 2293 if (vma->vm_start <= addr) 2294 return vma; 2295 start = vma->vm_start; 2296 if (expand_stack_locked(vma, addr)) 2297 return NULL; 2298 if (vma->vm_flags & VM_LOCKED) 2299 populate_vma_page_range(vma, addr, start, NULL); 2300 return vma; 2301 } 2302 #endif 2303 2304 #if defined(CONFIG_STACK_GROWSUP) 2305 2306 #define vma_expand_up(vma,addr) expand_upwards(vma, addr) 2307 #define vma_expand_down(vma, addr) (-EFAULT) 2308 2309 #else 2310 2311 #define vma_expand_up(vma,addr) (-EFAULT) 2312 #define vma_expand_down(vma, addr) expand_downwards(vma, addr) 2313 2314 #endif 2315 2316 /* 2317 * expand_stack(): legacy interface for page faulting. Don't use unless 2318 * you have to. 2319 * 2320 * This is called with the mm locked for reading, drops the lock, takes 2321 * the lock for writing, tries to look up a vma again, expands it if 2322 * necessary, and downgrades the lock to reading again. 2323 * 2324 * If no vma is found or it can't be expanded, it returns NULL and has 2325 * dropped the lock. 2326 */ 2327 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) 2328 { 2329 struct vm_area_struct *vma, *prev; 2330 2331 mmap_read_unlock(mm); 2332 if (mmap_write_lock_killable(mm)) 2333 return NULL; 2334 2335 vma = find_vma_prev(mm, addr, &prev); 2336 if (vma && vma->vm_start <= addr) 2337 goto success; 2338 2339 if (prev && !vma_expand_up(prev, addr)) { 2340 vma = prev; 2341 goto success; 2342 } 2343 2344 if (vma && !vma_expand_down(vma, addr)) 2345 goto success; 2346 2347 mmap_write_unlock(mm); 2348 return NULL; 2349 2350 success: 2351 mmap_write_downgrade(mm); 2352 return vma; 2353 } 2354 2355 /* 2356 * Ok - we have the memory areas we should free on a maple tree so release them, 2357 * and do the vma updates. 2358 * 2359 * Called with the mm semaphore held. 
2360 */ 2361 static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas) 2362 { 2363 unsigned long nr_accounted = 0; 2364 struct vm_area_struct *vma; 2365 2366 /* Update high watermark before we lower total_vm */ 2367 update_hiwater_vm(mm); 2368 mas_for_each(mas, vma, ULONG_MAX) { 2369 long nrpages = vma_pages(vma); 2370 2371 if (vma->vm_flags & VM_ACCOUNT) 2372 nr_accounted += nrpages; 2373 vm_stat_account(mm, vma->vm_flags, -nrpages); 2374 remove_vma(vma, false); 2375 } 2376 vm_unacct_memory(nr_accounted); 2377 } 2378 2379 /* 2380 * Get rid of page table information in the indicated region. 2381 * 2382 * Called with the mm semaphore held. 2383 */ 2384 static void unmap_region(struct mm_struct *mm, struct ma_state *mas, 2385 struct vm_area_struct *vma, struct vm_area_struct *prev, 2386 struct vm_area_struct *next, unsigned long start, 2387 unsigned long end, unsigned long tree_end, bool mm_wr_locked) 2388 { 2389 struct mmu_gather tlb; 2390 unsigned long mt_start = mas->index; 2391 2392 lru_add_drain(); 2393 tlb_gather_mmu(&tlb, mm); 2394 update_hiwater_rss(mm); 2395 unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked); 2396 mas_set(mas, mt_start); 2397 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, 2398 next ? next->vm_start : USER_PGTABLES_CEILING, 2399 mm_wr_locked); 2400 tlb_finish_mmu(&tlb); 2401 } 2402 2403 /* 2404 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it 2405 * has already been checked or doesn't make sense to fail. 2406 * VMA Iterator will point to the end VMA. 2407 */ 2408 static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, 2409 unsigned long addr, int new_below) 2410 { 2411 struct vma_prepare vp; 2412 struct vm_area_struct *new; 2413 int err; 2414 2415 WARN_ON(vma->vm_start >= addr); 2416 WARN_ON(vma->vm_end <= addr); 2417 2418 if (vma->vm_ops && vma->vm_ops->may_split) { 2419 err = vma->vm_ops->may_split(vma, addr); 2420 if (err) 2421 return err; 2422 } 2423 2424 new = vm_area_dup(vma); 2425 if (!new) 2426 return -ENOMEM; 2427 2428 if (new_below) { 2429 new->vm_end = addr; 2430 } else { 2431 new->vm_start = addr; 2432 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); 2433 } 2434 2435 err = -ENOMEM; 2436 vma_iter_config(vmi, new->vm_start, new->vm_end); 2437 if (vma_iter_prealloc(vmi, new)) 2438 goto out_free_vma; 2439 2440 err = vma_dup_policy(vma, new); 2441 if (err) 2442 goto out_free_vmi; 2443 2444 err = anon_vma_clone(new, vma); 2445 if (err) 2446 goto out_free_mpol; 2447 2448 if (new->vm_file) 2449 get_file(new->vm_file); 2450 2451 if (new->vm_ops && new->vm_ops->open) 2452 new->vm_ops->open(new); 2453 2454 vma_start_write(vma); 2455 vma_start_write(new); 2456 2457 init_vma_prep(&vp, vma); 2458 vp.insert = new; 2459 vma_prepare(&vp); 2460 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); 2461 2462 if (new_below) { 2463 vma->vm_start = addr; 2464 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT; 2465 } else { 2466 vma->vm_end = addr; 2467 } 2468 2469 /* vma_complete stores the new vma */ 2470 vma_complete(&vp, vmi, vma->vm_mm); 2471 2472 /* Success. */ 2473 if (new_below) 2474 vma_next(vmi); 2475 return 0; 2476 2477 out_free_mpol: 2478 mpol_put(vma_policy(new)); 2479 out_free_vmi: 2480 vma_iter_free(vmi); 2481 out_free_vma: 2482 vm_area_free(new); 2483 return err; 2484 } 2485 2486 /* 2487 * Split a vma into two pieces at address 'addr', a new vma is allocated 2488 * either for the first part or the tail. 
2489 */ 2490 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, 2491 unsigned long addr, int new_below) 2492 { 2493 if (vma->vm_mm->map_count >= sysctl_max_map_count) 2494 return -ENOMEM; 2495 2496 return __split_vma(vmi, vma, addr, new_below); 2497 } 2498 2499 /* 2500 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd 2501 * context and anonymous VMA name within the range [start, end). 2502 * 2503 * As a result, we might be able to merge the newly modified VMA range with an 2504 * adjacent VMA with identical properties. 2505 * 2506 * If no merge is possible and the range does not span the entirety of the VMA, 2507 * we then need to split the VMA to accommodate the change. 2508 * 2509 * The function returns either the merged VMA, the original VMA if a split was 2510 * required instead, or an error if the split failed. 2511 */ 2512 struct vm_area_struct *vma_modify(struct vma_iterator *vmi, 2513 struct vm_area_struct *prev, 2514 struct vm_area_struct *vma, 2515 unsigned long start, unsigned long end, 2516 unsigned long vm_flags, 2517 struct mempolicy *policy, 2518 struct vm_userfaultfd_ctx uffd_ctx, 2519 struct anon_vma_name *anon_name) 2520 { 2521 pgoff_t pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); 2522 struct vm_area_struct *merged; 2523 2524 merged = vma_merge(vmi, prev, vma, start, end, vm_flags, 2525 pgoff, policy, uffd_ctx, anon_name); 2526 if (merged) 2527 return merged; 2528 2529 if (vma->vm_start < start) { 2530 int err = split_vma(vmi, vma, start, 1); 2531 2532 if (err) 2533 return ERR_PTR(err); 2534 } 2535 2536 if (vma->vm_end > end) { 2537 int err = split_vma(vmi, vma, end, 0); 2538 2539 if (err) 2540 return ERR_PTR(err); 2541 } 2542 2543 return vma; 2544 } 2545 2546 /* 2547 * Attempt to merge a newly mapped VMA with those adjacent to it. The caller 2548 * must ensure that [start, end) does not overlap any existing VMA. 2549 */ 2550 static struct vm_area_struct 2551 *vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev, 2552 struct vm_area_struct *vma, unsigned long start, 2553 unsigned long end, pgoff_t pgoff) 2554 { 2555 return vma_merge(vmi, prev, vma, start, end, vma->vm_flags, pgoff, 2556 vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma)); 2557 } 2558 2559 /* 2560 * Expand vma by delta bytes, potentially merging with an immediately adjacent 2561 * VMA with identical properties. 2562 */ 2563 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, 2564 struct vm_area_struct *vma, 2565 unsigned long delta) 2566 { 2567 pgoff_t pgoff = vma->vm_pgoff + vma_pages(vma); 2568 2569 /* vma is specified as prev, so case 1 or 2 will apply. */ 2570 return vma_merge(vmi, vma, vma, vma->vm_end, vma->vm_end + delta, 2571 vma->vm_flags, pgoff, vma_policy(vma), 2572 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); 2573 } 2574 2575 /* 2576 * do_vmi_align_munmap() - munmap the aligned region from @start to @end. 2577 * @vmi: The vma iterator 2578 * @vma: The starting vm_area_struct 2579 * @mm: The mm_struct 2580 * @start: The aligned start address to munmap. 2581 * @end: The aligned end address to munmap. 2582 * @uf: The userfaultfd list_head 2583 * @unlock: Set to true to drop the mmap_lock. unlocking only happens on 2584 * success. 2585 * 2586 * Return: 0 on success and drops the lock if so directed, error and leaves the 2587 * lock held otherwise. 
2588 */ 2589 static int 2590 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, 2591 struct mm_struct *mm, unsigned long start, 2592 unsigned long end, struct list_head *uf, bool unlock) 2593 { 2594 struct vm_area_struct *prev, *next = NULL; 2595 struct maple_tree mt_detach; 2596 int count = 0; 2597 int error = -ENOMEM; 2598 unsigned long locked_vm = 0; 2599 MA_STATE(mas_detach, &mt_detach, 0, 0); 2600 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); 2601 mt_on_stack(mt_detach); 2602 2603 /* 2604 * If we need to split any vma, do it now to save pain later. 2605 * 2606 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially 2607 * unmapped vm_area_struct will remain in use: so lower split_vma 2608 * places tmp vma above, and higher split_vma places tmp vma below. 2609 */ 2610 2611 /* Does it split the first one? */ 2612 if (start > vma->vm_start) { 2613 2614 /* 2615 * Make sure that map_count on return from munmap() will 2616 * not exceed its limit; but let map_count go just above 2617 * its limit temporarily, to help free resources as expected. 2618 */ 2619 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) 2620 goto map_count_exceeded; 2621 2622 error = __split_vma(vmi, vma, start, 1); 2623 if (error) 2624 goto start_split_failed; 2625 } 2626 2627 /* 2628 * Detach a range of VMAs from the mm. Using next as a temp variable as 2629 * it is always overwritten. 2630 */ 2631 next = vma; 2632 do { 2633 /* Does it split the end? */ 2634 if (next->vm_end > end) { 2635 error = __split_vma(vmi, next, end, 0); 2636 if (error) 2637 goto end_split_failed; 2638 } 2639 vma_start_write(next); 2640 mas_set(&mas_detach, count); 2641 error = mas_store_gfp(&mas_detach, next, GFP_KERNEL); 2642 if (error) 2643 goto munmap_gather_failed; 2644 vma_mark_detached(next, true); 2645 if (next->vm_flags & VM_LOCKED) 2646 locked_vm += vma_pages(next); 2647 2648 count++; 2649 if (unlikely(uf)) { 2650 /* 2651 * If userfaultfd_unmap_prep returns an error the vmas 2652 * will remain split, but userland will get a 2653 * highly unexpected error anyway. This is no 2654 * different than the case where the first of the two 2655 * __split_vma fails, but we don't undo the first 2656 * split, despite we could. This is unlikely enough 2657 * failure that it's not worth optimizing it for. 2658 */ 2659 error = userfaultfd_unmap_prep(next, start, end, uf); 2660 2661 if (error) 2662 goto userfaultfd_error; 2663 } 2664 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE 2665 BUG_ON(next->vm_start < start); 2666 BUG_ON(next->vm_start > end); 2667 #endif 2668 } for_each_vma_range(*vmi, next, end); 2669 2670 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) 2671 /* Make sure no VMAs are about to be lost. 
*/ 2672 { 2673 MA_STATE(test, &mt_detach, 0, 0); 2674 struct vm_area_struct *vma_mas, *vma_test; 2675 int test_count = 0; 2676 2677 vma_iter_set(vmi, start); 2678 rcu_read_lock(); 2679 vma_test = mas_find(&test, count - 1); 2680 for_each_vma_range(*vmi, vma_mas, end) { 2681 BUG_ON(vma_mas != vma_test); 2682 test_count++; 2683 vma_test = mas_next(&test, count - 1); 2684 } 2685 rcu_read_unlock(); 2686 BUG_ON(count != test_count); 2687 } 2688 #endif 2689 2690 while (vma_iter_addr(vmi) > start) 2691 vma_iter_prev_range(vmi); 2692 2693 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL); 2694 if (error) 2695 goto clear_tree_failed; 2696 2697 /* Point of no return */ 2698 mm->locked_vm -= locked_vm; 2699 mm->map_count -= count; 2700 if (unlock) 2701 mmap_write_downgrade(mm); 2702 2703 prev = vma_iter_prev_range(vmi); 2704 next = vma_next(vmi); 2705 if (next) 2706 vma_iter_prev_range(vmi); 2707 2708 /* 2709 * We can free page tables without write-locking mmap_lock because VMAs 2710 * were isolated before we downgraded mmap_lock. 2711 */ 2712 mas_set(&mas_detach, 1); 2713 unmap_region(mm, &mas_detach, vma, prev, next, start, end, count, 2714 !unlock); 2715 /* Statistics and freeing VMAs */ 2716 mas_set(&mas_detach, 0); 2717 remove_mt(mm, &mas_detach); 2718 validate_mm(mm); 2719 if (unlock) 2720 mmap_read_unlock(mm); 2721 2722 __mt_destroy(&mt_detach); 2723 return 0; 2724 2725 clear_tree_failed: 2726 userfaultfd_error: 2727 munmap_gather_failed: 2728 end_split_failed: 2729 mas_set(&mas_detach, 0); 2730 mas_for_each(&mas_detach, next, end) 2731 vma_mark_detached(next, false); 2732 2733 __mt_destroy(&mt_detach); 2734 start_split_failed: 2735 map_count_exceeded: 2736 validate_mm(mm); 2737 return error; 2738 } 2739 2740 /* 2741 * do_vmi_munmap() - munmap a given range. 2742 * @vmi: The vma iterator 2743 * @mm: The mm_struct 2744 * @start: The start address to munmap 2745 * @len: The length of the range to munmap 2746 * @uf: The userfaultfd list_head 2747 * @unlock: set to true if the user wants to drop the mmap_lock on success 2748 * 2749 * This function takes a @vmi that is either pointing to the previous VMA or set 2750 * to MA_START and sets it up to remove the mapping(s). The @len will be 2751 * aligned and any arch_unmap work will be performed. 2752 * 2753 * Return: 0 on success and drops the lock if so directed, error and leaves the 2754 * lock held otherwise. 2755 */ 2756 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, 2757 unsigned long start, size_t len, struct list_head *uf, 2758 bool unlock) 2759 { 2760 unsigned long end; 2761 struct vm_area_struct *vma; 2762 2763 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) 2764 return -EINVAL; 2765 2766 end = start + PAGE_ALIGN(len); 2767 if (end == start) 2768 return -EINVAL; 2769 2770 /* 2771 * Check if memory is sealed before arch_unmap. 2772 * Prevent unmapping a sealed VMA. 2773 * can_modify_mm assumes we have acquired the lock on MM. 2774 */ 2775 if (unlikely(!can_modify_mm(mm, start, end))) 2776 return -EPERM; 2777 2778 /* arch_unmap() might do unmaps itself. */ 2779 arch_unmap(mm, start, end); 2780 2781 /* Find the first overlapping VMA */ 2782 vma = vma_find(vmi, end); 2783 if (!vma) { 2784 if (unlock) 2785 mmap_write_unlock(mm); 2786 return 0; 2787 } 2788 2789 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); 2790 } 2791 2792 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
2793 * @mm: The mm_struct 2794 * @start: The start address to munmap 2795 * @len: The length to be munmapped. 2796 * @uf: The userfaultfd list_head 2797 * 2798 * Return: 0 on success, error otherwise. 2799 */ 2800 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, 2801 struct list_head *uf) 2802 { 2803 VMA_ITERATOR(vmi, mm, start); 2804 2805 return do_vmi_munmap(&vmi, mm, start, len, uf, false); 2806 } 2807 2808 unsigned long mmap_region(struct file *file, unsigned long addr, 2809 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 2810 struct list_head *uf) 2811 { 2812 struct mm_struct *mm = current->mm; 2813 struct vm_area_struct *vma = NULL; 2814 struct vm_area_struct *next, *prev, *merge; 2815 pgoff_t pglen = len >> PAGE_SHIFT; 2816 unsigned long charged = 0; 2817 unsigned long end = addr + len; 2818 unsigned long merge_start = addr, merge_end = end; 2819 bool writable_file_mapping = false; 2820 pgoff_t vm_pgoff; 2821 int error; 2822 VMA_ITERATOR(vmi, mm, addr); 2823 2824 /* Check against address space limit. */ 2825 if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { 2826 unsigned long nr_pages; 2827 2828 /* 2829 * MAP_FIXED may remove pages of mappings that intersects with 2830 * requested mapping. Account for the pages it would unmap. 2831 */ 2832 nr_pages = count_vma_pages_range(mm, addr, end); 2833 2834 if (!may_expand_vm(mm, vm_flags, 2835 (len >> PAGE_SHIFT) - nr_pages)) 2836 return -ENOMEM; 2837 } 2838 2839 /* Unmap any existing mapping in the area */ 2840 error = do_vmi_munmap(&vmi, mm, addr, len, uf, false); 2841 if (error == -EPERM) 2842 return error; 2843 else if (error) 2844 return -ENOMEM; 2845 2846 /* 2847 * Private writable mapping: check memory availability 2848 */ 2849 if (accountable_mapping(file, vm_flags)) { 2850 charged = len >> PAGE_SHIFT; 2851 if (security_vm_enough_memory_mm(mm, charged)) 2852 return -ENOMEM; 2853 vm_flags |= VM_ACCOUNT; 2854 } 2855 2856 next = vma_next(&vmi); 2857 prev = vma_prev(&vmi); 2858 if (vm_flags & VM_SPECIAL) { 2859 if (prev) 2860 vma_iter_next_range(&vmi); 2861 goto cannot_expand; 2862 } 2863 2864 /* Attempt to expand an old mapping */ 2865 /* Check next */ 2866 if (next && next->vm_start == end && !vma_policy(next) && 2867 can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen, 2868 NULL_VM_UFFD_CTX, NULL)) { 2869 merge_end = next->vm_end; 2870 vma = next; 2871 vm_pgoff = next->vm_pgoff - pglen; 2872 } 2873 2874 /* Check prev */ 2875 if (prev && prev->vm_end == addr && !vma_policy(prev) && 2876 (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file, 2877 pgoff, vma->vm_userfaultfd_ctx, NULL) : 2878 can_vma_merge_after(prev, vm_flags, NULL, file, pgoff, 2879 NULL_VM_UFFD_CTX, NULL))) { 2880 merge_start = prev->vm_start; 2881 vma = prev; 2882 vm_pgoff = prev->vm_pgoff; 2883 } else if (prev) { 2884 vma_iter_next_range(&vmi); 2885 } 2886 2887 /* Actually expand, if possible */ 2888 if (vma && 2889 !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) { 2890 khugepaged_enter_vma(vma, vm_flags); 2891 goto expanded; 2892 } 2893 2894 if (vma == prev) 2895 vma_iter_set(&vmi, addr); 2896 cannot_expand: 2897 2898 /* 2899 * Determine the object being mapped and call the appropriate 2900 * specific mapper. the address has already been validated, but 2901 * not unmapped, but the maps are removed from the list. 
2902 */ 2903 vma = vm_area_alloc(mm); 2904 if (!vma) { 2905 error = -ENOMEM; 2906 goto unacct_error; 2907 } 2908 2909 vma_iter_config(&vmi, addr, end); 2910 vma_set_range(vma, addr, end, pgoff); 2911 vm_flags_init(vma, vm_flags); 2912 vma->vm_page_prot = vm_get_page_prot(vm_flags); 2913 2914 if (file) { 2915 vma->vm_file = get_file(file); 2916 error = call_mmap(file, vma); 2917 if (error) 2918 goto unmap_and_free_vma; 2919 2920 if (vma_is_shared_maywrite(vma)) { 2921 error = mapping_map_writable(file->f_mapping); 2922 if (error) 2923 goto close_and_free_vma; 2924 2925 writable_file_mapping = true; 2926 } 2927 2928 /* 2929 * Expansion is handled above, merging is handled below. 2930 * Drivers should not alter the address of the VMA. 2931 */ 2932 error = -EINVAL; 2933 if (WARN_ON((addr != vma->vm_start))) 2934 goto close_and_free_vma; 2935 2936 vma_iter_config(&vmi, addr, end); 2937 /* 2938 * If vm_flags changed after call_mmap(), we should try merge 2939 * vma again as we may succeed this time. 2940 */ 2941 if (unlikely(vm_flags != vma->vm_flags && prev)) { 2942 merge = vma_merge_new_vma(&vmi, prev, vma, 2943 vma->vm_start, vma->vm_end, 2944 vma->vm_pgoff); 2945 if (merge) { 2946 /* 2947 * ->mmap() can change vma->vm_file and fput 2948 * the original file. So fput the vma->vm_file 2949 * here or we would add an extra fput for file 2950 * and cause general protection fault 2951 * ultimately. 2952 */ 2953 fput(vma->vm_file); 2954 vm_area_free(vma); 2955 vma = merge; 2956 /* Update vm_flags to pick up the change. */ 2957 vm_flags = vma->vm_flags; 2958 goto unmap_writable; 2959 } 2960 } 2961 2962 vm_flags = vma->vm_flags; 2963 } else if (vm_flags & VM_SHARED) { 2964 error = shmem_zero_setup(vma); 2965 if (error) 2966 goto free_vma; 2967 } else { 2968 vma_set_anonymous(vma); 2969 } 2970 2971 if (map_deny_write_exec(vma, vma->vm_flags)) { 2972 error = -EACCES; 2973 goto close_and_free_vma; 2974 } 2975 2976 /* Allow architectures to sanity-check the vm_flags */ 2977 error = -EINVAL; 2978 if (!arch_validate_flags(vma->vm_flags)) 2979 goto close_and_free_vma; 2980 2981 error = -ENOMEM; 2982 if (vma_iter_prealloc(&vmi, vma)) 2983 goto close_and_free_vma; 2984 2985 /* Lock the VMA since it is modified after insertion into VMA tree */ 2986 vma_start_write(vma); 2987 vma_iter_store(&vmi, vma); 2988 mm->map_count++; 2989 vma_link_file(vma); 2990 2991 /* 2992 * vma_merge() calls khugepaged_enter_vma() either, the below 2993 * call covers the non-merge case. 2994 */ 2995 khugepaged_enter_vma(vma, vma->vm_flags); 2996 2997 /* Once vma denies write, undo our temporary denial count */ 2998 unmap_writable: 2999 if (writable_file_mapping) 3000 mapping_unmap_writable(file->f_mapping); 3001 file = vma->vm_file; 3002 ksm_add_vma(vma); 3003 expanded: 3004 perf_event_mmap(vma); 3005 3006 vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); 3007 if (vm_flags & VM_LOCKED) { 3008 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || 3009 is_vm_hugetlb_page(vma) || 3010 vma == get_gate_vma(current->mm)) 3011 vm_flags_clear(vma, VM_LOCKED_MASK); 3012 else 3013 mm->locked_vm += (len >> PAGE_SHIFT); 3014 } 3015 3016 if (file) 3017 uprobe_mmap(vma); 3018 3019 /* 3020 * New (or expanded) vma always get soft dirty status. 3021 * Otherwise user-space soft-dirty page tracker won't 3022 * be able to distinguish situation when vma area unmapped, 3023 * then new mapped in-place (which must be aimed as 3024 * a completely new data area). 
3025 */ 3026 vm_flags_set(vma, VM_SOFTDIRTY); 3027 3028 vma_set_page_prot(vma); 3029 3030 validate_mm(mm); 3031 return addr; 3032 3033 close_and_free_vma: 3034 if (file && vma->vm_ops && vma->vm_ops->close) 3035 vma->vm_ops->close(vma); 3036 3037 if (file || vma->vm_file) { 3038 unmap_and_free_vma: 3039 fput(vma->vm_file); 3040 vma->vm_file = NULL; 3041 3042 vma_iter_set(&vmi, vma->vm_end); 3043 /* Undo any partial mapping done by a device driver. */ 3044 unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start, 3045 vma->vm_end, vma->vm_end, true); 3046 } 3047 if (writable_file_mapping) 3048 mapping_unmap_writable(file->f_mapping); 3049 free_vma: 3050 vm_area_free(vma); 3051 unacct_error: 3052 if (charged) 3053 vm_unacct_memory(charged); 3054 validate_mm(mm); 3055 return error; 3056 } 3057 3058 static int __vm_munmap(unsigned long start, size_t len, bool unlock) 3059 { 3060 int ret; 3061 struct mm_struct *mm = current->mm; 3062 LIST_HEAD(uf); 3063 VMA_ITERATOR(vmi, mm, start); 3064 3065 if (mmap_write_lock_killable(mm)) 3066 return -EINTR; 3067 3068 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); 3069 if (ret || !unlock) 3070 mmap_write_unlock(mm); 3071 3072 userfaultfd_unmap_complete(mm, &uf); 3073 return ret; 3074 } 3075 3076 int vm_munmap(unsigned long start, size_t len) 3077 { 3078 return __vm_munmap(start, len, false); 3079 } 3080 EXPORT_SYMBOL(vm_munmap); 3081 3082 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 3083 { 3084 addr = untagged_addr(addr); 3085 return __vm_munmap(addr, len, true); 3086 } 3087 3088 3089 /* 3090 * Emulation of deprecated remap_file_pages() syscall. 3091 */ 3092 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, 3093 unsigned long, prot, unsigned long, pgoff, unsigned long, flags) 3094 { 3095 3096 struct mm_struct *mm = current->mm; 3097 struct vm_area_struct *vma; 3098 unsigned long populate = 0; 3099 unsigned long ret = -EINVAL; 3100 struct file *file; 3101 3102 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n", 3103 current->comm, current->pid); 3104 3105 if (prot) 3106 return ret; 3107 start = start & PAGE_MASK; 3108 size = size & PAGE_MASK; 3109 3110 if (start + size <= start) 3111 return ret; 3112 3113 /* Does pgoff wrap? */ 3114 if (pgoff + (size >> PAGE_SHIFT) < pgoff) 3115 return ret; 3116 3117 if (mmap_write_lock_killable(mm)) 3118 return -EINTR; 3119 3120 vma = vma_lookup(mm, start); 3121 3122 if (!vma || !(vma->vm_flags & VM_SHARED)) 3123 goto out; 3124 3125 if (start + size > vma->vm_end) { 3126 VMA_ITERATOR(vmi, mm, vma->vm_end); 3127 struct vm_area_struct *next, *prev = vma; 3128 3129 for_each_vma_range(vmi, next, start + size) { 3130 /* hole between vmas ? */ 3131 if (next->vm_start != prev->vm_end) 3132 goto out; 3133 3134 if (next->vm_file != vma->vm_file) 3135 goto out; 3136 3137 if (next->vm_flags != vma->vm_flags) 3138 goto out; 3139 3140 if (start + size <= next->vm_end) 3141 break; 3142 3143 prev = next; 3144 } 3145 3146 if (!next) 3147 goto out; 3148 } 3149 3150 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; 3151 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; 3152 prot |= vma->vm_flags & VM_EXEC ? 
PROT_EXEC : 0; 3153 3154 flags &= MAP_NONBLOCK; 3155 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; 3156 if (vma->vm_flags & VM_LOCKED) 3157 flags |= MAP_LOCKED; 3158 3159 file = get_file(vma->vm_file); 3160 ret = do_mmap(vma->vm_file, start, size, 3161 prot, flags, 0, pgoff, &populate, NULL); 3162 fput(file); 3163 out: 3164 mmap_write_unlock(mm); 3165 if (populate) 3166 mm_populate(ret, populate); 3167 if (!IS_ERR_VALUE(ret)) 3168 ret = 0; 3169 return ret; 3170 } 3171 3172 /* 3173 * do_vma_munmap() - Unmap a full or partial vma. 3174 * @vmi: The vma iterator pointing at the vma 3175 * @vma: The first vma to be munmapped 3176 * @start: The start address to unmap 3177 * @end: The end address to unmap 3178 * @uf: The userfaultfd list_head 3179 * @unlock: Drop the lock on success 3180 * 3181 * Unmaps a VMA mapping when the vma iterator is already in position. 3182 * Does not handle alignment. 3183 * 3184 * Return: 0 on success and drops the lock if so directed; on failure an error is 3185 * returned and the lock is still held. 3186 */ 3187 int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, 3188 unsigned long start, unsigned long end, struct list_head *uf, 3189 bool unlock) 3190 { 3191 struct mm_struct *mm = vma->vm_mm; 3192 3193 /* 3194 * Check if memory is sealed before arch_unmap. 3195 * Prevent unmapping a sealed VMA. 3196 * can_modify_mm assumes we have acquired the lock on MM. 3197 */ 3198 if (unlikely(!can_modify_mm(mm, start, end))) 3199 return -EPERM; 3200 3201 arch_unmap(mm, start, end); 3202 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); 3203 } 3204 3205 /* 3206 * do_brk_flags() - Increase the brk vma if the flags match. 3207 * @vmi: The vma iterator 3208 * @addr: The start address 3209 * @len: The length of the increase 3210 * @vma: The vma to extend, or NULL 3211 * @flags: The VMA Flags 3212 * 3213 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags 3214 * do not match then create a new anonymous VMA. Eventually we may be able to 3215 * do some brk-specific accounting here. 3216 */ 3217 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, 3218 unsigned long addr, unsigned long len, unsigned long flags) 3219 { 3220 struct mm_struct *mm = current->mm; 3221 struct vma_prepare vp; 3222 3223 /* 3224 * Check against address space limits by the changed size. 3225 * Note: This happens *after* clearing old mappings in some code paths. 3226 */ 3227 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 3228 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) 3229 return -ENOMEM; 3230 3231 if (mm->map_count > sysctl_max_map_count) 3232 return -ENOMEM; 3233 3234 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) 3235 return -ENOMEM; 3236 3237 /* 3238 * Expand the existing vma if possible; note that singular lists do not 3239 * occur after forking, so the expand will only happen on new VMAs.
3240 */ 3241 if (vma && vma->vm_end == addr && !vma_policy(vma) && 3242 can_vma_merge_after(vma, flags, NULL, NULL, 3243 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) { 3244 vma_iter_config(vmi, vma->vm_start, addr + len); 3245 if (vma_iter_prealloc(vmi, vma)) 3246 goto unacct_fail; 3247 3248 vma_start_write(vma); 3249 3250 init_vma_prep(&vp, vma); 3251 vma_prepare(&vp); 3252 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); 3253 vma->vm_end = addr + len; 3254 vm_flags_set(vma, VM_SOFTDIRTY); 3255 vma_iter_store(vmi, vma); 3256 3257 vma_complete(&vp, vmi, mm); 3258 khugepaged_enter_vma(vma, flags); 3259 goto out; 3260 } 3261 3262 if (vma) 3263 vma_iter_next_range(vmi); 3264 /* create a vma struct for an anonymous mapping */ 3265 vma = vm_area_alloc(mm); 3266 if (!vma) 3267 goto unacct_fail; 3268 3269 vma_set_anonymous(vma); 3270 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); 3271 vm_flags_init(vma, flags); 3272 vma->vm_page_prot = vm_get_page_prot(flags); 3273 vma_start_write(vma); 3274 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) 3275 goto mas_store_fail; 3276 3277 mm->map_count++; 3278 validate_mm(mm); 3279 ksm_add_vma(vma); 3280 out: 3281 perf_event_mmap(vma); 3282 mm->total_vm += len >> PAGE_SHIFT; 3283 mm->data_vm += len >> PAGE_SHIFT; 3284 if (flags & VM_LOCKED) 3285 mm->locked_vm += (len >> PAGE_SHIFT); 3286 vm_flags_set(vma, VM_SOFTDIRTY); 3287 return 0; 3288 3289 mas_store_fail: 3290 vm_area_free(vma); 3291 unacct_fail: 3292 vm_unacct_memory(len >> PAGE_SHIFT); 3293 return -ENOMEM; 3294 } 3295 3296 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) 3297 { 3298 struct mm_struct *mm = current->mm; 3299 struct vm_area_struct *vma = NULL; 3300 unsigned long len; 3301 int ret; 3302 bool populate; 3303 LIST_HEAD(uf); 3304 VMA_ITERATOR(vmi, mm, addr); 3305 3306 len = PAGE_ALIGN(request); 3307 if (len < request) 3308 return -ENOMEM; 3309 if (!len) 3310 return 0; 3311 3312 /* Until we need other flags, refuse anything except VM_EXEC. */ 3313 if ((flags & (~VM_EXEC)) != 0) 3314 return -EINVAL; 3315 3316 if (mmap_write_lock_killable(mm)) 3317 return -EINTR; 3318 3319 ret = check_brk_limits(addr, len); 3320 if (ret) 3321 goto limits_failed; 3322 3323 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); 3324 if (ret) 3325 goto munmap_failed; 3326 3327 vma = vma_prev(&vmi); 3328 ret = do_brk_flags(&vmi, vma, addr, len, flags); 3329 populate = ((mm->def_flags & VM_LOCKED) != 0); 3330 mmap_write_unlock(mm); 3331 userfaultfd_unmap_complete(mm, &uf); 3332 if (populate && !ret) 3333 mm_populate(addr, len); 3334 return ret; 3335 3336 munmap_failed: 3337 limits_failed: 3338 mmap_write_unlock(mm); 3339 return ret; 3340 } 3341 EXPORT_SYMBOL(vm_brk_flags); 3342 3343 /* Release all mmaps. */ 3344 void exit_mmap(struct mm_struct *mm) 3345 { 3346 struct mmu_gather tlb; 3347 struct vm_area_struct *vma; 3348 unsigned long nr_accounted = 0; 3349 VMA_ITERATOR(vmi, mm, 0); 3350 int count = 0; 3351 3352 /* mm's last user has gone, and its about to be pulled down */ 3353 mmu_notifier_release(mm); 3354 3355 mmap_read_lock(mm); 3356 arch_exit_mmap(mm); 3357 3358 vma = vma_next(&vmi); 3359 if (!vma || unlikely(xa_is_zero(vma))) { 3360 /* Can happen if dup_mmap() received an OOM */ 3361 mmap_read_unlock(mm); 3362 mmap_write_lock(mm); 3363 goto destroy; 3364 } 3365 3366 lru_add_drain(); 3367 flush_cache_mm(mm); 3368 tlb_gather_mmu_fullmm(&tlb, mm); 3369 /* update_hiwater_rss(mm) here? 
but nobody should be looking */ 3370 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ 3371 unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false); 3372 mmap_read_unlock(mm); 3373 3374 /* 3375 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper 3376 * because the memory has been already freed. 3377 */ 3378 set_bit(MMF_OOM_SKIP, &mm->flags); 3379 mmap_write_lock(mm); 3380 mt_clear_in_rcu(&mm->mm_mt); 3381 vma_iter_set(&vmi, vma->vm_end); 3382 free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS, 3383 USER_PGTABLES_CEILING, true); 3384 tlb_finish_mmu(&tlb); 3385 3386 /* 3387 * Walk the list again, actually closing and freeing it, with preemption 3388 * enabled, without holding any MM locks besides the unreachable 3389 * mmap_write_lock. 3390 */ 3391 vma_iter_set(&vmi, vma->vm_end); 3392 do { 3393 if (vma->vm_flags & VM_ACCOUNT) 3394 nr_accounted += vma_pages(vma); 3395 remove_vma(vma, true); 3396 count++; 3397 cond_resched(); 3398 vma = vma_next(&vmi); 3399 } while (vma && likely(!xa_is_zero(vma))); 3400 3401 BUG_ON(count != mm->map_count); 3402 3403 trace_exit_mmap(mm); 3404 destroy: 3405 __mt_destroy(&mm->mm_mt); 3406 mmap_write_unlock(mm); 3407 vm_unacct_memory(nr_accounted); 3408 } 3409 3410 /* Insert vm structure into process list sorted by address 3411 * and into the inode's i_mmap tree. If vm_file is non-NULL 3412 * then i_mmap_rwsem is taken here. 3413 */ 3414 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) 3415 { 3416 unsigned long charged = vma_pages(vma); 3417 3418 3419 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) 3420 return -ENOMEM; 3421 3422 if ((vma->vm_flags & VM_ACCOUNT) && 3423 security_vm_enough_memory_mm(mm, charged)) 3424 return -ENOMEM; 3425 3426 /* 3427 * The vm_pgoff of a purely anonymous vma should be irrelevant 3428 * until its first write fault, when page's anon_vma and index 3429 * are set. But now set the vm_pgoff it will almost certainly 3430 * end up with (unless mremap moves it elsewhere before that 3431 * first wfault), so /proc/pid/maps tells a consistent story. 3432 * 3433 * By setting it to reflect the virtual start address of the 3434 * vma, merges and splits can happen in a seamless way, just 3435 * using the existing file pgoff checks and manipulations. 3436 * Similarly in do_mmap and in do_brk_flags. 3437 */ 3438 if (vma_is_anonymous(vma)) { 3439 BUG_ON(vma->anon_vma); 3440 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; 3441 } 3442 3443 if (vma_link(mm, vma)) { 3444 if (vma->vm_flags & VM_ACCOUNT) 3445 vm_unacct_memory(charged); 3446 return -ENOMEM; 3447 } 3448 3449 return 0; 3450 } 3451 3452 /* 3453 * Copy the vma structure to a new location in the same mm, 3454 * prior to moving page table entries, to effect an mremap move. 3455 */ 3456 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 3457 unsigned long addr, unsigned long len, pgoff_t pgoff, 3458 bool *need_rmap_locks) 3459 { 3460 struct vm_area_struct *vma = *vmap; 3461 unsigned long vma_start = vma->vm_start; 3462 struct mm_struct *mm = vma->vm_mm; 3463 struct vm_area_struct *new_vma, *prev; 3464 bool faulted_in_anon_vma = true; 3465 VMA_ITERATOR(vmi, mm, addr); 3466 3467 /* 3468 * If anonymous vma has not yet been faulted, update new pgoff 3469 * to match new location, to increase its chance of merging. 
3470 */ 3471 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { 3472 pgoff = addr >> PAGE_SHIFT; 3473 faulted_in_anon_vma = false; 3474 } 3475 3476 new_vma = find_vma_prev(mm, addr, &prev); 3477 if (new_vma && new_vma->vm_start < addr + len) 3478 return NULL; /* should never get here */ 3479 3480 new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff); 3481 if (new_vma) { 3482 /* 3483 * Source vma may have been merged into new_vma 3484 */ 3485 if (unlikely(vma_start >= new_vma->vm_start && 3486 vma_start < new_vma->vm_end)) { 3487 /* 3488 * The only way we can get a vma_merge with 3489 * self during an mremap is if the vma hasn't 3490 * been faulted in yet and we were allowed to 3491 * reset the dst vma->vm_pgoff to the 3492 * destination address of the mremap to allow 3493 * the merge to happen. mremap must change the 3494 * vm_pgoff linearity between src and dst vmas 3495 * (in turn preventing a vma_merge) to be 3496 * safe. It is only safe to keep the vm_pgoff 3497 * linear if there are no pages mapped yet. 3498 */ 3499 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); 3500 *vmap = vma = new_vma; 3501 } 3502 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); 3503 } else { 3504 new_vma = vm_area_dup(vma); 3505 if (!new_vma) 3506 goto out; 3507 vma_set_range(new_vma, addr, addr + len, pgoff); 3508 if (vma_dup_policy(vma, new_vma)) 3509 goto out_free_vma; 3510 if (anon_vma_clone(new_vma, vma)) 3511 goto out_free_mempol; 3512 if (new_vma->vm_file) 3513 get_file(new_vma->vm_file); 3514 if (new_vma->vm_ops && new_vma->vm_ops->open) 3515 new_vma->vm_ops->open(new_vma); 3516 if (vma_link(mm, new_vma)) 3517 goto out_vma_link; 3518 *need_rmap_locks = false; 3519 } 3520 return new_vma; 3521 3522 out_vma_link: 3523 if (new_vma->vm_ops && new_vma->vm_ops->close) 3524 new_vma->vm_ops->close(new_vma); 3525 3526 if (new_vma->vm_file) 3527 fput(new_vma->vm_file); 3528 3529 unlink_anon_vmas(new_vma); 3530 out_free_mempol: 3531 mpol_put(vma_policy(new_vma)); 3532 out_free_vma: 3533 vm_area_free(new_vma); 3534 out: 3535 return NULL; 3536 } 3537 3538 /* 3539 * Return true if the calling process may expand its vm space by the passed 3540 * number of pages 3541 */ 3542 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) 3543 { 3544 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) 3545 return false; 3546 3547 if (is_data_mapping(flags) && 3548 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { 3549 /* Workaround for Valgrind */ 3550 if (rlimit(RLIMIT_DATA) == 0 && 3551 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) 3552 return true; 3553 3554 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n", 3555 current->comm, current->pid, 3556 (mm->data_vm + npages) << PAGE_SHIFT, 3557 rlimit(RLIMIT_DATA), 3558 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); 3559 3560 if (!ignore_rlimit_data) 3561 return false; 3562 } 3563 3564 return true; 3565 } 3566 3567 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) 3568 { 3569 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); 3570 3571 if (is_exec_mapping(flags)) 3572 mm->exec_vm += npages; 3573 else if (is_stack_mapping(flags)) 3574 mm->stack_vm += npages; 3575 else if (is_data_mapping(flags)) 3576 mm->data_vm += npages; 3577 } 3578 3579 static vm_fault_t special_mapping_fault(struct vm_fault *vmf); 3580 3581 /* 3582 * Having a close hook prevents vma merging regardless of flags. 
3583 */ 3584 static void special_mapping_close(struct vm_area_struct *vma) 3585 { 3586 } 3587 3588 static const char *special_mapping_name(struct vm_area_struct *vma) 3589 { 3590 return ((struct vm_special_mapping *)vma->vm_private_data)->name; 3591 } 3592 3593 static int special_mapping_mremap(struct vm_area_struct *new_vma) 3594 { 3595 struct vm_special_mapping *sm = new_vma->vm_private_data; 3596 3597 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) 3598 return -EFAULT; 3599 3600 if (sm->mremap) 3601 return sm->mremap(sm, new_vma); 3602 3603 return 0; 3604 } 3605 3606 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) 3607 { 3608 /* 3609 * Forbid splitting special mappings - kernel has expectations over 3610 * the number of pages in mapping. Together with VM_DONTEXPAND 3611 * the size of vma should stay the same over the special mapping's 3612 * lifetime. 3613 */ 3614 return -EINVAL; 3615 } 3616 3617 static const struct vm_operations_struct special_mapping_vmops = { 3618 .close = special_mapping_close, 3619 .fault = special_mapping_fault, 3620 .mremap = special_mapping_mremap, 3621 .name = special_mapping_name, 3622 /* vDSO code relies that VVAR can't be accessed remotely */ 3623 .access = NULL, 3624 .may_split = special_mapping_split, 3625 }; 3626 3627 static const struct vm_operations_struct legacy_special_mapping_vmops = { 3628 .close = special_mapping_close, 3629 .fault = special_mapping_fault, 3630 }; 3631 3632 static vm_fault_t special_mapping_fault(struct vm_fault *vmf) 3633 { 3634 struct vm_area_struct *vma = vmf->vma; 3635 pgoff_t pgoff; 3636 struct page **pages; 3637 3638 if (vma->vm_ops == &legacy_special_mapping_vmops) { 3639 pages = vma->vm_private_data; 3640 } else { 3641 struct vm_special_mapping *sm = vma->vm_private_data; 3642 3643 if (sm->fault) 3644 return sm->fault(sm, vmf->vma, vmf); 3645 3646 pages = sm->pages; 3647 } 3648 3649 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) 3650 pgoff--; 3651 3652 if (*pages) { 3653 struct page *page = *pages; 3654 get_page(page); 3655 vmf->page = page; 3656 return 0; 3657 } 3658 3659 return VM_FAULT_SIGBUS; 3660 } 3661 3662 static struct vm_area_struct *__install_special_mapping( 3663 struct mm_struct *mm, 3664 unsigned long addr, unsigned long len, 3665 unsigned long vm_flags, void *priv, 3666 const struct vm_operations_struct *ops) 3667 { 3668 int ret; 3669 struct vm_area_struct *vma; 3670 3671 vma = vm_area_alloc(mm); 3672 if (unlikely(vma == NULL)) 3673 return ERR_PTR(-ENOMEM); 3674 3675 vma_set_range(vma, addr, addr + len, 0); 3676 vm_flags_init(vma, (vm_flags | mm->def_flags | 3677 VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); 3678 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 3679 3680 vma->vm_ops = ops; 3681 vma->vm_private_data = priv; 3682 3683 ret = insert_vm_struct(mm, vma); 3684 if (ret) 3685 goto out; 3686 3687 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); 3688 3689 perf_event_mmap(vma); 3690 3691 return vma; 3692 3693 out: 3694 vm_area_free(vma); 3695 return ERR_PTR(ret); 3696 } 3697 3698 bool vma_is_special_mapping(const struct vm_area_struct *vma, 3699 const struct vm_special_mapping *sm) 3700 { 3701 return vma->vm_private_data == sm && 3702 (vma->vm_ops == &special_mapping_vmops || 3703 vma->vm_ops == &legacy_special_mapping_vmops); 3704 } 3705 3706 /* 3707 * Called with mm->mmap_lock held for writing. 3708 * Insert a new vma covering the given region, with the given flags. 3709 * Its pages are supplied by the given array of struct page *. 
3710 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. 3711 * The region past the last page supplied will always produce SIGBUS. 3712 * The array pointer and the pages it points to are assumed to stay alive 3713 * for as long as this mapping might exist. 3714 */ 3715 struct vm_area_struct *_install_special_mapping( 3716 struct mm_struct *mm, 3717 unsigned long addr, unsigned long len, 3718 unsigned long vm_flags, const struct vm_special_mapping *spec) 3719 { 3720 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, 3721 &special_mapping_vmops); 3722 } 3723 3724 int install_special_mapping(struct mm_struct *mm, 3725 unsigned long addr, unsigned long len, 3726 unsigned long vm_flags, struct page **pages) 3727 { 3728 struct vm_area_struct *vma = __install_special_mapping( 3729 mm, addr, len, vm_flags, (void *)pages, 3730 &legacy_special_mapping_vmops); 3731 3732 return PTR_ERR_OR_ZERO(vma); 3733 } 3734 3735 static DEFINE_MUTEX(mm_all_locks_mutex); 3736 3737 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) 3738 { 3739 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 3740 /* 3741 * The LSB of head.next can't change from under us 3742 * because we hold the mm_all_locks_mutex. 3743 */ 3744 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); 3745 /* 3746 * We can safely modify head.next after taking the 3747 * anon_vma->root->rwsem. If some other vma in this mm shares 3748 * the same anon_vma we won't take it again. 3749 * 3750 * No need of atomic instructions here, head.next 3751 * can't change from under us thanks to the 3752 * anon_vma->root->rwsem. 3753 */ 3754 if (__test_and_set_bit(0, (unsigned long *) 3755 &anon_vma->root->rb_root.rb_root.rb_node)) 3756 BUG(); 3757 } 3758 } 3759 3760 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) 3761 { 3762 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 3763 /* 3764 * AS_MM_ALL_LOCKS can't change from under us because 3765 * we hold the mm_all_locks_mutex. 3766 * 3767 * Operations on ->flags have to be atomic because 3768 * even if AS_MM_ALL_LOCKS is stable thanks to the 3769 * mm_all_locks_mutex, there may be other cpus 3770 * changing other bitflags in parallel to us. 3771 */ 3772 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) 3773 BUG(); 3774 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); 3775 } 3776 } 3777 3778 /* 3779 * This operation locks against the VM for all pte/vma/mm related 3780 * operations that could ever happen on a certain mm. This includes 3781 * vmtruncate, try_to_unmap, and all page faults. 3782 * 3783 * The caller must take the mmap_lock in write mode before calling 3784 * mm_take_all_locks(). The caller isn't allowed to release the 3785 * mmap_lock until mm_drop_all_locks() returns. 3786 * 3787 * mmap_lock in write mode is required in order to block all operations 3788 * that could modify pagetables and free pages without need of 3789 * altering the vma layout. It's also needed in write mode to avoid new 3790 * anon_vmas to be associated with existing vmas. 3791 * 3792 * A single task can't take more than one mm_take_all_locks() in a row 3793 * or it would deadlock. 3794 * 3795 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in 3796 * mapping->flags avoid to take the same lock twice, if more than one 3797 * vma in this mm is backed by the same anon_vma or address_space. 
3798 * 3799 * We take locks in following order, accordingly to comment at beginning 3800 * of mm/rmap.c: 3801 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for 3802 * hugetlb mapping); 3803 * - all vmas marked locked 3804 * - all i_mmap_rwsem locks; 3805 * - all anon_vma->rwseml 3806 * 3807 * We can take all locks within these types randomly because the VM code 3808 * doesn't nest them and we protected from parallel mm_take_all_locks() by 3809 * mm_all_locks_mutex. 3810 * 3811 * mm_take_all_locks() and mm_drop_all_locks are expensive operations 3812 * that may have to take thousand of locks. 3813 * 3814 * mm_take_all_locks() can fail if it's interrupted by signals. 3815 */ 3816 int mm_take_all_locks(struct mm_struct *mm) 3817 { 3818 struct vm_area_struct *vma; 3819 struct anon_vma_chain *avc; 3820 VMA_ITERATOR(vmi, mm, 0); 3821 3822 mmap_assert_write_locked(mm); 3823 3824 mutex_lock(&mm_all_locks_mutex); 3825 3826 /* 3827 * vma_start_write() does not have a complement in mm_drop_all_locks() 3828 * because vma_start_write() is always asymmetrical; it marks a VMA as 3829 * being written to until mmap_write_unlock() or mmap_write_downgrade() 3830 * is reached. 3831 */ 3832 for_each_vma(vmi, vma) { 3833 if (signal_pending(current)) 3834 goto out_unlock; 3835 vma_start_write(vma); 3836 } 3837 3838 vma_iter_init(&vmi, mm, 0); 3839 for_each_vma(vmi, vma) { 3840 if (signal_pending(current)) 3841 goto out_unlock; 3842 if (vma->vm_file && vma->vm_file->f_mapping && 3843 is_vm_hugetlb_page(vma)) 3844 vm_lock_mapping(mm, vma->vm_file->f_mapping); 3845 } 3846 3847 vma_iter_init(&vmi, mm, 0); 3848 for_each_vma(vmi, vma) { 3849 if (signal_pending(current)) 3850 goto out_unlock; 3851 if (vma->vm_file && vma->vm_file->f_mapping && 3852 !is_vm_hugetlb_page(vma)) 3853 vm_lock_mapping(mm, vma->vm_file->f_mapping); 3854 } 3855 3856 vma_iter_init(&vmi, mm, 0); 3857 for_each_vma(vmi, vma) { 3858 if (signal_pending(current)) 3859 goto out_unlock; 3860 if (vma->anon_vma) 3861 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 3862 vm_lock_anon_vma(mm, avc->anon_vma); 3863 } 3864 3865 return 0; 3866 3867 out_unlock: 3868 mm_drop_all_locks(mm); 3869 return -EINTR; 3870 } 3871 3872 static void vm_unlock_anon_vma(struct anon_vma *anon_vma) 3873 { 3874 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 3875 /* 3876 * The LSB of head.next can't change to 0 from under 3877 * us because we hold the mm_all_locks_mutex. 3878 * 3879 * We must however clear the bitflag before unlocking 3880 * the vma so the users using the anon_vma->rb_root will 3881 * never see our bitflag. 3882 * 3883 * No need of atomic instructions here, head.next 3884 * can't change from under us until we release the 3885 * anon_vma->root->rwsem. 3886 */ 3887 if (!__test_and_clear_bit(0, (unsigned long *) 3888 &anon_vma->root->rb_root.rb_root.rb_node)) 3889 BUG(); 3890 anon_vma_unlock_write(anon_vma); 3891 } 3892 } 3893 3894 static void vm_unlock_mapping(struct address_space *mapping) 3895 { 3896 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 3897 /* 3898 * AS_MM_ALL_LOCKS can't change to 0 from under us 3899 * because we hold the mm_all_locks_mutex. 3900 */ 3901 i_mmap_unlock_write(mapping); 3902 if (!test_and_clear_bit(AS_MM_ALL_LOCKS, 3903 &mapping->flags)) 3904 BUG(); 3905 } 3906 } 3907 3908 /* 3909 * The mmap_lock cannot be released by the caller until 3910 * mm_drop_all_locks() returns. 
3911 */ 3912 void mm_drop_all_locks(struct mm_struct *mm) 3913 { 3914 struct vm_area_struct *vma; 3915 struct anon_vma_chain *avc; 3916 VMA_ITERATOR(vmi, mm, 0); 3917 3918 mmap_assert_write_locked(mm); 3919 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); 3920 3921 for_each_vma(vmi, vma) { 3922 if (vma->anon_vma) 3923 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 3924 vm_unlock_anon_vma(avc->anon_vma); 3925 if (vma->vm_file && vma->vm_file->f_mapping) 3926 vm_unlock_mapping(vma->vm_file->f_mapping); 3927 } 3928 3929 mutex_unlock(&mm_all_locks_mutex); 3930 } 3931 3932 /* 3933 * initialise the percpu counter for VM 3934 */ 3935 void __init mmap_init(void) 3936 { 3937 int ret; 3938 3939 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); 3940 VM_BUG_ON(ret); 3941 } 3942 3943 /* 3944 * Initialise sysctl_user_reserve_kbytes. 3945 * 3946 * This is intended to prevent a user from starting a single memory hogging 3947 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER 3948 * mode. 3949 * 3950 * The default value is min(3% of free memory, 128MB) 3951 * 128MB is enough to recover with sshd/login, bash, and top/kill. 3952 */ 3953 static int init_user_reserve(void) 3954 { 3955 unsigned long free_kbytes; 3956 3957 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); 3958 3959 sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K); 3960 return 0; 3961 } 3962 subsys_initcall(init_user_reserve); 3963 3964 /* 3965 * Initialise sysctl_admin_reserve_kbytes. 3966 * 3967 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin 3968 * to log in and kill a memory hogging process. 3969 * 3970 * Systems with more than 256MB will reserve 8MB, enough to recover 3971 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will 3972 * only reserve 3% of free pages by default. 3973 */ 3974 static int init_admin_reserve(void) 3975 { 3976 unsigned long free_kbytes; 3977 3978 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); 3979 3980 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K); 3981 return 0; 3982 } 3983 subsys_initcall(init_admin_reserve); 3984 3985 /* 3986 * Reinititalise user and admin reserves if memory is added or removed. 3987 * 3988 * The default user reserve max is 128MB, and the default max for the 3989 * admin reserve is 8MB. These are usually, but not always, enough to 3990 * enable recovery from a memory hogging process using login/sshd, a shell, 3991 * and tools like top. It may make sense to increase or even disable the 3992 * reserve depending on the existence of swap or variations in the recovery 3993 * tools. So, the admin may have changed them. 3994 * 3995 * If memory is added and the reserves have been eliminated or increased above 3996 * the default max, then we'll trust the admin. 3997 * 3998 * If memory is removed and there isn't enough free memory, then we 3999 * need to reset the reserves. 4000 * 4001 * Otherwise keep the reserve set by the admin. 4002 */ 4003 static int reserve_mem_notifier(struct notifier_block *nb, 4004 unsigned long action, void *data) 4005 { 4006 unsigned long tmp, free_kbytes; 4007 4008 switch (action) { 4009 case MEM_ONLINE: 4010 /* Default max is 128MB. Leave alone if modified by operator. */ 4011 tmp = sysctl_user_reserve_kbytes; 4012 if (tmp > 0 && tmp < SZ_128K) 4013 init_user_reserve(); 4014 4015 /* Default max is 8MB. Leave alone if modified by operator. 

/*
 * Reinitialise the user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB. These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top. It may make sense to increase or even disable the
 * reserve depending on the existence of swap or variations in the recovery
 * tools. So, the admin may have changed them.
 *
 * If memory is added and the reserves have been eliminated or increased above
 * the default max, then we'll trust the admin.
 *
 * If memory is removed and there isn't enough free memory, then we
 * need to reset the reserves.
 *
 * Otherwise keep the reserve set by the admin.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	unsigned long tmp, free_kbytes;

	switch (action) {
	case MEM_ONLINE:
		/* Default max is 128MB. Leave alone if modified by operator. */
		tmp = sysctl_user_reserve_kbytes;
		if (tmp > 0 && tmp < SZ_128K)
			init_user_reserve();

		/* Default max is 8MB. Leave alone if modified by operator. */
		tmp = sysctl_admin_reserve_kbytes;
		if (tmp > 0 && tmp < SZ_8K)
			init_admin_reserve();

		break;
	case MEM_OFFLINE:
		free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

		if (sysctl_user_reserve_kbytes > free_kbytes) {
			init_user_reserve();
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
		}

		if (sysctl_admin_reserve_kbytes > free_kbytes) {
			init_admin_reserve();
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __meminit init_reserve_notifier(void)
{
	if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
		pr_err("Failed registering memory add/remove notifier for admin reserve\n");

	return 0;
}
subsys_initcall(init_reserve_notifier);
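
/*
 * Illustrative sketch only, not part of mm/mmap.c: the same memory-hotplug
 * notifier pattern used by init_reserve_notifier() above, for a hypothetical
 * subsystem that wants to resize its own limits when memory is added or
 * removed.  example_mem_notifier() and example_mem_init() are made-up names.
 */
#if 0	/* example only, not built */
static int example_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	switch (action) {
	case MEM_ONLINE:
		/* recompute limits now that more memory is available */
		break;
	case MEM_OFFLINE:
		/* shrink limits that may now exceed free memory */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __meminit example_mem_init(void)
{
	if (hotplug_memory_notifier(example_mem_notifier, DEFAULT_CALLBACK_PRI))
		pr_err("example: failed to register memory hotplug notifier\n");

	return 0;
}
subsys_initcall(example_mem_init);
#endif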