// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		struct vm_area_struct *next, unsigned long start,
		unsigned long end, unsigned long tree_end, bool mm_wr_locked);

static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}

/* Update vma->vm_page_prot to reflect vma->vm_flags.
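 *
 * A rough illustration (simplified; the real code below goes through
 * vm_pgprot_modify() so arch-specific bits of the existing protection are
 * preserved): for a MAP_SHARED, PROT_READ|PROT_WRITE file mapping whose
 * backing store must see write faults, the protection is computed as if
 * VM_SHARED were clear, so PTEs start out write-protected and the first
 * write to each page notifies the filesystem:
 *
 *	prot = vm_get_page_prot(vm_flags);			full read/write
 *	prot = vm_get_page_prot(vm_flags & ~VM_SHARED);		write-protected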
*/ 90 void vma_set_page_prot(struct vm_area_struct *vma) 91 { 92 unsigned long vm_flags = vma->vm_flags; 93 pgprot_t vm_page_prot; 94 95 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); 96 if (vma_wants_writenotify(vma, vm_page_prot)) { 97 vm_flags &= ~VM_SHARED; 98 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); 99 } 100 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */ 101 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); 102 } 103 104 /* 105 * Requires inode->i_mapping->i_mmap_rwsem 106 */ 107 static void __remove_shared_vm_struct(struct vm_area_struct *vma, 108 struct address_space *mapping) 109 { 110 if (vma_is_shared_maywrite(vma)) 111 mapping_unmap_writable(mapping); 112 113 flush_dcache_mmap_lock(mapping); 114 vma_interval_tree_remove(vma, &mapping->i_mmap); 115 flush_dcache_mmap_unlock(mapping); 116 } 117 118 /* 119 * Unlink a file-based vm structure from its interval tree, to hide 120 * vma from rmap and vmtruncate before freeing its page tables. 121 */ 122 void unlink_file_vma(struct vm_area_struct *vma) 123 { 124 struct file *file = vma->vm_file; 125 126 if (file) { 127 struct address_space *mapping = file->f_mapping; 128 i_mmap_lock_write(mapping); 129 __remove_shared_vm_struct(vma, mapping); 130 i_mmap_unlock_write(mapping); 131 } 132 } 133 134 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb) 135 { 136 vb->count = 0; 137 } 138 139 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb) 140 { 141 struct address_space *mapping; 142 int i; 143 144 mapping = vb->vmas[0]->vm_file->f_mapping; 145 i_mmap_lock_write(mapping); 146 for (i = 0; i < vb->count; i++) { 147 VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping); 148 __remove_shared_vm_struct(vb->vmas[i], mapping); 149 } 150 i_mmap_unlock_write(mapping); 151 152 unlink_file_vma_batch_init(vb); 153 } 154 155 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb, 156 struct vm_area_struct *vma) 157 { 158 if (vma->vm_file == NULL) 159 return; 160 161 if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) || 162 vb->count == ARRAY_SIZE(vb->vmas)) 163 unlink_file_vma_batch_process(vb); 164 165 vb->vmas[vb->count] = vma; 166 vb->count++; 167 } 168 169 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb) 170 { 171 if (vb->count > 0) 172 unlink_file_vma_batch_process(vb); 173 } 174 175 /* 176 * Close a vm structure and free it. 177 */ 178 static void remove_vma(struct vm_area_struct *vma, bool unreachable) 179 { 180 might_sleep(); 181 if (vma->vm_ops && vma->vm_ops->close) 182 vma->vm_ops->close(vma); 183 if (vma->vm_file) 184 fput(vma->vm_file); 185 mpol_put(vma_policy(vma)); 186 if (unreachable) 187 __vm_area_free(vma); 188 else 189 vm_area_free(vma); 190 } 191 192 static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi, 193 unsigned long min) 194 { 195 return mas_prev(&vmi->mas, min); 196 } 197 198 /* 199 * check_brk_limits() - Use platform specific check of range & verify mlock 200 * limits. 201 * @addr: The address to check 202 * @len: The size of increase. 203 * 204 * Return: 0 on success. 205 */ 206 static int check_brk_limits(unsigned long addr, unsigned long len) 207 { 208 unsigned long mapped_addr; 209 210 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); 211 if (IS_ERR_VALUE(mapped_addr)) 212 return mapped_addr; 213 214 return mlock_future_ok(current->mm, current->mm->def_flags, len) 215 ? 
		0 : -EAGAIN;
}

static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		unsigned long addr, unsigned long request, unsigned long flags);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *brkvma, *next = NULL;
	unsigned long min_brk;
	bool populate = false;
	LIST_HEAD(uf);
	struct vma_iterator vmi;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit in the case where the limit is
	 * not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		/* Search one past newbrk */
		vma_iter_init(&vmi, mm, newbrk);
		brkvma = vma_find(&vmi, oldbrk);
		if (!brkvma || brkvma->vm_start >= oldbrk)
			goto out; /* mapping intersects with an existing non-brk vma. */
		/*
		 * mm->brk must be protected by write mmap_lock.
		 * do_vma_munmap() will drop the lock on success, so update it
		 * before calling do_vma_munmap().
		 */
		mm->brk = brk;
		if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
			goto out;

		goto success_unlocked;
	}

	if (check_brk_limits(oldbrk, newbrk - oldbrk))
		goto out;

	/*
	 * Only check if the next VMA is within the stack_guard_gap of the
	 * expansion area
	 */
	vma_iter_init(&vmi, mm, oldbrk);
	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	brkvma = vma_prev_limit(&vmi, mm->start_brk);
	/* Ok, looks good - let it rip.
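	 *
	 * Simplified sketch of what follows (not authoritative): do_brk_flags()
	 * extends @brkvma in place when the existing flags allow it and only
	 * falls back to installing a new anonymous VMA over [oldbrk, newbrk);
	 * on success mm->brk is updated and, when mm->def_flags contains
	 * VM_LOCKED, the new range is faulted in via mm_populate() before the
	 * syscall returns.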
*/ 300 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0) 301 goto out; 302 303 mm->brk = brk; 304 if (mm->def_flags & VM_LOCKED) 305 populate = true; 306 307 success: 308 mmap_write_unlock(mm); 309 success_unlocked: 310 userfaultfd_unmap_complete(mm, &uf); 311 if (populate) 312 mm_populate(oldbrk, newbrk - oldbrk); 313 return brk; 314 315 out: 316 mm->brk = origbrk; 317 mmap_write_unlock(mm); 318 return origbrk; 319 } 320 321 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) 322 static void validate_mm(struct mm_struct *mm) 323 { 324 int bug = 0; 325 int i = 0; 326 struct vm_area_struct *vma; 327 VMA_ITERATOR(vmi, mm, 0); 328 329 mt_validate(&mm->mm_mt); 330 for_each_vma(vmi, vma) { 331 #ifdef CONFIG_DEBUG_VM_RB 332 struct anon_vma *anon_vma = vma->anon_vma; 333 struct anon_vma_chain *avc; 334 #endif 335 unsigned long vmi_start, vmi_end; 336 bool warn = 0; 337 338 vmi_start = vma_iter_addr(&vmi); 339 vmi_end = vma_iter_end(&vmi); 340 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm)) 341 warn = 1; 342 343 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm)) 344 warn = 1; 345 346 if (warn) { 347 pr_emerg("issue in %s\n", current->comm); 348 dump_stack(); 349 dump_vma(vma); 350 pr_emerg("tree range: %px start %lx end %lx\n", vma, 351 vmi_start, vmi_end - 1); 352 vma_iter_dump_tree(&vmi); 353 } 354 355 #ifdef CONFIG_DEBUG_VM_RB 356 if (anon_vma) { 357 anon_vma_lock_read(anon_vma); 358 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 359 anon_vma_interval_tree_verify(avc); 360 anon_vma_unlock_read(anon_vma); 361 } 362 #endif 363 i++; 364 } 365 if (i != mm->map_count) { 366 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i); 367 bug = 1; 368 } 369 VM_BUG_ON_MM(bug, mm); 370 } 371 372 #else /* !CONFIG_DEBUG_VM_MAPLE_TREE */ 373 #define validate_mm(mm) do { } while (0) 374 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ 375 376 /* 377 * vma has some anon_vma assigned, and is already inserted on that 378 * anon_vma's interval trees. 379 * 380 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the 381 * vma must be removed from the anon_vma's interval trees using 382 * anon_vma_interval_tree_pre_update_vma(). 383 * 384 * After the update, the vma will be reinserted using 385 * anon_vma_interval_tree_post_update_vma(). 386 * 387 * The entire update must be protected by exclusive mmap_lock and by 388 * the root anon_vma's mutex. 
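 *
 * Typical bracketing, as done by vma_prepare()/vma_complete() further down
 * (simplified sketch; new_start/new_end/new_pgoff are placeholders):
 *
 *	anon_vma_lock_write(vma->anon_vma);
 *	anon_vma_interval_tree_pre_update_vma(vma);
 *	vma_set_range(vma, new_start, new_end, new_pgoff);
 *	anon_vma_interval_tree_post_update_vma(vma);
 *	anon_vma_unlock_write(vma->anon_vma);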
389 */ 390 static inline void 391 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) 392 { 393 struct anon_vma_chain *avc; 394 395 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 396 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); 397 } 398 399 static inline void 400 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) 401 { 402 struct anon_vma_chain *avc; 403 404 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 405 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); 406 } 407 408 static unsigned long count_vma_pages_range(struct mm_struct *mm, 409 unsigned long addr, unsigned long end) 410 { 411 VMA_ITERATOR(vmi, mm, addr); 412 struct vm_area_struct *vma; 413 unsigned long nr_pages = 0; 414 415 for_each_vma_range(vmi, vma, end) { 416 unsigned long vm_start = max(addr, vma->vm_start); 417 unsigned long vm_end = min(end, vma->vm_end); 418 419 nr_pages += PHYS_PFN(vm_end - vm_start); 420 } 421 422 return nr_pages; 423 } 424 425 static void __vma_link_file(struct vm_area_struct *vma, 426 struct address_space *mapping) 427 { 428 if (vma_is_shared_maywrite(vma)) 429 mapping_allow_writable(mapping); 430 431 flush_dcache_mmap_lock(mapping); 432 vma_interval_tree_insert(vma, &mapping->i_mmap); 433 flush_dcache_mmap_unlock(mapping); 434 } 435 436 static void vma_link_file(struct vm_area_struct *vma) 437 { 438 struct file *file = vma->vm_file; 439 struct address_space *mapping; 440 441 if (file) { 442 mapping = file->f_mapping; 443 i_mmap_lock_write(mapping); 444 __vma_link_file(vma, mapping); 445 i_mmap_unlock_write(mapping); 446 } 447 } 448 449 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) 450 { 451 VMA_ITERATOR(vmi, mm, 0); 452 453 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); 454 if (vma_iter_prealloc(&vmi, vma)) 455 return -ENOMEM; 456 457 vma_start_write(vma); 458 vma_iter_store(&vmi, vma); 459 vma_link_file(vma); 460 mm->map_count++; 461 validate_mm(mm); 462 return 0; 463 } 464 465 /* 466 * init_multi_vma_prep() - Initializer for struct vma_prepare 467 * @vp: The vma_prepare struct 468 * @vma: The vma that will be altered once locked 469 * @next: The next vma if it is to be adjusted 470 * @remove: The first vma to be removed 471 * @remove2: The second vma to be removed 472 */ 473 static inline void init_multi_vma_prep(struct vma_prepare *vp, 474 struct vm_area_struct *vma, struct vm_area_struct *next, 475 struct vm_area_struct *remove, struct vm_area_struct *remove2) 476 { 477 memset(vp, 0, sizeof(struct vma_prepare)); 478 vp->vma = vma; 479 vp->anon_vma = vma->anon_vma; 480 vp->remove = remove; 481 vp->remove2 = remove2; 482 vp->adj_next = next; 483 if (!vp->anon_vma && next) 484 vp->anon_vma = next->anon_vma; 485 486 vp->file = vma->vm_file; 487 if (vp->file) 488 vp->mapping = vma->vm_file->f_mapping; 489 490 } 491 492 /* 493 * init_vma_prep() - Initializer wrapper for vma_prepare struct 494 * @vp: The vma_prepare struct 495 * @vma: The vma that will be altered once locked 496 */ 497 static inline void init_vma_prep(struct vma_prepare *vp, 498 struct vm_area_struct *vma) 499 { 500 init_multi_vma_prep(vp, vma, NULL, NULL, NULL); 501 } 502 503 504 /* 505 * vma_prepare() - Helper function for handling locking VMAs prior to altering 506 * @vp: The initialized vma_prepare struct 507 */ 508 static inline void vma_prepare(struct vma_prepare *vp) 509 { 510 if (vp->file) { 511 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); 512 513 if (vp->adj_next) 514 uprobe_munmap(vp->adj_next, 
vp->adj_next->vm_start, 515 vp->adj_next->vm_end); 516 517 i_mmap_lock_write(vp->mapping); 518 if (vp->insert && vp->insert->vm_file) { 519 /* 520 * Put into interval tree now, so instantiated pages 521 * are visible to arm/parisc __flush_dcache_page 522 * throughout; but we cannot insert into address 523 * space until vma start or end is updated. 524 */ 525 __vma_link_file(vp->insert, 526 vp->insert->vm_file->f_mapping); 527 } 528 } 529 530 if (vp->anon_vma) { 531 anon_vma_lock_write(vp->anon_vma); 532 anon_vma_interval_tree_pre_update_vma(vp->vma); 533 if (vp->adj_next) 534 anon_vma_interval_tree_pre_update_vma(vp->adj_next); 535 } 536 537 if (vp->file) { 538 flush_dcache_mmap_lock(vp->mapping); 539 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap); 540 if (vp->adj_next) 541 vma_interval_tree_remove(vp->adj_next, 542 &vp->mapping->i_mmap); 543 } 544 545 } 546 547 /* 548 * vma_complete- Helper function for handling the unlocking after altering VMAs, 549 * or for inserting a VMA. 550 * 551 * @vp: The vma_prepare struct 552 * @vmi: The vma iterator 553 * @mm: The mm_struct 554 */ 555 static inline void vma_complete(struct vma_prepare *vp, 556 struct vma_iterator *vmi, struct mm_struct *mm) 557 { 558 if (vp->file) { 559 if (vp->adj_next) 560 vma_interval_tree_insert(vp->adj_next, 561 &vp->mapping->i_mmap); 562 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap); 563 flush_dcache_mmap_unlock(vp->mapping); 564 } 565 566 if (vp->remove && vp->file) { 567 __remove_shared_vm_struct(vp->remove, vp->mapping); 568 if (vp->remove2) 569 __remove_shared_vm_struct(vp->remove2, vp->mapping); 570 } else if (vp->insert) { 571 /* 572 * split_vma has split insert from vma, and needs 573 * us to insert it before dropping the locks 574 * (it may either follow vma or precede it). 575 */ 576 vma_iter_store(vmi, vp->insert); 577 mm->map_count++; 578 } 579 580 if (vp->anon_vma) { 581 anon_vma_interval_tree_post_update_vma(vp->vma); 582 if (vp->adj_next) 583 anon_vma_interval_tree_post_update_vma(vp->adj_next); 584 anon_vma_unlock_write(vp->anon_vma); 585 } 586 587 if (vp->file) { 588 i_mmap_unlock_write(vp->mapping); 589 uprobe_mmap(vp->vma); 590 591 if (vp->adj_next) 592 uprobe_mmap(vp->adj_next); 593 } 594 595 if (vp->remove) { 596 again: 597 vma_mark_detached(vp->remove, true); 598 if (vp->file) { 599 uprobe_munmap(vp->remove, vp->remove->vm_start, 600 vp->remove->vm_end); 601 fput(vp->file); 602 } 603 if (vp->remove->anon_vma) 604 anon_vma_merge(vp->vma, vp->remove); 605 mm->map_count--; 606 mpol_put(vma_policy(vp->remove)); 607 if (!vp->remove2) 608 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end); 609 vm_area_free(vp->remove); 610 611 /* 612 * In mprotect's case 6 (see comments on vma_merge), 613 * we are removing both mid and next vmas 614 */ 615 if (vp->remove2) { 616 vp->remove = vp->remove2; 617 vp->remove2 = NULL; 618 goto again; 619 } 620 } 621 if (vp->insert && vp->file) 622 uprobe_mmap(vp->insert); 623 validate_mm(mm); 624 } 625 626 /* 627 * dup_anon_vma() - Helper function to duplicate anon_vma 628 * @dst: The destination VMA 629 * @src: The source VMA 630 * @dup: Pointer to the destination VMA when successful. 631 * 632 * Returns: 0 on success. 633 */ 634 static inline int dup_anon_vma(struct vm_area_struct *dst, 635 struct vm_area_struct *src, struct vm_area_struct **dup) 636 { 637 /* 638 * Easily overlooked: when mprotect shifts the boundary, make sure the 639 * expanding vma has anon_vma set if the shrinking vma had, to cover any 640 * anon pages imported. 
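 *
 * Expected calling pattern (simplified sketch mirroring vma_expand()
 * below): when the expanding vma swallows @next it must be able to reach
 * next's anon pages through its own anon_vma chain, and the caller keeps
 * @dup so the clone can be unwound on a later failure:
 *
 *	vma_start_write(next);
 *	ret = dup_anon_vma(vma, next, &anon_dup);
 *	if (ret)
 *		return ret;
 *	...
 *	if (anon_dup)			(error path only)
 *		unlink_anon_vmas(anon_dup);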
641 */ 642 if (src->anon_vma && !dst->anon_vma) { 643 int ret; 644 645 vma_assert_write_locked(dst); 646 dst->anon_vma = src->anon_vma; 647 ret = anon_vma_clone(dst, src); 648 if (ret) 649 return ret; 650 651 *dup = dst; 652 } 653 654 return 0; 655 } 656 657 /* 658 * vma_expand - Expand an existing VMA 659 * 660 * @vmi: The vma iterator 661 * @vma: The vma to expand 662 * @start: The start of the vma 663 * @end: The exclusive end of the vma 664 * @pgoff: The page offset of vma 665 * @next: The current of next vma. 666 * 667 * Expand @vma to @start and @end. Can expand off the start and end. Will 668 * expand over @next if it's different from @vma and @end == @next->vm_end. 669 * Checking if the @vma can expand and merge with @next needs to be handled by 670 * the caller. 671 * 672 * Returns: 0 on success 673 */ 674 int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, 675 unsigned long start, unsigned long end, pgoff_t pgoff, 676 struct vm_area_struct *next) 677 { 678 struct vm_area_struct *anon_dup = NULL; 679 bool remove_next = false; 680 struct vma_prepare vp; 681 682 vma_start_write(vma); 683 if (next && (vma != next) && (end == next->vm_end)) { 684 int ret; 685 686 remove_next = true; 687 vma_start_write(next); 688 ret = dup_anon_vma(vma, next, &anon_dup); 689 if (ret) 690 return ret; 691 } 692 693 init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL); 694 /* Not merging but overwriting any part of next is not handled. */ 695 VM_WARN_ON(next && !vp.remove && 696 next != vma && end > next->vm_start); 697 /* Only handles expanding */ 698 VM_WARN_ON(vma->vm_start < start || vma->vm_end > end); 699 700 /* Note: vma iterator must be pointing to 'start' */ 701 vma_iter_config(vmi, start, end); 702 if (vma_iter_prealloc(vmi, vma)) 703 goto nomem; 704 705 vma_prepare(&vp); 706 vma_adjust_trans_huge(vma, start, end, 0); 707 vma_set_range(vma, start, end, pgoff); 708 vma_iter_store(vmi, vma); 709 710 vma_complete(&vp, vmi, vma->vm_mm); 711 return 0; 712 713 nomem: 714 if (anon_dup) 715 unlink_anon_vmas(anon_dup); 716 return -ENOMEM; 717 } 718 719 /* 720 * vma_shrink() - Reduce an existing VMAs memory area 721 * @vmi: The vma iterator 722 * @vma: The VMA to modify 723 * @start: The new start 724 * @end: The new end 725 * 726 * Returns: 0 on success, -ENOMEM otherwise 727 */ 728 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, 729 unsigned long start, unsigned long end, pgoff_t pgoff) 730 { 731 struct vma_prepare vp; 732 733 WARN_ON((vma->vm_start != start) && (vma->vm_end != end)); 734 735 if (vma->vm_start < start) 736 vma_iter_config(vmi, vma->vm_start, start); 737 else 738 vma_iter_config(vmi, end, vma->vm_end); 739 740 if (vma_iter_prealloc(vmi, NULL)) 741 return -ENOMEM; 742 743 vma_start_write(vma); 744 745 init_vma_prep(&vp, vma); 746 vma_prepare(&vp); 747 vma_adjust_trans_huge(vma, start, end, 0); 748 749 vma_iter_clear(vmi); 750 vma_set_range(vma, start, end, pgoff); 751 vma_complete(&vp, vmi, vma->vm_mm); 752 return 0; 753 } 754 755 /* 756 * If the vma has a ->close operation then the driver probably needs to release 757 * per-vma resources, so we don't attempt to merge those if the caller indicates 758 * the current vma may be removed as part of the merge. 
759 */ 760 static inline bool is_mergeable_vma(struct vm_area_struct *vma, 761 struct file *file, unsigned long vm_flags, 762 struct vm_userfaultfd_ctx vm_userfaultfd_ctx, 763 struct anon_vma_name *anon_name, bool may_remove_vma) 764 { 765 /* 766 * VM_SOFTDIRTY should not prevent from VMA merging, if we 767 * match the flags but dirty bit -- the caller should mark 768 * merged VMA as dirty. If dirty bit won't be excluded from 769 * comparison, we increase pressure on the memory system forcing 770 * the kernel to generate new VMAs when old one could be 771 * extended instead. 772 */ 773 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) 774 return false; 775 if (vma->vm_file != file) 776 return false; 777 if (may_remove_vma && vma->vm_ops && vma->vm_ops->close) 778 return false; 779 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) 780 return false; 781 if (!anon_vma_name_eq(anon_vma_name(vma), anon_name)) 782 return false; 783 return true; 784 } 785 786 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1, 787 struct anon_vma *anon_vma2, struct vm_area_struct *vma) 788 { 789 /* 790 * The list_is_singular() test is to avoid merging VMA cloned from 791 * parents. This can improve scalability caused by anon_vma lock. 792 */ 793 if ((!anon_vma1 || !anon_vma2) && (!vma || 794 list_is_singular(&vma->anon_vma_chain))) 795 return true; 796 return anon_vma1 == anon_vma2; 797 } 798 799 /* 800 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) 801 * in front of (at a lower virtual address and file offset than) the vma. 802 * 803 * We cannot merge two vmas if they have differently assigned (non-NULL) 804 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. 805 * 806 * We don't check here for the merged mmap wrapping around the end of pagecache 807 * indices (16TB on ia32) because do_mmap() does not permit mmap's which 808 * wrap, nor mmaps which cover the final page at index -1UL. 809 * 810 * We assume the vma may be removed as part of the merge. 811 */ 812 static bool 813 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, 814 struct anon_vma *anon_vma, struct file *file, 815 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, 816 struct anon_vma_name *anon_name) 817 { 818 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) && 819 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { 820 if (vma->vm_pgoff == vm_pgoff) 821 return true; 822 } 823 return false; 824 } 825 826 /* 827 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) 828 * beyond (at a higher virtual address and file offset than) the vma. 829 * 830 * We cannot merge two vmas if they have differently assigned (non-NULL) 831 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. 832 * 833 * We assume that vma is not removed as part of the merge. 
834 */ 835 static bool 836 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, 837 struct anon_vma *anon_vma, struct file *file, 838 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, 839 struct anon_vma_name *anon_name) 840 { 841 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) && 842 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { 843 pgoff_t vm_pglen; 844 vm_pglen = vma_pages(vma); 845 if (vma->vm_pgoff + vm_pglen == vm_pgoff) 846 return true; 847 } 848 return false; 849 } 850 851 /* 852 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name), 853 * figure out whether that can be merged with its predecessor or its 854 * successor. Or both (it neatly fills a hole). 855 * 856 * In most cases - when called for mmap, brk or mremap - [addr,end) is 857 * certain not to be mapped by the time vma_merge is called; but when 858 * called for mprotect, it is certain to be already mapped (either at 859 * an offset within prev, or at the start of next), and the flags of 860 * this area are about to be changed to vm_flags - and the no-change 861 * case has already been eliminated. 862 * 863 * The following mprotect cases have to be considered, where **** is 864 * the area passed down from mprotect_fixup, never extending beyond one 865 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts 866 * at the same address as **** and is of the same or larger span, and 867 * NNNN the next vma after ****: 868 * 869 * **** **** **** 870 * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPCCCCCC 871 * cannot merge might become might become 872 * PPNNNNNNNNNN PPPPPPPPPPCC 873 * mmap, brk or case 4 below case 5 below 874 * mremap move: 875 * **** **** 876 * PPPP NNNN PPPPCCCCNNNN 877 * might become might become 878 * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or 879 * PPPPPPPPNNNN 2 or PPPPPPPPNNNN 7 or 880 * PPPPNNNNNNNN 3 PPPPNNNNNNNN 8 881 * 882 * It is important for case 8 that the vma CCCC overlapping the 883 * region **** is never going to extended over NNNN. Instead NNNN must 884 * be extended in region **** and CCCC must be removed. This way in 885 * all cases where vma_merge succeeds, the moment vma_merge drops the 886 * rmap_locks, the properties of the merged vma will be already 887 * correct for the whole merged range. Some of those properties like 888 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must 889 * be correct for the whole merged range immediately after the 890 * rmap_locks are released. Otherwise if NNNN would be removed and 891 * CCCC would be extended over the NNNN range, remove_migration_ptes 892 * or other rmap walkers (if working on addresses beyond the "end" 893 * parameter) may establish ptes with the wrong permissions of CCCC 894 * instead of the right permissions of NNNN. 
895 * 896 * In the code below: 897 * PPPP is represented by *prev 898 * CCCC is represented by *curr or not represented at all (NULL) 899 * NNNN is represented by *next or not represented at all (NULL) 900 * **** is not represented - it will be merged and the vma containing the 901 * area is returned, or the function will return NULL 902 */ 903 static struct vm_area_struct 904 *vma_merge(struct vma_iterator *vmi, struct vm_area_struct *prev, 905 struct vm_area_struct *src, unsigned long addr, unsigned long end, 906 unsigned long vm_flags, pgoff_t pgoff, struct mempolicy *policy, 907 struct vm_userfaultfd_ctx vm_userfaultfd_ctx, 908 struct anon_vma_name *anon_name) 909 { 910 struct mm_struct *mm = src->vm_mm; 911 struct anon_vma *anon_vma = src->anon_vma; 912 struct file *file = src->vm_file; 913 struct vm_area_struct *curr, *next, *res; 914 struct vm_area_struct *vma, *adjust, *remove, *remove2; 915 struct vm_area_struct *anon_dup = NULL; 916 struct vma_prepare vp; 917 pgoff_t vma_pgoff; 918 int err = 0; 919 bool merge_prev = false; 920 bool merge_next = false; 921 bool vma_expanded = false; 922 unsigned long vma_start = addr; 923 unsigned long vma_end = end; 924 pgoff_t pglen = (end - addr) >> PAGE_SHIFT; 925 long adj_start = 0; 926 927 /* 928 * We later require that vma->vm_flags == vm_flags, 929 * so this tests vma->vm_flags & VM_SPECIAL, too. 930 */ 931 if (vm_flags & VM_SPECIAL) 932 return NULL; 933 934 /* Does the input range span an existing VMA? (cases 5 - 8) */ 935 curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end); 936 937 if (!curr || /* cases 1 - 4 */ 938 end == curr->vm_end) /* cases 6 - 8, adjacent VMA */ 939 next = vma_lookup(mm, end); 940 else 941 next = NULL; /* case 5 */ 942 943 if (prev) { 944 vma_start = prev->vm_start; 945 vma_pgoff = prev->vm_pgoff; 946 947 /* Can we merge the predecessor? */ 948 if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy) 949 && can_vma_merge_after(prev, vm_flags, anon_vma, file, 950 pgoff, vm_userfaultfd_ctx, anon_name)) { 951 merge_prev = true; 952 vma_prev(vmi); 953 } 954 } 955 956 /* Can we merge the successor? */ 957 if (next && mpol_equal(policy, vma_policy(next)) && 958 can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, 959 vm_userfaultfd_ctx, anon_name)) { 960 merge_next = true; 961 } 962 963 /* Verify some invariant that must be enforced by the caller. */ 964 VM_WARN_ON(prev && addr <= prev->vm_start); 965 VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end)); 966 VM_WARN_ON(addr >= end); 967 968 if (!merge_prev && !merge_next) 969 return NULL; /* Not mergeable. */ 970 971 if (merge_prev) 972 vma_start_write(prev); 973 974 res = vma = prev; 975 remove = remove2 = adjust = NULL; 976 977 /* Can we merge both the predecessor and the successor? */ 978 if (merge_prev && merge_next && 979 is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) { 980 vma_start_write(next); 981 remove = next; /* case 1 */ 982 vma_end = next->vm_end; 983 err = dup_anon_vma(prev, next, &anon_dup); 984 if (curr) { /* case 6 */ 985 vma_start_write(curr); 986 remove = curr; 987 remove2 = next; 988 /* 989 * Note that the dup_anon_vma below cannot overwrite err 990 * since the first caller would do nothing unless next 991 * has an anon_vma. 
992 */ 993 if (!next->anon_vma) 994 err = dup_anon_vma(prev, curr, &anon_dup); 995 } 996 } else if (merge_prev) { /* case 2 */ 997 if (curr) { 998 vma_start_write(curr); 999 if (end == curr->vm_end) { /* case 7 */ 1000 /* 1001 * can_vma_merge_after() assumed we would not be 1002 * removing prev vma, so it skipped the check 1003 * for vm_ops->close, but we are removing curr 1004 */ 1005 if (curr->vm_ops && curr->vm_ops->close) 1006 err = -EINVAL; 1007 remove = curr; 1008 } else { /* case 5 */ 1009 adjust = curr; 1010 adj_start = (end - curr->vm_start); 1011 } 1012 if (!err) 1013 err = dup_anon_vma(prev, curr, &anon_dup); 1014 } 1015 } else { /* merge_next */ 1016 vma_start_write(next); 1017 res = next; 1018 if (prev && addr < prev->vm_end) { /* case 4 */ 1019 vma_start_write(prev); 1020 vma_end = addr; 1021 adjust = next; 1022 adj_start = -(prev->vm_end - addr); 1023 err = dup_anon_vma(next, prev, &anon_dup); 1024 } else { 1025 /* 1026 * Note that cases 3 and 8 are the ONLY ones where prev 1027 * is permitted to be (but is not necessarily) NULL. 1028 */ 1029 vma = next; /* case 3 */ 1030 vma_start = addr; 1031 vma_end = next->vm_end; 1032 vma_pgoff = next->vm_pgoff - pglen; 1033 if (curr) { /* case 8 */ 1034 vma_pgoff = curr->vm_pgoff; 1035 vma_start_write(curr); 1036 remove = curr; 1037 err = dup_anon_vma(next, curr, &anon_dup); 1038 } 1039 } 1040 } 1041 1042 /* Error in anon_vma clone. */ 1043 if (err) 1044 goto anon_vma_fail; 1045 1046 if (vma_start < vma->vm_start || vma_end > vma->vm_end) 1047 vma_expanded = true; 1048 1049 if (vma_expanded) { 1050 vma_iter_config(vmi, vma_start, vma_end); 1051 } else { 1052 vma_iter_config(vmi, adjust->vm_start + adj_start, 1053 adjust->vm_end); 1054 } 1055 1056 if (vma_iter_prealloc(vmi, vma)) 1057 goto prealloc_fail; 1058 1059 init_multi_vma_prep(&vp, vma, adjust, remove, remove2); 1060 VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && 1061 vp.anon_vma != adjust->anon_vma); 1062 1063 vma_prepare(&vp); 1064 vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start); 1065 vma_set_range(vma, vma_start, vma_end, vma_pgoff); 1066 1067 if (vma_expanded) 1068 vma_iter_store(vmi, vma); 1069 1070 if (adj_start) { 1071 adjust->vm_start += adj_start; 1072 adjust->vm_pgoff += adj_start >> PAGE_SHIFT; 1073 if (adj_start < 0) { 1074 WARN_ON(vma_expanded); 1075 vma_iter_store(vmi, next); 1076 } 1077 } 1078 1079 vma_complete(&vp, vmi, mm); 1080 khugepaged_enter_vma(res, vm_flags); 1081 return res; 1082 1083 prealloc_fail: 1084 if (anon_dup) 1085 unlink_anon_vmas(anon_dup); 1086 1087 anon_vma_fail: 1088 vma_iter_set(vmi, addr); 1089 vma_iter_load(vmi); 1090 return NULL; 1091 } 1092 1093 /* 1094 * Rough compatibility check to quickly see if it's even worth looking 1095 * at sharing an anon_vma. 1096 * 1097 * They need to have the same vm_file, and the flags can only differ 1098 * in things that mprotect may change. 1099 * 1100 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that 1101 * we can merge the two vma's. For example, we refuse to merge a vma if 1102 * there is a vm_ops->close() function, because that indicates that the 1103 * driver is doing some kind of reference counting. But that doesn't 1104 * really matter for the anon_vma sharing case. 
1105 */ 1106 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) 1107 { 1108 return a->vm_end == b->vm_start && 1109 mpol_equal(vma_policy(a), vma_policy(b)) && 1110 a->vm_file == b->vm_file && 1111 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) && 1112 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); 1113 } 1114 1115 /* 1116 * Do some basic sanity checking to see if we can re-use the anon_vma 1117 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be 1118 * the same as 'old', the other will be the new one that is trying 1119 * to share the anon_vma. 1120 * 1121 * NOTE! This runs with mmap_lock held for reading, so it is possible that 1122 * the anon_vma of 'old' is concurrently in the process of being set up 1123 * by another page fault trying to merge _that_. But that's ok: if it 1124 * is being set up, that automatically means that it will be a singleton 1125 * acceptable for merging, so we can do all of this optimistically. But 1126 * we do that READ_ONCE() to make sure that we never re-load the pointer. 1127 * 1128 * IOW: that the "list_is_singular()" test on the anon_vma_chain only 1129 * matters for the 'stable anon_vma' case (ie the thing we want to avoid 1130 * is to return an anon_vma that is "complex" due to having gone through 1131 * a fork). 1132 * 1133 * We also make sure that the two vma's are compatible (adjacent, 1134 * and with the same memory policies). That's all stable, even with just 1135 * a read lock on the mmap_lock. 1136 */ 1137 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) 1138 { 1139 if (anon_vma_compatible(a, b)) { 1140 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); 1141 1142 if (anon_vma && list_is_singular(&old->anon_vma_chain)) 1143 return anon_vma; 1144 } 1145 return NULL; 1146 } 1147 1148 /* 1149 * find_mergeable_anon_vma is used by anon_vma_prepare, to check 1150 * neighbouring vmas for a suitable anon_vma, before it goes off 1151 * to allocate a new anon_vma. It checks because a repetitive 1152 * sequence of mprotects and faults may otherwise lead to distinct 1153 * anon_vmas being allocated, preventing vma merge in subsequent 1154 * mprotect. 1155 */ 1156 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) 1157 { 1158 struct anon_vma *anon_vma = NULL; 1159 struct vm_area_struct *prev, *next; 1160 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end); 1161 1162 /* Try next first. */ 1163 next = vma_iter_load(&vmi); 1164 if (next) { 1165 anon_vma = reusable_anon_vma(next, vma, next); 1166 if (anon_vma) 1167 return anon_vma; 1168 } 1169 1170 prev = vma_prev(&vmi); 1171 VM_BUG_ON_VMA(prev != vma, vma); 1172 prev = vma_prev(&vmi); 1173 /* Try prev next. */ 1174 if (prev) 1175 anon_vma = reusable_anon_vma(prev, prev, vma); 1176 1177 /* 1178 * We might reach here with anon_vma == NULL if we can't find 1179 * any reusable anon_vma. 1180 * There's no absolute need to look only at touching neighbours: 1181 * we could search further afield for "compatible" anon_vmas. 1182 * But it would probably just be a waste of time searching, 1183 * or lead to too many vmas hanging off the same anon_vma. 1184 * We're trying to allow mprotect remerging later on, 1185 * not trying to minimize memory used for anon_vmas. 
1186 */ 1187 return anon_vma; 1188 } 1189 1190 /* 1191 * If a hint addr is less than mmap_min_addr change hint to be as 1192 * low as possible but still greater than mmap_min_addr 1193 */ 1194 static inline unsigned long round_hint_to_min(unsigned long hint) 1195 { 1196 hint &= PAGE_MASK; 1197 if (((void *)hint != NULL) && 1198 (hint < mmap_min_addr)) 1199 return PAGE_ALIGN(mmap_min_addr); 1200 return hint; 1201 } 1202 1203 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, 1204 unsigned long bytes) 1205 { 1206 unsigned long locked_pages, limit_pages; 1207 1208 if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) 1209 return true; 1210 1211 locked_pages = bytes >> PAGE_SHIFT; 1212 locked_pages += mm->locked_vm; 1213 1214 limit_pages = rlimit(RLIMIT_MEMLOCK); 1215 limit_pages >>= PAGE_SHIFT; 1216 1217 return locked_pages <= limit_pages; 1218 } 1219 1220 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) 1221 { 1222 if (S_ISREG(inode->i_mode)) 1223 return MAX_LFS_FILESIZE; 1224 1225 if (S_ISBLK(inode->i_mode)) 1226 return MAX_LFS_FILESIZE; 1227 1228 if (S_ISSOCK(inode->i_mode)) 1229 return MAX_LFS_FILESIZE; 1230 1231 /* Special "we do even unsigned file positions" case */ 1232 if (file->f_mode & FMODE_UNSIGNED_OFFSET) 1233 return 0; 1234 1235 /* Yes, random drivers might want more. But I'm tired of buggy drivers */ 1236 return ULONG_MAX; 1237 } 1238 1239 static inline bool file_mmap_ok(struct file *file, struct inode *inode, 1240 unsigned long pgoff, unsigned long len) 1241 { 1242 u64 maxsize = file_mmap_size_max(file, inode); 1243 1244 if (maxsize && len > maxsize) 1245 return false; 1246 maxsize -= len; 1247 if (pgoff > maxsize >> PAGE_SHIFT) 1248 return false; 1249 return true; 1250 } 1251 1252 /* 1253 * The caller must write-lock current->mm->mmap_lock. 1254 */ 1255 unsigned long do_mmap(struct file *file, unsigned long addr, 1256 unsigned long len, unsigned long prot, 1257 unsigned long flags, vm_flags_t vm_flags, 1258 unsigned long pgoff, unsigned long *populate, 1259 struct list_head *uf) 1260 { 1261 struct mm_struct *mm = current->mm; 1262 int pkey = 0; 1263 1264 *populate = 0; 1265 1266 if (!len) 1267 return -EINVAL; 1268 1269 /* 1270 * Does the application expect PROT_READ to imply PROT_EXEC? 1271 * 1272 * (the exception is when the underlying filesystem is noexec 1273 * mounted, in which case we don't add PROT_EXEC.) 1274 */ 1275 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) 1276 if (!(file && path_noexec(&file->f_path))) 1277 prot |= PROT_EXEC; 1278 1279 /* force arch specific MAP_FIXED handling in get_unmapped_area */ 1280 if (flags & MAP_FIXED_NOREPLACE) 1281 flags |= MAP_FIXED; 1282 1283 if (!(flags & MAP_FIXED)) 1284 addr = round_hint_to_min(addr); 1285 1286 /* Careful about overflows.. */ 1287 len = PAGE_ALIGN(len); 1288 if (!len) 1289 return -ENOMEM; 1290 1291 /* offset overflow? */ 1292 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) 1293 return -EOVERFLOW; 1294 1295 /* Too many mappings? */ 1296 if (mm->map_count > sysctl_max_map_count) 1297 return -ENOMEM; 1298 1299 /* 1300 * addr is returned from get_unmapped_area, 1301 * There are two cases: 1302 * 1> MAP_FIXED == false 1303 * unallocated memory, no need to check sealing. 1304 * 1> MAP_FIXED == true 1305 * sealing is checked inside mmap_region when 1306 * do_vmi_munmap is called. 
1307 */ 1308 1309 if (prot == PROT_EXEC) { 1310 pkey = execute_only_pkey(mm); 1311 if (pkey < 0) 1312 pkey = 0; 1313 } 1314 1315 /* Do simple checking here so the lower-level routines won't have 1316 * to. we assume access permissions have been handled by the open 1317 * of the memory object, so we don't do any here. 1318 */ 1319 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | 1320 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; 1321 1322 /* Obtain the address to map to. we verify (or select) it and ensure 1323 * that it represents a valid section of the address space. 1324 */ 1325 addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags); 1326 if (IS_ERR_VALUE(addr)) 1327 return addr; 1328 1329 if (flags & MAP_FIXED_NOREPLACE) { 1330 if (find_vma_intersection(mm, addr, addr + len)) 1331 return -EEXIST; 1332 } 1333 1334 if (flags & MAP_LOCKED) 1335 if (!can_do_mlock()) 1336 return -EPERM; 1337 1338 if (!mlock_future_ok(mm, vm_flags, len)) 1339 return -EAGAIN; 1340 1341 if (file) { 1342 struct inode *inode = file_inode(file); 1343 unsigned long flags_mask; 1344 1345 if (!file_mmap_ok(file, inode, pgoff, len)) 1346 return -EOVERFLOW; 1347 1348 flags_mask = LEGACY_MAP_MASK; 1349 if (file->f_op->fop_flags & FOP_MMAP_SYNC) 1350 flags_mask |= MAP_SYNC; 1351 1352 switch (flags & MAP_TYPE) { 1353 case MAP_SHARED: 1354 /* 1355 * Force use of MAP_SHARED_VALIDATE with non-legacy 1356 * flags. E.g. MAP_SYNC is dangerous to use with 1357 * MAP_SHARED as you don't know which consistency model 1358 * you will get. We silently ignore unsupported flags 1359 * with MAP_SHARED to preserve backward compatibility. 1360 */ 1361 flags &= LEGACY_MAP_MASK; 1362 fallthrough; 1363 case MAP_SHARED_VALIDATE: 1364 if (flags & ~flags_mask) 1365 return -EOPNOTSUPP; 1366 if (prot & PROT_WRITE) { 1367 if (!(file->f_mode & FMODE_WRITE)) 1368 return -EACCES; 1369 if (IS_SWAPFILE(file->f_mapping->host)) 1370 return -ETXTBSY; 1371 } 1372 1373 /* 1374 * Make sure we don't allow writing to an append-only 1375 * file.. 1376 */ 1377 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) 1378 return -EACCES; 1379 1380 vm_flags |= VM_SHARED | VM_MAYSHARE; 1381 if (!(file->f_mode & FMODE_WRITE)) 1382 vm_flags &= ~(VM_MAYWRITE | VM_SHARED); 1383 fallthrough; 1384 case MAP_PRIVATE: 1385 if (!(file->f_mode & FMODE_READ)) 1386 return -EACCES; 1387 if (path_noexec(&file->f_path)) { 1388 if (vm_flags & VM_EXEC) 1389 return -EPERM; 1390 vm_flags &= ~VM_MAYEXEC; 1391 } 1392 1393 if (!file->f_op->mmap) 1394 return -ENODEV; 1395 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) 1396 return -EINVAL; 1397 break; 1398 1399 default: 1400 return -EINVAL; 1401 } 1402 } else { 1403 switch (flags & MAP_TYPE) { 1404 case MAP_SHARED: 1405 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) 1406 return -EINVAL; 1407 /* 1408 * Ignore pgoff. 1409 */ 1410 pgoff = 0; 1411 vm_flags |= VM_SHARED | VM_MAYSHARE; 1412 break; 1413 case MAP_DROPPABLE: 1414 if (VM_DROPPABLE == VM_NONE) 1415 return -ENOTSUPP; 1416 /* 1417 * A locked or stack area makes no sense to be droppable. 1418 * 1419 * Also, since droppable pages can just go away at any time 1420 * it makes no sense to copy them on fork or dump them. 1421 * 1422 * And don't attempt to combine with hugetlb for now. 1423 */ 1424 if (flags & (MAP_LOCKED | MAP_HUGETLB)) 1425 return -EINVAL; 1426 if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) 1427 return -EINVAL; 1428 1429 vm_flags |= VM_DROPPABLE; 1430 1431 /* 1432 * If the pages can be dropped, then it doesn't make 1433 * sense to reserve them. 
1434 */ 1435 vm_flags |= VM_NORESERVE; 1436 1437 /* 1438 * Likewise, they're volatile enough that they 1439 * shouldn't survive forks or coredumps. 1440 */ 1441 vm_flags |= VM_WIPEONFORK | VM_DONTDUMP; 1442 fallthrough; 1443 case MAP_PRIVATE: 1444 /* 1445 * Set pgoff according to addr for anon_vma. 1446 */ 1447 pgoff = addr >> PAGE_SHIFT; 1448 break; 1449 default: 1450 return -EINVAL; 1451 } 1452 } 1453 1454 /* 1455 * Set 'VM_NORESERVE' if we should not account for the 1456 * memory use of this mapping. 1457 */ 1458 if (flags & MAP_NORESERVE) { 1459 /* We honor MAP_NORESERVE if allowed to overcommit */ 1460 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) 1461 vm_flags |= VM_NORESERVE; 1462 1463 /* hugetlb applies strict overcommit unless MAP_NORESERVE */ 1464 if (file && is_file_hugepages(file)) 1465 vm_flags |= VM_NORESERVE; 1466 } 1467 1468 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); 1469 if (!IS_ERR_VALUE(addr) && 1470 ((vm_flags & VM_LOCKED) || 1471 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) 1472 *populate = len; 1473 return addr; 1474 } 1475 1476 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, 1477 unsigned long prot, unsigned long flags, 1478 unsigned long fd, unsigned long pgoff) 1479 { 1480 struct file *file = NULL; 1481 unsigned long retval; 1482 1483 if (!(flags & MAP_ANONYMOUS)) { 1484 audit_mmap_fd(fd, flags); 1485 file = fget(fd); 1486 if (!file) 1487 return -EBADF; 1488 if (is_file_hugepages(file)) { 1489 len = ALIGN(len, huge_page_size(hstate_file(file))); 1490 } else if (unlikely(flags & MAP_HUGETLB)) { 1491 retval = -EINVAL; 1492 goto out_fput; 1493 } 1494 } else if (flags & MAP_HUGETLB) { 1495 struct hstate *hs; 1496 1497 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); 1498 if (!hs) 1499 return -EINVAL; 1500 1501 len = ALIGN(len, huge_page_size(hs)); 1502 /* 1503 * VM_NORESERVE is used because the reservations will be 1504 * taken when vm_ops->mmap() is called 1505 */ 1506 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, 1507 VM_NORESERVE, 1508 HUGETLB_ANONHUGE_INODE, 1509 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); 1510 if (IS_ERR(file)) 1511 return PTR_ERR(file); 1512 } 1513 1514 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); 1515 out_fput: 1516 if (file) 1517 fput(file); 1518 return retval; 1519 } 1520 1521 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, 1522 unsigned long, prot, unsigned long, flags, 1523 unsigned long, fd, unsigned long, pgoff) 1524 { 1525 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); 1526 } 1527 1528 #ifdef __ARCH_WANT_SYS_OLD_MMAP 1529 struct mmap_arg_struct { 1530 unsigned long addr; 1531 unsigned long len; 1532 unsigned long prot; 1533 unsigned long flags; 1534 unsigned long fd; 1535 unsigned long offset; 1536 }; 1537 1538 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) 1539 { 1540 struct mmap_arg_struct a; 1541 1542 if (copy_from_user(&a, arg, sizeof(a))) 1543 return -EFAULT; 1544 if (offset_in_page(a.offset)) 1545 return -EINVAL; 1546 1547 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, 1548 a.offset >> PAGE_SHIFT); 1549 } 1550 #endif /* __ARCH_WANT_SYS_OLD_MMAP */ 1551 1552 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) 1553 { 1554 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); 1555 } 1556 1557 static bool vma_is_shared_writable(struct vm_area_struct *vma) 1558 { 1559 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == 1560 (VM_WRITE | VM_SHARED); 1561 
} 1562 1563 static bool vma_fs_can_writeback(struct vm_area_struct *vma) 1564 { 1565 /* No managed pages to writeback. */ 1566 if (vma->vm_flags & VM_PFNMAP) 1567 return false; 1568 1569 return vma->vm_file && vma->vm_file->f_mapping && 1570 mapping_can_writeback(vma->vm_file->f_mapping); 1571 } 1572 1573 /* 1574 * Does this VMA require the underlying folios to have their dirty state 1575 * tracked? 1576 */ 1577 bool vma_needs_dirty_tracking(struct vm_area_struct *vma) 1578 { 1579 /* Only shared, writable VMAs require dirty tracking. */ 1580 if (!vma_is_shared_writable(vma)) 1581 return false; 1582 1583 /* Does the filesystem need to be notified? */ 1584 if (vm_ops_needs_writenotify(vma->vm_ops)) 1585 return true; 1586 1587 /* 1588 * Even if the filesystem doesn't indicate a need for writenotify, if it 1589 * can writeback, dirty tracking is still required. 1590 */ 1591 return vma_fs_can_writeback(vma); 1592 } 1593 1594 /* 1595 * Some shared mappings will want the pages marked read-only 1596 * to track write events. If so, we'll downgrade vm_page_prot 1597 * to the private version (using protection_map[] without the 1598 * VM_SHARED bit). 1599 */ 1600 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) 1601 { 1602 /* If it was private or non-writable, the write bit is already clear */ 1603 if (!vma_is_shared_writable(vma)) 1604 return false; 1605 1606 /* The backer wishes to know when pages are first written to? */ 1607 if (vm_ops_needs_writenotify(vma->vm_ops)) 1608 return true; 1609 1610 /* The open routine did something to the protections that pgprot_modify 1611 * won't preserve? */ 1612 if (pgprot_val(vm_page_prot) != 1613 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) 1614 return false; 1615 1616 /* 1617 * Do we need to track softdirty? hugetlb does not support softdirty 1618 * tracking yet. 1619 */ 1620 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) 1621 return true; 1622 1623 /* Do we need write faults for uffd-wp tracking? */ 1624 if (userfaultfd_wp(vma)) 1625 return true; 1626 1627 /* Can the mapping track the dirty pages? */ 1628 return vma_fs_can_writeback(vma); 1629 } 1630 1631 /* 1632 * We account for memory if it's a private writeable mapping, 1633 * not hugepages and VM_NORESERVE wasn't set. 1634 */ 1635 static inline bool accountable_mapping(struct file *file, vm_flags_t vm_flags) 1636 { 1637 /* 1638 * hugetlb has its own accounting separate from the core VM 1639 * VM_HUGETLB may not be set yet so we cannot check for that flag. 1640 */ 1641 if (file && is_file_hugepages(file)) 1642 return false; 1643 1644 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; 1645 } 1646 1647 /** 1648 * unmapped_area() - Find an area between the low_limit and the high_limit with 1649 * the correct alignment and offset, all from @info. Note: current->mm is used 1650 * for the search. 1651 * 1652 * @info: The unmapped area information including the range [low_limit - 1653 * high_limit), the alignment offset and mask. 1654 * 1655 * Return: A memory address or -ENOMEM. 
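 *
 * Worked example (illustrative numbers only): with length = 0x1000,
 * start_gap = 0, align_mask = 0xffff and align_offset = 0, the search
 * length is padded to 0x1000 + 0xffff, and a candidate gap is rounded up
 * to the requested alignment:
 *
 *	gap = vma_iter_addr(&vmi) + info->start_gap;
 *	gap += (info->align_offset - gap) & info->align_mask;
 *
 * so the lowest fitting gap found at 0x12345000 yields 0x12350000.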
1656 */ 1657 static unsigned long unmapped_area(struct vm_unmapped_area_info *info) 1658 { 1659 unsigned long length, gap; 1660 unsigned long low_limit, high_limit; 1661 struct vm_area_struct *tmp; 1662 VMA_ITERATOR(vmi, current->mm, 0); 1663 1664 /* Adjust search length to account for worst case alignment overhead */ 1665 length = info->length + info->align_mask + info->start_gap; 1666 if (length < info->length) 1667 return -ENOMEM; 1668 1669 low_limit = info->low_limit; 1670 if (low_limit < mmap_min_addr) 1671 low_limit = mmap_min_addr; 1672 high_limit = info->high_limit; 1673 retry: 1674 if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length)) 1675 return -ENOMEM; 1676 1677 /* 1678 * Adjust for the gap first so it doesn't interfere with the 1679 * later alignment. The first step is the minimum needed to 1680 * fulill the start gap, the next steps is the minimum to align 1681 * that. It is the minimum needed to fulill both. 1682 */ 1683 gap = vma_iter_addr(&vmi) + info->start_gap; 1684 gap += (info->align_offset - gap) & info->align_mask; 1685 tmp = vma_next(&vmi); 1686 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ 1687 if (vm_start_gap(tmp) < gap + length - 1) { 1688 low_limit = tmp->vm_end; 1689 vma_iter_reset(&vmi); 1690 goto retry; 1691 } 1692 } else { 1693 tmp = vma_prev(&vmi); 1694 if (tmp && vm_end_gap(tmp) > gap) { 1695 low_limit = vm_end_gap(tmp); 1696 vma_iter_reset(&vmi); 1697 goto retry; 1698 } 1699 } 1700 1701 return gap; 1702 } 1703 1704 /** 1705 * unmapped_area_topdown() - Find an area between the low_limit and the 1706 * high_limit with the correct alignment and offset at the highest available 1707 * address, all from @info. Note: current->mm is used for the search. 1708 * 1709 * @info: The unmapped area information including the range [low_limit - 1710 * high_limit), the alignment offset and mask. 1711 * 1712 * Return: A memory address or -ENOMEM. 1713 */ 1714 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) 1715 { 1716 unsigned long length, gap, gap_end; 1717 unsigned long low_limit, high_limit; 1718 struct vm_area_struct *tmp; 1719 VMA_ITERATOR(vmi, current->mm, 0); 1720 1721 /* Adjust search length to account for worst case alignment overhead */ 1722 length = info->length + info->align_mask + info->start_gap; 1723 if (length < info->length) 1724 return -ENOMEM; 1725 1726 low_limit = info->low_limit; 1727 if (low_limit < mmap_min_addr) 1728 low_limit = mmap_min_addr; 1729 high_limit = info->high_limit; 1730 retry: 1731 if (vma_iter_area_highest(&vmi, low_limit, high_limit, length)) 1732 return -ENOMEM; 1733 1734 gap = vma_iter_end(&vmi) - info->length; 1735 gap -= (gap - info->align_offset) & info->align_mask; 1736 gap_end = vma_iter_end(&vmi); 1737 tmp = vma_next(&vmi); 1738 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ 1739 if (vm_start_gap(tmp) < gap_end) { 1740 high_limit = vm_start_gap(tmp); 1741 vma_iter_reset(&vmi); 1742 goto retry; 1743 } 1744 } else { 1745 tmp = vma_prev(&vmi); 1746 if (tmp && vm_end_gap(tmp) > gap) { 1747 high_limit = tmp->vm_start; 1748 vma_iter_reset(&vmi); 1749 goto retry; 1750 } 1751 } 1752 1753 return gap; 1754 } 1755 1756 /* 1757 * Search for an unmapped address range. 1758 * 1759 * We are looking for a range that: 1760 * - does not intersect with any VMA; 1761 * - is contained within the [low_limit, high_limit) interval; 1762 * - is at least the desired size. 
1763 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) 1764 */ 1765 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) 1766 { 1767 unsigned long addr; 1768 1769 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) 1770 addr = unmapped_area_topdown(info); 1771 else 1772 addr = unmapped_area(info); 1773 1774 trace_vm_unmapped_area(addr, info); 1775 return addr; 1776 } 1777 1778 /* Get an address range which is currently unmapped. 1779 * For shmat() with addr=0. 1780 * 1781 * Ugly calling convention alert: 1782 * Return value with the low bits set means error value, 1783 * ie 1784 * if (ret & ~PAGE_MASK) 1785 * error = ret; 1786 * 1787 * This function "knows" that -ENOMEM has the bits set. 1788 */ 1789 unsigned long 1790 generic_get_unmapped_area(struct file *filp, unsigned long addr, 1791 unsigned long len, unsigned long pgoff, 1792 unsigned long flags) 1793 { 1794 struct mm_struct *mm = current->mm; 1795 struct vm_area_struct *vma, *prev; 1796 struct vm_unmapped_area_info info = {}; 1797 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); 1798 1799 if (len > mmap_end - mmap_min_addr) 1800 return -ENOMEM; 1801 1802 if (flags & MAP_FIXED) 1803 return addr; 1804 1805 if (addr) { 1806 addr = PAGE_ALIGN(addr); 1807 vma = find_vma_prev(mm, addr, &prev); 1808 if (mmap_end - len >= addr && addr >= mmap_min_addr && 1809 (!vma || addr + len <= vm_start_gap(vma)) && 1810 (!prev || addr >= vm_end_gap(prev))) 1811 return addr; 1812 } 1813 1814 info.length = len; 1815 info.low_limit = mm->mmap_base; 1816 info.high_limit = mmap_end; 1817 return vm_unmapped_area(&info); 1818 } 1819 1820 #ifndef HAVE_ARCH_UNMAPPED_AREA 1821 unsigned long 1822 arch_get_unmapped_area(struct file *filp, unsigned long addr, 1823 unsigned long len, unsigned long pgoff, 1824 unsigned long flags) 1825 { 1826 return generic_get_unmapped_area(filp, addr, len, pgoff, flags); 1827 } 1828 #endif 1829 1830 /* 1831 * This mmap-allocator allocates new areas top-down from below the 1832 * stack's low limit (the base): 1833 */ 1834 unsigned long 1835 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 1836 unsigned long len, unsigned long pgoff, 1837 unsigned long flags) 1838 { 1839 struct vm_area_struct *vma, *prev; 1840 struct mm_struct *mm = current->mm; 1841 struct vm_unmapped_area_info info = {}; 1842 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); 1843 1844 /* requested length too big for entire address space */ 1845 if (len > mmap_end - mmap_min_addr) 1846 return -ENOMEM; 1847 1848 if (flags & MAP_FIXED) 1849 return addr; 1850 1851 /* requesting a specific address */ 1852 if (addr) { 1853 addr = PAGE_ALIGN(addr); 1854 vma = find_vma_prev(mm, addr, &prev); 1855 if (mmap_end - len >= addr && addr >= mmap_min_addr && 1856 (!vma || addr + len <= vm_start_gap(vma)) && 1857 (!prev || addr >= vm_end_gap(prev))) 1858 return addr; 1859 } 1860 1861 info.flags = VM_UNMAPPED_AREA_TOPDOWN; 1862 info.length = len; 1863 info.low_limit = PAGE_SIZE; 1864 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); 1865 addr = vm_unmapped_area(&info); 1866 1867 /* 1868 * A failed mmap() very likely causes application failure, 1869 * so fall back to the bottom-up function here. This scenario 1870 * can happen with large stack limits and large mmap() 1871 * allocations. 
1872 */ 1873 if (offset_in_page(addr)) { 1874 VM_BUG_ON(addr != -ENOMEM); 1875 info.flags = 0; 1876 info.low_limit = TASK_UNMAPPED_BASE; 1877 info.high_limit = mmap_end; 1878 addr = vm_unmapped_area(&info); 1879 } 1880 1881 return addr; 1882 } 1883 1884 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 1885 unsigned long 1886 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 1887 unsigned long len, unsigned long pgoff, 1888 unsigned long flags) 1889 { 1890 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); 1891 } 1892 #endif 1893 1894 #ifndef HAVE_ARCH_UNMAPPED_AREA_VMFLAGS 1895 unsigned long 1896 arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len, 1897 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) 1898 { 1899 return arch_get_unmapped_area(filp, addr, len, pgoff, flags); 1900 } 1901 1902 unsigned long 1903 arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr, 1904 unsigned long len, unsigned long pgoff, 1905 unsigned long flags, vm_flags_t vm_flags) 1906 { 1907 return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); 1908 } 1909 #endif 1910 1911 unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp, 1912 unsigned long addr, unsigned long len, 1913 unsigned long pgoff, unsigned long flags, 1914 vm_flags_t vm_flags) 1915 { 1916 if (test_bit(MMF_TOPDOWN, &mm->flags)) 1917 return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff, 1918 flags, vm_flags); 1919 return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, vm_flags); 1920 } 1921 1922 unsigned long 1923 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, 1924 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) 1925 { 1926 unsigned long (*get_area)(struct file *, unsigned long, 1927 unsigned long, unsigned long, unsigned long) 1928 = NULL; 1929 1930 unsigned long error = arch_mmap_check(addr, len, flags); 1931 if (error) 1932 return error; 1933 1934 /* Careful about overflows.. */ 1935 if (len > TASK_SIZE) 1936 return -ENOMEM; 1937 1938 if (file) { 1939 if (file->f_op->get_unmapped_area) 1940 get_area = file->f_op->get_unmapped_area; 1941 } else if (flags & MAP_SHARED) { 1942 /* 1943 * mmap_region() will call shmem_zero_setup() to create a file, 1944 * so use shmem's get_unmapped_area in case it can be huge. 1945 */ 1946 get_area = shmem_get_unmapped_area; 1947 } 1948 1949 /* Always treat pgoff as zero for anonymous memory. */ 1950 if (!file) 1951 pgoff = 0; 1952 1953 if (get_area) { 1954 addr = get_area(file, addr, len, pgoff, flags); 1955 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 1956 /* Ensures that larger anonymous mappings are THP aligned. */ 1957 addr = thp_get_unmapped_area_vmflags(file, addr, len, 1958 pgoff, flags, vm_flags); 1959 } else { 1960 addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len, 1961 pgoff, flags, vm_flags); 1962 } 1963 if (IS_ERR_VALUE(addr)) 1964 return addr; 1965 1966 if (addr > TASK_SIZE - len) 1967 return -ENOMEM; 1968 if (offset_in_page(addr)) 1969 return -EINVAL; 1970 1971 error = security_mmap_addr(addr); 1972 return error ? 
error : addr; 1973 } 1974 1975 unsigned long 1976 mm_get_unmapped_area(struct mm_struct *mm, struct file *file, 1977 unsigned long addr, unsigned long len, 1978 unsigned long pgoff, unsigned long flags) 1979 { 1980 if (test_bit(MMF_TOPDOWN, &mm->flags)) 1981 return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags); 1982 return arch_get_unmapped_area(file, addr, len, pgoff, flags); 1983 } 1984 EXPORT_SYMBOL(mm_get_unmapped_area); 1985 1986 /** 1987 * find_vma_intersection() - Look up the first VMA which intersects the interval 1988 * @mm: The process address space. 1989 * @start_addr: The inclusive start user address. 1990 * @end_addr: The exclusive end user address. 1991 * 1992 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes 1993 * start_addr < end_addr. 1994 */ 1995 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, 1996 unsigned long start_addr, 1997 unsigned long end_addr) 1998 { 1999 unsigned long index = start_addr; 2000 2001 mmap_assert_locked(mm); 2002 return mt_find(&mm->mm_mt, &index, end_addr - 1); 2003 } 2004 EXPORT_SYMBOL(find_vma_intersection); 2005 2006 /** 2007 * find_vma() - Find the VMA for a given address, or the next VMA. 2008 * @mm: The mm_struct to check 2009 * @addr: The address 2010 * 2011 * Returns: The VMA associated with addr, or the next VMA. 2012 * May return %NULL in the case of no VMA at addr or above. 2013 */ 2014 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) 2015 { 2016 unsigned long index = addr; 2017 2018 mmap_assert_locked(mm); 2019 return mt_find(&mm->mm_mt, &index, ULONG_MAX); 2020 } 2021 EXPORT_SYMBOL(find_vma); 2022 2023 /** 2024 * find_vma_prev() - Find the VMA for a given address, or the next vma and 2025 * set %pprev to the previous VMA, if any. 2026 * @mm: The mm_struct to check 2027 * @addr: The address 2028 * @pprev: The pointer to set to the previous VMA 2029 * 2030 * Note that RCU lock is missing here since the external mmap_lock() is used 2031 * instead. 2032 * 2033 * Returns: The VMA associated with @addr, or the next vma. 2034 * May return %NULL in the case of no vma at addr or above. 2035 */ 2036 struct vm_area_struct * 2037 find_vma_prev(struct mm_struct *mm, unsigned long addr, 2038 struct vm_area_struct **pprev) 2039 { 2040 struct vm_area_struct *vma; 2041 VMA_ITERATOR(vmi, mm, addr); 2042 2043 vma = vma_iter_load(&vmi); 2044 *pprev = vma_prev(&vmi); 2045 if (!vma) 2046 vma = vma_next(&vmi); 2047 return vma; 2048 } 2049 2050 /* 2051 * Verify that the stack growth is acceptable and 2052 * update accounting. This is shared with both the 2053 * grow-up and grow-down cases. 2054 */ 2055 static int acct_stack_growth(struct vm_area_struct *vma, 2056 unsigned long size, unsigned long grow) 2057 { 2058 struct mm_struct *mm = vma->vm_mm; 2059 unsigned long new_start; 2060 2061 /* address space limit tests */ 2062 if (!may_expand_vm(mm, vma->vm_flags, grow)) 2063 return -ENOMEM; 2064 2065 /* Stack limit test */ 2066 if (size > rlimit(RLIMIT_STACK)) 2067 return -ENOMEM; 2068 2069 /* mlock limit tests */ 2070 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) 2071 return -ENOMEM; 2072 2073 /* Check to ensure the stack will not grow into a hugetlb-only region */ 2074 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : 2075 vma->vm_end - size; 2076 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) 2077 return -EFAULT; 2078 2079 /* 2080 * Overcommit.. This must be the final test, as it will 2081 * update security statistics. 
2082 */ 2083 if (security_vm_enough_memory_mm(mm, grow)) 2084 return -ENOMEM; 2085 2086 return 0; 2087 } 2088 2089 #if defined(CONFIG_STACK_GROWSUP) 2090 /* 2091 * PA-RISC uses this for its stack. 2092 * vma is the last one with address > vma->vm_end. Have to extend vma. 2093 */ 2094 static int expand_upwards(struct vm_area_struct *vma, unsigned long address) 2095 { 2096 struct mm_struct *mm = vma->vm_mm; 2097 struct vm_area_struct *next; 2098 unsigned long gap_addr; 2099 int error = 0; 2100 VMA_ITERATOR(vmi, mm, vma->vm_start); 2101 2102 if (!(vma->vm_flags & VM_GROWSUP)) 2103 return -EFAULT; 2104 2105 /* Guard against exceeding limits of the address space. */ 2106 address &= PAGE_MASK; 2107 if (address >= (TASK_SIZE & PAGE_MASK)) 2108 return -ENOMEM; 2109 address += PAGE_SIZE; 2110 2111 /* Enforce stack_guard_gap */ 2112 gap_addr = address + stack_guard_gap; 2113 2114 /* Guard against overflow */ 2115 if (gap_addr < address || gap_addr > TASK_SIZE) 2116 gap_addr = TASK_SIZE; 2117 2118 next = find_vma_intersection(mm, vma->vm_end, gap_addr); 2119 if (next && vma_is_accessible(next)) { 2120 if (!(next->vm_flags & VM_GROWSUP)) 2121 return -ENOMEM; 2122 /* Check that both stack segments have the same anon_vma? */ 2123 } 2124 2125 if (next) 2126 vma_iter_prev_range_limit(&vmi, address); 2127 2128 vma_iter_config(&vmi, vma->vm_start, address); 2129 if (vma_iter_prealloc(&vmi, vma)) 2130 return -ENOMEM; 2131 2132 /* We must make sure the anon_vma is allocated. */ 2133 if (unlikely(anon_vma_prepare(vma))) { 2134 vma_iter_free(&vmi); 2135 return -ENOMEM; 2136 } 2137 2138 /* Lock the VMA before expanding to prevent concurrent page faults */ 2139 vma_start_write(vma); 2140 /* 2141 * vma->vm_start/vm_end cannot change under us because the caller 2142 * is required to hold the mmap_lock in read mode. We need the 2143 * anon_vma lock to serialize against concurrent expand_stacks. 2144 */ 2145 anon_vma_lock_write(vma->anon_vma); 2146 2147 /* Somebody else might have raced and expanded it already */ 2148 if (address > vma->vm_end) { 2149 unsigned long size, grow; 2150 2151 size = address - vma->vm_start; 2152 grow = (address - vma->vm_end) >> PAGE_SHIFT; 2153 2154 error = -ENOMEM; 2155 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { 2156 error = acct_stack_growth(vma, size, grow); 2157 if (!error) { 2158 /* 2159 * We only hold a shared mmap_lock lock here, so 2160 * we need to protect against concurrent vma 2161 * expansions. anon_vma_lock_write() doesn't 2162 * help here, as we don't guarantee that all 2163 * growable vmas in a mm share the same root 2164 * anon vma. So, we reuse mm->page_table_lock 2165 * to guard against concurrent vma expansions. 2166 */ 2167 spin_lock(&mm->page_table_lock); 2168 if (vma->vm_flags & VM_LOCKED) 2169 mm->locked_vm += grow; 2170 vm_stat_account(mm, vma->vm_flags, grow); 2171 anon_vma_interval_tree_pre_update_vma(vma); 2172 vma->vm_end = address; 2173 /* Overwrite old entry in mtree. */ 2174 vma_iter_store(&vmi, vma); 2175 anon_vma_interval_tree_post_update_vma(vma); 2176 spin_unlock(&mm->page_table_lock); 2177 2178 perf_event_mmap(vma); 2179 } 2180 } 2181 } 2182 anon_vma_unlock_write(vma->anon_vma); 2183 vma_iter_free(&vmi); 2184 validate_mm(mm); 2185 return error; 2186 } 2187 #endif /* CONFIG_STACK_GROWSUP */ 2188 2189 /* 2190 * vma is the first one with address < vma->vm_start. Have to extend vma. 2191 * mmap_lock held for writing. 
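 *
 * A minimal sketch of a caller, for illustration only (real fault paths
 * go through expand_stack()/expand_stack_locked() further below):
 *
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start > addr && (vma->vm_flags & VM_GROWSDOWN))
 *		error = expand_downwards(vma, addr);
 *
 * i.e. the VMA passed in is the first one above @addr and the caller
 * holds the mmap_lock for writing, as stated above.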
2192 */ 2193 int expand_downwards(struct vm_area_struct *vma, unsigned long address) 2194 { 2195 struct mm_struct *mm = vma->vm_mm; 2196 struct vm_area_struct *prev; 2197 int error = 0; 2198 VMA_ITERATOR(vmi, mm, vma->vm_start); 2199 2200 if (!(vma->vm_flags & VM_GROWSDOWN)) 2201 return -EFAULT; 2202 2203 address &= PAGE_MASK; 2204 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) 2205 return -EPERM; 2206 2207 /* Enforce stack_guard_gap */ 2208 prev = vma_prev(&vmi); 2209 /* Check that both stack segments have the same anon_vma? */ 2210 if (prev) { 2211 if (!(prev->vm_flags & VM_GROWSDOWN) && 2212 vma_is_accessible(prev) && 2213 (address - prev->vm_end < stack_guard_gap)) 2214 return -ENOMEM; 2215 } 2216 2217 if (prev) 2218 vma_iter_next_range_limit(&vmi, vma->vm_start); 2219 2220 vma_iter_config(&vmi, address, vma->vm_end); 2221 if (vma_iter_prealloc(&vmi, vma)) 2222 return -ENOMEM; 2223 2224 /* We must make sure the anon_vma is allocated. */ 2225 if (unlikely(anon_vma_prepare(vma))) { 2226 vma_iter_free(&vmi); 2227 return -ENOMEM; 2228 } 2229 2230 /* Lock the VMA before expanding to prevent concurrent page faults */ 2231 vma_start_write(vma); 2232 /* 2233 * vma->vm_start/vm_end cannot change under us because the caller 2234 * is required to hold the mmap_lock in read mode. We need the 2235 * anon_vma lock to serialize against concurrent expand_stacks. 2236 */ 2237 anon_vma_lock_write(vma->anon_vma); 2238 2239 /* Somebody else might have raced and expanded it already */ 2240 if (address < vma->vm_start) { 2241 unsigned long size, grow; 2242 2243 size = vma->vm_end - address; 2244 grow = (vma->vm_start - address) >> PAGE_SHIFT; 2245 2246 error = -ENOMEM; 2247 if (grow <= vma->vm_pgoff) { 2248 error = acct_stack_growth(vma, size, grow); 2249 if (!error) { 2250 /* 2251 * We only hold a shared mmap_lock lock here, so 2252 * we need to protect against concurrent vma 2253 * expansions. anon_vma_lock_write() doesn't 2254 * help here, as we don't guarantee that all 2255 * growable vmas in a mm share the same root 2256 * anon vma. So, we reuse mm->page_table_lock 2257 * to guard against concurrent vma expansions. 2258 */ 2259 spin_lock(&mm->page_table_lock); 2260 if (vma->vm_flags & VM_LOCKED) 2261 mm->locked_vm += grow; 2262 vm_stat_account(mm, vma->vm_flags, grow); 2263 anon_vma_interval_tree_pre_update_vma(vma); 2264 vma->vm_start = address; 2265 vma->vm_pgoff -= grow; 2266 /* Overwrite old entry in mtree. */ 2267 vma_iter_store(&vmi, vma); 2268 anon_vma_interval_tree_post_update_vma(vma); 2269 spin_unlock(&mm->page_table_lock); 2270 2271 perf_event_mmap(vma); 2272 } 2273 } 2274 } 2275 anon_vma_unlock_write(vma->anon_vma); 2276 vma_iter_free(&vmi); 2277 validate_mm(mm); 2278 return error; 2279 } 2280 2281 /* enforced gap between the expanding stack and other mappings. 
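 * The gap defaults to 256 pages and can be overridden on the kernel
 * command line, for example (illustrative value only):
 *
 *	stack_guard_gap=512
 *
 * The value is parsed as a number of pages by
 * cmdline_parse_stack_guard_gap() below and shifted by PAGE_SHIFT.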
*/ 2282 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; 2283 2284 static int __init cmdline_parse_stack_guard_gap(char *p) 2285 { 2286 unsigned long val; 2287 char *endptr; 2288 2289 val = simple_strtoul(p, &endptr, 10); 2290 if (!*endptr) 2291 stack_guard_gap = val << PAGE_SHIFT; 2292 2293 return 1; 2294 } 2295 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); 2296 2297 #ifdef CONFIG_STACK_GROWSUP 2298 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) 2299 { 2300 return expand_upwards(vma, address); 2301 } 2302 2303 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) 2304 { 2305 struct vm_area_struct *vma, *prev; 2306 2307 addr &= PAGE_MASK; 2308 vma = find_vma_prev(mm, addr, &prev); 2309 if (vma && (vma->vm_start <= addr)) 2310 return vma; 2311 if (!prev) 2312 return NULL; 2313 if (expand_stack_locked(prev, addr)) 2314 return NULL; 2315 if (prev->vm_flags & VM_LOCKED) 2316 populate_vma_page_range(prev, addr, prev->vm_end, NULL); 2317 return prev; 2318 } 2319 #else 2320 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) 2321 { 2322 return expand_downwards(vma, address); 2323 } 2324 2325 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) 2326 { 2327 struct vm_area_struct *vma; 2328 unsigned long start; 2329 2330 addr &= PAGE_MASK; 2331 vma = find_vma(mm, addr); 2332 if (!vma) 2333 return NULL; 2334 if (vma->vm_start <= addr) 2335 return vma; 2336 start = vma->vm_start; 2337 if (expand_stack_locked(vma, addr)) 2338 return NULL; 2339 if (vma->vm_flags & VM_LOCKED) 2340 populate_vma_page_range(vma, addr, start, NULL); 2341 return vma; 2342 } 2343 #endif 2344 2345 #if defined(CONFIG_STACK_GROWSUP) 2346 2347 #define vma_expand_up(vma,addr) expand_upwards(vma, addr) 2348 #define vma_expand_down(vma, addr) (-EFAULT) 2349 2350 #else 2351 2352 #define vma_expand_up(vma,addr) (-EFAULT) 2353 #define vma_expand_down(vma, addr) expand_downwards(vma, addr) 2354 2355 #endif 2356 2357 /* 2358 * expand_stack(): legacy interface for page faulting. Don't use unless 2359 * you have to. 2360 * 2361 * This is called with the mm locked for reading, drops the lock, takes 2362 * the lock for writing, tries to look up a vma again, expands it if 2363 * necessary, and downgrades the lock to reading again. 2364 * 2365 * If no vma is found or it can't be expanded, it returns NULL and has 2366 * dropped the lock. 2367 */ 2368 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) 2369 { 2370 struct vm_area_struct *vma, *prev; 2371 2372 mmap_read_unlock(mm); 2373 if (mmap_write_lock_killable(mm)) 2374 return NULL; 2375 2376 vma = find_vma_prev(mm, addr, &prev); 2377 if (vma && vma->vm_start <= addr) 2378 goto success; 2379 2380 if (prev && !vma_expand_up(prev, addr)) { 2381 vma = prev; 2382 goto success; 2383 } 2384 2385 if (vma && !vma_expand_down(vma, addr)) 2386 goto success; 2387 2388 mmap_write_unlock(mm); 2389 return NULL; 2390 2391 success: 2392 mmap_write_downgrade(mm); 2393 return vma; 2394 } 2395 2396 /* 2397 * Ok - we have the memory areas we should free on a maple tree so release them, 2398 * and do the vma updates. 2399 * 2400 * Called with the mm semaphore held. 
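 *
 * (This frees the detached vm_area_structs and drops their accounting;
 * the page tables themselves are torn down separately by unmap_region()
 * below.)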
2401 */ 2402 static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas) 2403 { 2404 unsigned long nr_accounted = 0; 2405 struct vm_area_struct *vma; 2406 2407 /* Update high watermark before we lower total_vm */ 2408 update_hiwater_vm(mm); 2409 mas_for_each(mas, vma, ULONG_MAX) { 2410 long nrpages = vma_pages(vma); 2411 2412 if (vma->vm_flags & VM_ACCOUNT) 2413 nr_accounted += nrpages; 2414 vm_stat_account(mm, vma->vm_flags, -nrpages); 2415 remove_vma(vma, false); 2416 } 2417 vm_unacct_memory(nr_accounted); 2418 } 2419 2420 /* 2421 * Get rid of page table information in the indicated region. 2422 * 2423 * Called with the mm semaphore held. 2424 */ 2425 static void unmap_region(struct mm_struct *mm, struct ma_state *mas, 2426 struct vm_area_struct *vma, struct vm_area_struct *prev, 2427 struct vm_area_struct *next, unsigned long start, 2428 unsigned long end, unsigned long tree_end, bool mm_wr_locked) 2429 { 2430 struct mmu_gather tlb; 2431 unsigned long mt_start = mas->index; 2432 2433 lru_add_drain(); 2434 tlb_gather_mmu(&tlb, mm); 2435 update_hiwater_rss(mm); 2436 unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked); 2437 mas_set(mas, mt_start); 2438 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, 2439 next ? next->vm_start : USER_PGTABLES_CEILING, 2440 mm_wr_locked); 2441 tlb_finish_mmu(&tlb); 2442 } 2443 2444 /* 2445 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it 2446 * has already been checked or doesn't make sense to fail. 2447 * VMA Iterator will point to the end VMA. 2448 */ 2449 static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, 2450 unsigned long addr, int new_below) 2451 { 2452 struct vma_prepare vp; 2453 struct vm_area_struct *new; 2454 int err; 2455 2456 WARN_ON(vma->vm_start >= addr); 2457 WARN_ON(vma->vm_end <= addr); 2458 2459 if (vma->vm_ops && vma->vm_ops->may_split) { 2460 err = vma->vm_ops->may_split(vma, addr); 2461 if (err) 2462 return err; 2463 } 2464 2465 new = vm_area_dup(vma); 2466 if (!new) 2467 return -ENOMEM; 2468 2469 if (new_below) { 2470 new->vm_end = addr; 2471 } else { 2472 new->vm_start = addr; 2473 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); 2474 } 2475 2476 err = -ENOMEM; 2477 vma_iter_config(vmi, new->vm_start, new->vm_end); 2478 if (vma_iter_prealloc(vmi, new)) 2479 goto out_free_vma; 2480 2481 err = vma_dup_policy(vma, new); 2482 if (err) 2483 goto out_free_vmi; 2484 2485 err = anon_vma_clone(new, vma); 2486 if (err) 2487 goto out_free_mpol; 2488 2489 if (new->vm_file) 2490 get_file(new->vm_file); 2491 2492 if (new->vm_ops && new->vm_ops->open) 2493 new->vm_ops->open(new); 2494 2495 vma_start_write(vma); 2496 vma_start_write(new); 2497 2498 init_vma_prep(&vp, vma); 2499 vp.insert = new; 2500 vma_prepare(&vp); 2501 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); 2502 2503 if (new_below) { 2504 vma->vm_start = addr; 2505 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT; 2506 } else { 2507 vma->vm_end = addr; 2508 } 2509 2510 /* vma_complete stores the new vma */ 2511 vma_complete(&vp, vmi, vma->vm_mm); 2512 2513 /* Success. */ 2514 if (new_below) 2515 vma_next(vmi); 2516 return 0; 2517 2518 out_free_mpol: 2519 mpol_put(vma_policy(new)); 2520 out_free_vmi: 2521 vma_iter_free(vmi); 2522 out_free_vma: 2523 vm_area_free(new); 2524 return err; 2525 } 2526 2527 /* 2528 * Split a vma into two pieces at address 'addr', a new vma is allocated 2529 * either for the first part or the tail. 
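 *
 * For illustration, a sketch mirroring how vma_modify() below uses it:
 * to restrict a change to [start, end) within a larger VMA, callers
 * split off both ends that fall outside the range:
 *
 *	int err = 0;
 *
 *	if (vma->vm_start < start)
 *		err = split_vma(vmi, vma, start, 1);
 *	if (!err && vma->vm_end > end)
 *		err = split_vma(vmi, vma, end, 0);
 *
 * new_below == 1 keeps @vma as the upper piece, new_below == 0 keeps it
 * as the lower piece.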
2530 */ 2531 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, 2532 unsigned long addr, int new_below) 2533 { 2534 if (vma->vm_mm->map_count >= sysctl_max_map_count) 2535 return -ENOMEM; 2536 2537 return __split_vma(vmi, vma, addr, new_below); 2538 } 2539 2540 /* 2541 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd 2542 * context and anonymous VMA name within the range [start, end). 2543 * 2544 * As a result, we might be able to merge the newly modified VMA range with an 2545 * adjacent VMA with identical properties. 2546 * 2547 * If no merge is possible and the range does not span the entirety of the VMA, 2548 * we then need to split the VMA to accommodate the change. 2549 * 2550 * The function returns either the merged VMA, the original VMA if a split was 2551 * required instead, or an error if the split failed. 2552 */ 2553 struct vm_area_struct *vma_modify(struct vma_iterator *vmi, 2554 struct vm_area_struct *prev, 2555 struct vm_area_struct *vma, 2556 unsigned long start, unsigned long end, 2557 unsigned long vm_flags, 2558 struct mempolicy *policy, 2559 struct vm_userfaultfd_ctx uffd_ctx, 2560 struct anon_vma_name *anon_name) 2561 { 2562 pgoff_t pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); 2563 struct vm_area_struct *merged; 2564 2565 merged = vma_merge(vmi, prev, vma, start, end, vm_flags, 2566 pgoff, policy, uffd_ctx, anon_name); 2567 if (merged) 2568 return merged; 2569 2570 if (vma->vm_start < start) { 2571 int err = split_vma(vmi, vma, start, 1); 2572 2573 if (err) 2574 return ERR_PTR(err); 2575 } 2576 2577 if (vma->vm_end > end) { 2578 int err = split_vma(vmi, vma, end, 0); 2579 2580 if (err) 2581 return ERR_PTR(err); 2582 } 2583 2584 return vma; 2585 } 2586 2587 /* 2588 * Attempt to merge a newly mapped VMA with those adjacent to it. The caller 2589 * must ensure that [start, end) does not overlap any existing VMA. 2590 */ 2591 static struct vm_area_struct 2592 *vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev, 2593 struct vm_area_struct *vma, unsigned long start, 2594 unsigned long end, pgoff_t pgoff) 2595 { 2596 return vma_merge(vmi, prev, vma, start, end, vma->vm_flags, pgoff, 2597 vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma)); 2598 } 2599 2600 /* 2601 * Expand vma by delta bytes, potentially merging with an immediately adjacent 2602 * VMA with identical properties. 2603 */ 2604 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, 2605 struct vm_area_struct *vma, 2606 unsigned long delta) 2607 { 2608 pgoff_t pgoff = vma->vm_pgoff + vma_pages(vma); 2609 2610 /* vma is specified as prev, so case 1 or 2 will apply. */ 2611 return vma_merge(vmi, vma, vma, vma->vm_end, vma->vm_end + delta, 2612 vma->vm_flags, pgoff, vma_policy(vma), 2613 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); 2614 } 2615 2616 /* 2617 * do_vmi_align_munmap() - munmap the aligned region from @start to @end. 2618 * @vmi: The vma iterator 2619 * @vma: The starting vm_area_struct 2620 * @mm: The mm_struct 2621 * @start: The aligned start address to munmap. 2622 * @end: The aligned end address to munmap. 2623 * @uf: The userfaultfd list_head 2624 * @unlock: Set to true to drop the mmap_lock. unlocking only happens on 2625 * success. 2626 * 2627 * Return: 0 on success and drops the lock if so directed, error and leaves the 2628 * lock held otherwise. 
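 *
 * A typical caller (see do_vmi_munmap() below) looks up the first VMA
 * overlapping the range and hands it in, roughly:
 *
 *	vma = vma_find(vmi, end);
 *	if (vma)
 *		error = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
 *
 * (sketch only; the real caller also handles the empty-range case)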
2629 */ 2630 static int 2631 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, 2632 struct mm_struct *mm, unsigned long start, 2633 unsigned long end, struct list_head *uf, bool unlock) 2634 { 2635 struct vm_area_struct *prev, *next = NULL; 2636 struct maple_tree mt_detach; 2637 int count = 0; 2638 int error = -ENOMEM; 2639 unsigned long locked_vm = 0; 2640 MA_STATE(mas_detach, &mt_detach, 0, 0); 2641 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); 2642 mt_on_stack(mt_detach); 2643 2644 /* 2645 * If we need to split any vma, do it now to save pain later. 2646 * 2647 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially 2648 * unmapped vm_area_struct will remain in use: so lower split_vma 2649 * places tmp vma above, and higher split_vma places tmp vma below. 2650 */ 2651 2652 /* Does it split the first one? */ 2653 if (start > vma->vm_start) { 2654 2655 /* 2656 * Make sure that map_count on return from munmap() will 2657 * not exceed its limit; but let map_count go just above 2658 * its limit temporarily, to help free resources as expected. 2659 */ 2660 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) 2661 goto map_count_exceeded; 2662 2663 error = __split_vma(vmi, vma, start, 1); 2664 if (error) 2665 goto start_split_failed; 2666 } 2667 2668 /* 2669 * Detach a range of VMAs from the mm. Using next as a temp variable as 2670 * it is always overwritten. 2671 */ 2672 next = vma; 2673 do { 2674 /* Does it split the end? */ 2675 if (next->vm_end > end) { 2676 error = __split_vma(vmi, next, end, 0); 2677 if (error) 2678 goto end_split_failed; 2679 } 2680 vma_start_write(next); 2681 mas_set(&mas_detach, count); 2682 error = mas_store_gfp(&mas_detach, next, GFP_KERNEL); 2683 if (error) 2684 goto munmap_gather_failed; 2685 vma_mark_detached(next, true); 2686 if (next->vm_flags & VM_LOCKED) 2687 locked_vm += vma_pages(next); 2688 2689 count++; 2690 if (unlikely(uf)) { 2691 /* 2692 * If userfaultfd_unmap_prep returns an error the vmas 2693 * will remain split, but userland will get a 2694 * highly unexpected error anyway. This is no 2695 * different than the case where the first of the two 2696 * __split_vma fails, but we don't undo the first 2697 * split, despite we could. This is unlikely enough 2698 * failure that it's not worth optimizing it for. 2699 */ 2700 error = userfaultfd_unmap_prep(next, start, end, uf); 2701 2702 if (error) 2703 goto userfaultfd_error; 2704 } 2705 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE 2706 BUG_ON(next->vm_start < start); 2707 BUG_ON(next->vm_start > end); 2708 #endif 2709 } for_each_vma_range(*vmi, next, end); 2710 2711 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) 2712 /* Make sure no VMAs are about to be lost. 
*/ 2713 { 2714 MA_STATE(test, &mt_detach, 0, 0); 2715 struct vm_area_struct *vma_mas, *vma_test; 2716 int test_count = 0; 2717 2718 vma_iter_set(vmi, start); 2719 rcu_read_lock(); 2720 vma_test = mas_find(&test, count - 1); 2721 for_each_vma_range(*vmi, vma_mas, end) { 2722 BUG_ON(vma_mas != vma_test); 2723 test_count++; 2724 vma_test = mas_next(&test, count - 1); 2725 } 2726 rcu_read_unlock(); 2727 BUG_ON(count != test_count); 2728 } 2729 #endif 2730 2731 while (vma_iter_addr(vmi) > start) 2732 vma_iter_prev_range(vmi); 2733 2734 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL); 2735 if (error) 2736 goto clear_tree_failed; 2737 2738 /* Point of no return */ 2739 mm->locked_vm -= locked_vm; 2740 mm->map_count -= count; 2741 if (unlock) 2742 mmap_write_downgrade(mm); 2743 2744 prev = vma_iter_prev_range(vmi); 2745 next = vma_next(vmi); 2746 if (next) 2747 vma_iter_prev_range(vmi); 2748 2749 /* 2750 * We can free page tables without write-locking mmap_lock because VMAs 2751 * were isolated before we downgraded mmap_lock. 2752 */ 2753 mas_set(&mas_detach, 1); 2754 unmap_region(mm, &mas_detach, vma, prev, next, start, end, count, 2755 !unlock); 2756 /* Statistics and freeing VMAs */ 2757 mas_set(&mas_detach, 0); 2758 remove_mt(mm, &mas_detach); 2759 validate_mm(mm); 2760 if (unlock) 2761 mmap_read_unlock(mm); 2762 2763 __mt_destroy(&mt_detach); 2764 return 0; 2765 2766 clear_tree_failed: 2767 userfaultfd_error: 2768 munmap_gather_failed: 2769 end_split_failed: 2770 mas_set(&mas_detach, 0); 2771 mas_for_each(&mas_detach, next, end) 2772 vma_mark_detached(next, false); 2773 2774 __mt_destroy(&mt_detach); 2775 start_split_failed: 2776 map_count_exceeded: 2777 validate_mm(mm); 2778 return error; 2779 } 2780 2781 /* 2782 * do_vmi_munmap() - munmap a given range. 2783 * @vmi: The vma iterator 2784 * @mm: The mm_struct 2785 * @start: The start address to munmap 2786 * @len: The length of the range to munmap 2787 * @uf: The userfaultfd list_head 2788 * @unlock: set to true if the user wants to drop the mmap_lock on success 2789 * 2790 * This function takes a @vmi that is either pointing to the previous VMA or set 2791 * to MA_START and sets it up to remove the mapping(s). The @len will be 2792 * aligned and any arch_unmap work will be performed. 2793 * 2794 * Return: 0 on success and drops the lock if so directed, error and leaves the 2795 * lock held otherwise. 2796 */ 2797 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, 2798 unsigned long start, size_t len, struct list_head *uf, 2799 bool unlock) 2800 { 2801 unsigned long end; 2802 struct vm_area_struct *vma; 2803 2804 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) 2805 return -EINVAL; 2806 2807 end = start + PAGE_ALIGN(len); 2808 if (end == start) 2809 return -EINVAL; 2810 2811 /* 2812 * Check if memory is sealed before arch_unmap. 2813 * Prevent unmapping a sealed VMA. 2814 * can_modify_mm assumes we have acquired the lock on MM. 2815 */ 2816 if (unlikely(!can_modify_mm(mm, start, end))) 2817 return -EPERM; 2818 2819 /* arch_unmap() might do unmaps itself. */ 2820 arch_unmap(mm, start, end); 2821 2822 /* Find the first overlapping VMA */ 2823 vma = vma_find(vmi, end); 2824 if (!vma) { 2825 if (unlock) 2826 mmap_write_unlock(mm); 2827 return 0; 2828 } 2829 2830 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); 2831 } 2832 2833 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
2834 * @mm: The mm_struct 2835 * @start: The start address to munmap 2836 * @len: The length to be munmapped. 2837 * @uf: The userfaultfd list_head 2838 * 2839 * Return: 0 on success, error otherwise. 2840 */ 2841 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, 2842 struct list_head *uf) 2843 { 2844 VMA_ITERATOR(vmi, mm, start); 2845 2846 return do_vmi_munmap(&vmi, mm, start, len, uf, false); 2847 } 2848 2849 unsigned long mmap_region(struct file *file, unsigned long addr, 2850 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 2851 struct list_head *uf) 2852 { 2853 struct mm_struct *mm = current->mm; 2854 struct vm_area_struct *vma = NULL; 2855 struct vm_area_struct *next, *prev, *merge; 2856 pgoff_t pglen = len >> PAGE_SHIFT; 2857 unsigned long charged = 0; 2858 unsigned long end = addr + len; 2859 unsigned long merge_start = addr, merge_end = end; 2860 bool writable_file_mapping = false; 2861 pgoff_t vm_pgoff; 2862 int error; 2863 VMA_ITERATOR(vmi, mm, addr); 2864 2865 /* Check against address space limit. */ 2866 if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { 2867 unsigned long nr_pages; 2868 2869 /* 2870 * MAP_FIXED may remove pages of mappings that intersects with 2871 * requested mapping. Account for the pages it would unmap. 2872 */ 2873 nr_pages = count_vma_pages_range(mm, addr, end); 2874 2875 if (!may_expand_vm(mm, vm_flags, 2876 (len >> PAGE_SHIFT) - nr_pages)) 2877 return -ENOMEM; 2878 } 2879 2880 /* Unmap any existing mapping in the area */ 2881 error = do_vmi_munmap(&vmi, mm, addr, len, uf, false); 2882 if (error == -EPERM) 2883 return error; 2884 else if (error) 2885 return -ENOMEM; 2886 2887 /* 2888 * Private writable mapping: check memory availability 2889 */ 2890 if (accountable_mapping(file, vm_flags)) { 2891 charged = len >> PAGE_SHIFT; 2892 if (security_vm_enough_memory_mm(mm, charged)) 2893 return -ENOMEM; 2894 vm_flags |= VM_ACCOUNT; 2895 } 2896 2897 next = vma_next(&vmi); 2898 prev = vma_prev(&vmi); 2899 if (vm_flags & VM_SPECIAL) { 2900 if (prev) 2901 vma_iter_next_range(&vmi); 2902 goto cannot_expand; 2903 } 2904 2905 /* Attempt to expand an old mapping */ 2906 /* Check next */ 2907 if (next && next->vm_start == end && !vma_policy(next) && 2908 can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen, 2909 NULL_VM_UFFD_CTX, NULL)) { 2910 merge_end = next->vm_end; 2911 vma = next; 2912 vm_pgoff = next->vm_pgoff - pglen; 2913 } 2914 2915 /* Check prev */ 2916 if (prev && prev->vm_end == addr && !vma_policy(prev) && 2917 (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file, 2918 pgoff, vma->vm_userfaultfd_ctx, NULL) : 2919 can_vma_merge_after(prev, vm_flags, NULL, file, pgoff, 2920 NULL_VM_UFFD_CTX, NULL))) { 2921 merge_start = prev->vm_start; 2922 vma = prev; 2923 vm_pgoff = prev->vm_pgoff; 2924 } else if (prev) { 2925 vma_iter_next_range(&vmi); 2926 } 2927 2928 /* Actually expand, if possible */ 2929 if (vma && 2930 !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) { 2931 khugepaged_enter_vma(vma, vm_flags); 2932 goto expanded; 2933 } 2934 2935 if (vma == prev) 2936 vma_iter_set(&vmi, addr); 2937 cannot_expand: 2938 2939 /* 2940 * Determine the object being mapped and call the appropriate 2941 * specific mapper. the address has already been validated, but 2942 * not unmapped, but the maps are removed from the list. 
2943 */ 2944 vma = vm_area_alloc(mm); 2945 if (!vma) { 2946 error = -ENOMEM; 2947 goto unacct_error; 2948 } 2949 2950 vma_iter_config(&vmi, addr, end); 2951 vma_set_range(vma, addr, end, pgoff); 2952 vm_flags_init(vma, vm_flags); 2953 vma->vm_page_prot = vm_get_page_prot(vm_flags); 2954 2955 if (file) { 2956 vma->vm_file = get_file(file); 2957 error = call_mmap(file, vma); 2958 if (error) 2959 goto unmap_and_free_vma; 2960 2961 if (vma_is_shared_maywrite(vma)) { 2962 error = mapping_map_writable(file->f_mapping); 2963 if (error) 2964 goto close_and_free_vma; 2965 2966 writable_file_mapping = true; 2967 } 2968 2969 /* 2970 * Expansion is handled above, merging is handled below. 2971 * Drivers should not alter the address of the VMA. 2972 */ 2973 error = -EINVAL; 2974 if (WARN_ON((addr != vma->vm_start))) 2975 goto close_and_free_vma; 2976 2977 vma_iter_config(&vmi, addr, end); 2978 /* 2979 * If vm_flags changed after call_mmap(), we should try to merge 2980 * the vma again as we may succeed this time. 2981 */ 2982 if (unlikely(vm_flags != vma->vm_flags && prev)) { 2983 merge = vma_merge_new_vma(&vmi, prev, vma, 2984 vma->vm_start, vma->vm_end, 2985 vma->vm_pgoff); 2986 if (merge) { 2987 /* 2988 * ->mmap() can change vma->vm_file and fput 2989 * the original file. So fput the vma->vm_file 2990 * here or we would add an extra fput for file 2991 * and cause general protection fault 2992 * ultimately. 2993 */ 2994 fput(vma->vm_file); 2995 vm_area_free(vma); 2996 vma = merge; 2997 /* Update vm_flags to pick up the change. */ 2998 vm_flags = vma->vm_flags; 2999 goto unmap_writable; 3000 } 3001 } 3002 3003 vm_flags = vma->vm_flags; 3004 } else if (vm_flags & VM_SHARED) { 3005 error = shmem_zero_setup(vma); 3006 if (error) 3007 goto free_vma; 3008 } else { 3009 vma_set_anonymous(vma); 3010 } 3011 3012 if (map_deny_write_exec(vma, vma->vm_flags)) { 3013 error = -EACCES; 3014 goto close_and_free_vma; 3015 } 3016 3017 /* Allow architectures to sanity-check the vm_flags */ 3018 error = -EINVAL; 3019 if (!arch_validate_flags(vma->vm_flags)) 3020 goto close_and_free_vma; 3021 3022 error = -ENOMEM; 3023 if (vma_iter_prealloc(&vmi, vma)) 3024 goto close_and_free_vma; 3025 3026 /* Lock the VMA since it is modified after insertion into VMA tree */ 3027 vma_start_write(vma); 3028 vma_iter_store(&vmi, vma); 3029 mm->map_count++; 3030 vma_link_file(vma); 3031 3032 /* 3033 * vma_merge() calls khugepaged_enter_vma() too; the call 3034 * below covers the non-merge case. 3035 */ 3036 khugepaged_enter_vma(vma, vma->vm_flags); 3037 3038 /* Once vma denies write, undo our temporary denial count */ 3039 unmap_writable: 3040 if (writable_file_mapping) 3041 mapping_unmap_writable(file->f_mapping); 3042 file = vma->vm_file; 3043 ksm_add_vma(vma); 3044 expanded: 3045 perf_event_mmap(vma); 3046 3047 vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); 3048 if (vm_flags & VM_LOCKED) { 3049 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || 3050 is_vm_hugetlb_page(vma) || 3051 vma == get_gate_vma(current->mm)) 3052 vm_flags_clear(vma, VM_LOCKED_MASK); 3053 else 3054 mm->locked_vm += (len >> PAGE_SHIFT); 3055 } 3056 3057 if (file) 3058 uprobe_mmap(vma); 3059 3060 /* 3061 * A new (or expanded) vma always gets the soft-dirty status. 3062 * Otherwise the user-space soft-dirty page tracker won't 3063 * be able to distinguish the case where a vma area was unmapped 3064 * and then a new one mapped in place (which must be treated as 3065 * a completely new data area).
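 *
 * (Note: when CONFIG_MEM_SOFT_DIRTY is not enabled, VM_SOFTDIRTY is
 * defined as 0, so the flag update below is a no-op on such
 * configurations.)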
3066 */ 3067 vm_flags_set(vma, VM_SOFTDIRTY); 3068 3069 vma_set_page_prot(vma); 3070 3071 validate_mm(mm); 3072 return addr; 3073 3074 close_and_free_vma: 3075 if (file && vma->vm_ops && vma->vm_ops->close) 3076 vma->vm_ops->close(vma); 3077 3078 if (file || vma->vm_file) { 3079 unmap_and_free_vma: 3080 fput(vma->vm_file); 3081 vma->vm_file = NULL; 3082 3083 vma_iter_set(&vmi, vma->vm_end); 3084 /* Undo any partial mapping done by a device driver. */ 3085 unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start, 3086 vma->vm_end, vma->vm_end, true); 3087 } 3088 if (writable_file_mapping) 3089 mapping_unmap_writable(file->f_mapping); 3090 free_vma: 3091 vm_area_free(vma); 3092 unacct_error: 3093 if (charged) 3094 vm_unacct_memory(charged); 3095 validate_mm(mm); 3096 return error; 3097 } 3098 3099 static int __vm_munmap(unsigned long start, size_t len, bool unlock) 3100 { 3101 int ret; 3102 struct mm_struct *mm = current->mm; 3103 LIST_HEAD(uf); 3104 VMA_ITERATOR(vmi, mm, start); 3105 3106 if (mmap_write_lock_killable(mm)) 3107 return -EINTR; 3108 3109 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); 3110 if (ret || !unlock) 3111 mmap_write_unlock(mm); 3112 3113 userfaultfd_unmap_complete(mm, &uf); 3114 return ret; 3115 } 3116 3117 int vm_munmap(unsigned long start, size_t len) 3118 { 3119 return __vm_munmap(start, len, false); 3120 } 3121 EXPORT_SYMBOL(vm_munmap); 3122 3123 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 3124 { 3125 addr = untagged_addr(addr); 3126 return __vm_munmap(addr, len, true); 3127 } 3128 3129 3130 /* 3131 * Emulation of deprecated remap_file_pages() syscall. 3132 */ 3133 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, 3134 unsigned long, prot, unsigned long, pgoff, unsigned long, flags) 3135 { 3136 3137 struct mm_struct *mm = current->mm; 3138 struct vm_area_struct *vma; 3139 unsigned long populate = 0; 3140 unsigned long ret = -EINVAL; 3141 struct file *file; 3142 3143 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n", 3144 current->comm, current->pid); 3145 3146 if (prot) 3147 return ret; 3148 start = start & PAGE_MASK; 3149 size = size & PAGE_MASK; 3150 3151 if (start + size <= start) 3152 return ret; 3153 3154 /* Does pgoff wrap? */ 3155 if (pgoff + (size >> PAGE_SHIFT) < pgoff) 3156 return ret; 3157 3158 if (mmap_write_lock_killable(mm)) 3159 return -EINTR; 3160 3161 vma = vma_lookup(mm, start); 3162 3163 if (!vma || !(vma->vm_flags & VM_SHARED)) 3164 goto out; 3165 3166 if (start + size > vma->vm_end) { 3167 VMA_ITERATOR(vmi, mm, vma->vm_end); 3168 struct vm_area_struct *next, *prev = vma; 3169 3170 for_each_vma_range(vmi, next, start + size) { 3171 /* hole between vmas ? */ 3172 if (next->vm_start != prev->vm_end) 3173 goto out; 3174 3175 if (next->vm_file != vma->vm_file) 3176 goto out; 3177 3178 if (next->vm_flags != vma->vm_flags) 3179 goto out; 3180 3181 if (start + size <= next->vm_end) 3182 break; 3183 3184 prev = next; 3185 } 3186 3187 if (!next) 3188 goto out; 3189 } 3190 3191 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; 3192 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; 3193 prot |= vma->vm_flags & VM_EXEC ? 
PROT_EXEC : 0; 3194 3195 flags &= MAP_NONBLOCK; 3196 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; 3197 if (vma->vm_flags & VM_LOCKED) 3198 flags |= MAP_LOCKED; 3199 3200 file = get_file(vma->vm_file); 3201 ret = do_mmap(vma->vm_file, start, size, 3202 prot, flags, 0, pgoff, &populate, NULL); 3203 fput(file); 3204 out: 3205 mmap_write_unlock(mm); 3206 if (populate) 3207 mm_populate(ret, populate); 3208 if (!IS_ERR_VALUE(ret)) 3209 ret = 0; 3210 return ret; 3211 } 3212 3213 /* 3214 * do_vma_munmap() - Unmap a full or partial vma. 3215 * @vmi: The vma iterator pointing at the vma 3216 * @vma: The first vma to be munmapped 3217 * @start: The start of the address to unmap 3218 * @end: The end of the address to unmap 3219 * @uf: The userfaultfd list_head 3220 * @unlock: Drop the lock on success 3221 * 3222 * Unmaps a VMA mapping when the vma iterator is already in position. 3223 * Does not handle alignment. 3224 * 3225 * Return: 0 on success and drops the lock if so directed, error on failure and will 3226 * still hold the lock. 3227 */ 3228 int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, 3229 unsigned long start, unsigned long end, struct list_head *uf, 3230 bool unlock) 3231 { 3232 struct mm_struct *mm = vma->vm_mm; 3233 3234 /* 3235 * Check if memory is sealed before arch_unmap. 3236 * Prevent unmapping a sealed VMA. 3237 * can_modify_mm assumes we have acquired the lock on MM. 3238 */ 3239 if (unlikely(!can_modify_mm(mm, start, end))) 3240 return -EPERM; 3241 3242 arch_unmap(mm, start, end); 3243 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); 3244 } 3245 3246 /* 3247 * do_brk_flags() - Increase the brk vma if the flags match. 3248 * @vmi: The vma iterator 3249 * @addr: The start address 3250 * @len: The length of the increase 3251 * @vma: The vma 3252 * @flags: The VMA Flags 3253 * 3254 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags 3255 * do not match then create a new anonymous VMA. Eventually we may be able to 3256 * do some brk-specific accounting here. 3257 */ 3258 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, 3259 unsigned long addr, unsigned long len, unsigned long flags) 3260 { 3261 struct mm_struct *mm = current->mm; 3262 struct vma_prepare vp; 3263 3264 /* 3265 * Check against address space limits by the changed size 3266 * Note: This happens *after* clearing old mappings in some code paths. 3267 */ 3268 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 3269 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) 3270 return -ENOMEM; 3271 3272 if (mm->map_count > sysctl_max_map_count) 3273 return -ENOMEM; 3274 3275 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) 3276 return -ENOMEM; 3277 3278 /* 3279 * Expand the existing vma if possible; Note that singular lists do not 3280 * occur after forking, so the expand will only happen on new VMAs.
3281 */ 3282 if (vma && vma->vm_end == addr && !vma_policy(vma) && 3283 can_vma_merge_after(vma, flags, NULL, NULL, 3284 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) { 3285 vma_iter_config(vmi, vma->vm_start, addr + len); 3286 if (vma_iter_prealloc(vmi, vma)) 3287 goto unacct_fail; 3288 3289 vma_start_write(vma); 3290 3291 init_vma_prep(&vp, vma); 3292 vma_prepare(&vp); 3293 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); 3294 vma->vm_end = addr + len; 3295 vm_flags_set(vma, VM_SOFTDIRTY); 3296 vma_iter_store(vmi, vma); 3297 3298 vma_complete(&vp, vmi, mm); 3299 khugepaged_enter_vma(vma, flags); 3300 goto out; 3301 } 3302 3303 if (vma) 3304 vma_iter_next_range(vmi); 3305 /* create a vma struct for an anonymous mapping */ 3306 vma = vm_area_alloc(mm); 3307 if (!vma) 3308 goto unacct_fail; 3309 3310 vma_set_anonymous(vma); 3311 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); 3312 vm_flags_init(vma, flags); 3313 vma->vm_page_prot = vm_get_page_prot(flags); 3314 vma_start_write(vma); 3315 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) 3316 goto mas_store_fail; 3317 3318 mm->map_count++; 3319 validate_mm(mm); 3320 ksm_add_vma(vma); 3321 out: 3322 perf_event_mmap(vma); 3323 mm->total_vm += len >> PAGE_SHIFT; 3324 mm->data_vm += len >> PAGE_SHIFT; 3325 if (flags & VM_LOCKED) 3326 mm->locked_vm += (len >> PAGE_SHIFT); 3327 vm_flags_set(vma, VM_SOFTDIRTY); 3328 return 0; 3329 3330 mas_store_fail: 3331 vm_area_free(vma); 3332 unacct_fail: 3333 vm_unacct_memory(len >> PAGE_SHIFT); 3334 return -ENOMEM; 3335 } 3336 3337 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) 3338 { 3339 struct mm_struct *mm = current->mm; 3340 struct vm_area_struct *vma = NULL; 3341 unsigned long len; 3342 int ret; 3343 bool populate; 3344 LIST_HEAD(uf); 3345 VMA_ITERATOR(vmi, mm, addr); 3346 3347 len = PAGE_ALIGN(request); 3348 if (len < request) 3349 return -ENOMEM; 3350 if (!len) 3351 return 0; 3352 3353 /* Until we need other flags, refuse anything except VM_EXEC. */ 3354 if ((flags & (~VM_EXEC)) != 0) 3355 return -EINVAL; 3356 3357 if (mmap_write_lock_killable(mm)) 3358 return -EINTR; 3359 3360 ret = check_brk_limits(addr, len); 3361 if (ret) 3362 goto limits_failed; 3363 3364 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); 3365 if (ret) 3366 goto munmap_failed; 3367 3368 vma = vma_prev(&vmi); 3369 ret = do_brk_flags(&vmi, vma, addr, len, flags); 3370 populate = ((mm->def_flags & VM_LOCKED) != 0); 3371 mmap_write_unlock(mm); 3372 userfaultfd_unmap_complete(mm, &uf); 3373 if (populate && !ret) 3374 mm_populate(addr, len); 3375 return ret; 3376 3377 munmap_failed: 3378 limits_failed: 3379 mmap_write_unlock(mm); 3380 return ret; 3381 } 3382 EXPORT_SYMBOL(vm_brk_flags); 3383 3384 /* Release all mmaps. */ 3385 void exit_mmap(struct mm_struct *mm) 3386 { 3387 struct mmu_gather tlb; 3388 struct vm_area_struct *vma; 3389 unsigned long nr_accounted = 0; 3390 VMA_ITERATOR(vmi, mm, 0); 3391 int count = 0; 3392 3393 /* mm's last user has gone, and its about to be pulled down */ 3394 mmu_notifier_release(mm); 3395 3396 mmap_read_lock(mm); 3397 arch_exit_mmap(mm); 3398 3399 vma = vma_next(&vmi); 3400 if (!vma || unlikely(xa_is_zero(vma))) { 3401 /* Can happen if dup_mmap() received an OOM */ 3402 mmap_read_unlock(mm); 3403 mmap_write_lock(mm); 3404 goto destroy; 3405 } 3406 3407 lru_add_drain(); 3408 flush_cache_mm(mm); 3409 tlb_gather_mmu_fullmm(&tlb, mm); 3410 /* update_hiwater_rss(mm) here? 
but nobody should be looking */ 3411 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ 3412 unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false); 3413 mmap_read_unlock(mm); 3414 3415 /* 3416 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper 3417 * because the memory has been already freed. 3418 */ 3419 set_bit(MMF_OOM_SKIP, &mm->flags); 3420 mmap_write_lock(mm); 3421 mt_clear_in_rcu(&mm->mm_mt); 3422 vma_iter_set(&vmi, vma->vm_end); 3423 free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS, 3424 USER_PGTABLES_CEILING, true); 3425 tlb_finish_mmu(&tlb); 3426 3427 /* 3428 * Walk the list again, actually closing and freeing it, with preemption 3429 * enabled, without holding any MM locks besides the unreachable 3430 * mmap_write_lock. 3431 */ 3432 vma_iter_set(&vmi, vma->vm_end); 3433 do { 3434 if (vma->vm_flags & VM_ACCOUNT) 3435 nr_accounted += vma_pages(vma); 3436 remove_vma(vma, true); 3437 count++; 3438 cond_resched(); 3439 vma = vma_next(&vmi); 3440 } while (vma && likely(!xa_is_zero(vma))); 3441 3442 BUG_ON(count != mm->map_count); 3443 3444 trace_exit_mmap(mm); 3445 destroy: 3446 __mt_destroy(&mm->mm_mt); 3447 mmap_write_unlock(mm); 3448 vm_unacct_memory(nr_accounted); 3449 } 3450 3451 /* Insert vm structure into process list sorted by address 3452 * and into the inode's i_mmap tree. If vm_file is non-NULL 3453 * then i_mmap_rwsem is taken here. 3454 */ 3455 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) 3456 { 3457 unsigned long charged = vma_pages(vma); 3458 3459 3460 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) 3461 return -ENOMEM; 3462 3463 if ((vma->vm_flags & VM_ACCOUNT) && 3464 security_vm_enough_memory_mm(mm, charged)) 3465 return -ENOMEM; 3466 3467 /* 3468 * The vm_pgoff of a purely anonymous vma should be irrelevant 3469 * until its first write fault, when page's anon_vma and index 3470 * are set. But now set the vm_pgoff it will almost certainly 3471 * end up with (unless mremap moves it elsewhere before that 3472 * first wfault), so /proc/pid/maps tells a consistent story. 3473 * 3474 * By setting it to reflect the virtual start address of the 3475 * vma, merges and splits can happen in a seamless way, just 3476 * using the existing file pgoff checks and manipulations. 3477 * Similarly in do_mmap and in do_brk_flags. 3478 */ 3479 if (vma_is_anonymous(vma)) { 3480 BUG_ON(vma->anon_vma); 3481 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; 3482 } 3483 3484 if (vma_link(mm, vma)) { 3485 if (vma->vm_flags & VM_ACCOUNT) 3486 vm_unacct_memory(charged); 3487 return -ENOMEM; 3488 } 3489 3490 return 0; 3491 } 3492 3493 /* 3494 * Copy the vma structure to a new location in the same mm, 3495 * prior to moving page table entries, to effect an mremap move. 3496 */ 3497 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 3498 unsigned long addr, unsigned long len, pgoff_t pgoff, 3499 bool *need_rmap_locks) 3500 { 3501 struct vm_area_struct *vma = *vmap; 3502 unsigned long vma_start = vma->vm_start; 3503 struct mm_struct *mm = vma->vm_mm; 3504 struct vm_area_struct *new_vma, *prev; 3505 bool faulted_in_anon_vma = true; 3506 VMA_ITERATOR(vmi, mm, addr); 3507 3508 /* 3509 * If anonymous vma has not yet been faulted, update new pgoff 3510 * to match new location, to increase its chance of merging. 
3511 */ 3512 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { 3513 pgoff = addr >> PAGE_SHIFT; 3514 faulted_in_anon_vma = false; 3515 } 3516 3517 new_vma = find_vma_prev(mm, addr, &prev); 3518 if (new_vma && new_vma->vm_start < addr + len) 3519 return NULL; /* should never get here */ 3520 3521 new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff); 3522 if (new_vma) { 3523 /* 3524 * Source vma may have been merged into new_vma 3525 */ 3526 if (unlikely(vma_start >= new_vma->vm_start && 3527 vma_start < new_vma->vm_end)) { 3528 /* 3529 * The only way we can get a vma_merge with 3530 * self during an mremap is if the vma hasn't 3531 * been faulted in yet and we were allowed to 3532 * reset the dst vma->vm_pgoff to the 3533 * destination address of the mremap to allow 3534 * the merge to happen. mremap must change the 3535 * vm_pgoff linearity between src and dst vmas 3536 * (in turn preventing a vma_merge) to be 3537 * safe. It is only safe to keep the vm_pgoff 3538 * linear if there are no pages mapped yet. 3539 */ 3540 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); 3541 *vmap = vma = new_vma; 3542 } 3543 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); 3544 } else { 3545 new_vma = vm_area_dup(vma); 3546 if (!new_vma) 3547 goto out; 3548 vma_set_range(new_vma, addr, addr + len, pgoff); 3549 if (vma_dup_policy(vma, new_vma)) 3550 goto out_free_vma; 3551 if (anon_vma_clone(new_vma, vma)) 3552 goto out_free_mempol; 3553 if (new_vma->vm_file) 3554 get_file(new_vma->vm_file); 3555 if (new_vma->vm_ops && new_vma->vm_ops->open) 3556 new_vma->vm_ops->open(new_vma); 3557 if (vma_link(mm, new_vma)) 3558 goto out_vma_link; 3559 *need_rmap_locks = false; 3560 } 3561 return new_vma; 3562 3563 out_vma_link: 3564 if (new_vma->vm_ops && new_vma->vm_ops->close) 3565 new_vma->vm_ops->close(new_vma); 3566 3567 if (new_vma->vm_file) 3568 fput(new_vma->vm_file); 3569 3570 unlink_anon_vmas(new_vma); 3571 out_free_mempol: 3572 mpol_put(vma_policy(new_vma)); 3573 out_free_vma: 3574 vm_area_free(new_vma); 3575 out: 3576 return NULL; 3577 } 3578 3579 /* 3580 * Return true if the calling process may expand its vm space by the passed 3581 * number of pages 3582 */ 3583 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) 3584 { 3585 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) 3586 return false; 3587 3588 if (is_data_mapping(flags) && 3589 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { 3590 /* Workaround for Valgrind */ 3591 if (rlimit(RLIMIT_DATA) == 0 && 3592 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) 3593 return true; 3594 3595 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n", 3596 current->comm, current->pid, 3597 (mm->data_vm + npages) << PAGE_SHIFT, 3598 rlimit(RLIMIT_DATA), 3599 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); 3600 3601 if (!ignore_rlimit_data) 3602 return false; 3603 } 3604 3605 return true; 3606 } 3607 3608 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) 3609 { 3610 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); 3611 3612 if (is_exec_mapping(flags)) 3613 mm->exec_vm += npages; 3614 else if (is_stack_mapping(flags)) 3615 mm->stack_vm += npages; 3616 else if (is_data_mapping(flags)) 3617 mm->data_vm += npages; 3618 } 3619 3620 static vm_fault_t special_mapping_fault(struct vm_fault *vmf); 3621 3622 /* 3623 * Having a close hook prevents vma merging regardless of flags. 
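 *
 * For orientation, a sketch of how the special-mapping machinery below
 * is typically used (names here are purely illustrative; real users
 * include the arch vDSO setup code, and .pages must be a NULL-terminated
 * array of pages):
 *
 *	static struct page *example_pages[2];
 *	static struct vm_special_mapping example_mapping = {
 *		.name	= "[example]",
 *		.pages	= example_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD,
 *				       &example_mapping);
 *
 * after which special_mapping_fault() resolves faults from .pages and
 * special_mapping_name() reports .name in /proc/&lt;pid&gt;/maps.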
3624 */ 3625 static void special_mapping_close(struct vm_area_struct *vma) 3626 { 3627 } 3628 3629 static const char *special_mapping_name(struct vm_area_struct *vma) 3630 { 3631 return ((struct vm_special_mapping *)vma->vm_private_data)->name; 3632 } 3633 3634 static int special_mapping_mremap(struct vm_area_struct *new_vma) 3635 { 3636 struct vm_special_mapping *sm = new_vma->vm_private_data; 3637 3638 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) 3639 return -EFAULT; 3640 3641 if (sm->mremap) 3642 return sm->mremap(sm, new_vma); 3643 3644 return 0; 3645 } 3646 3647 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) 3648 { 3649 /* 3650 * Forbid splitting special mappings - kernel has expectations over 3651 * the number of pages in mapping. Together with VM_DONTEXPAND 3652 * the size of vma should stay the same over the special mapping's 3653 * lifetime. 3654 */ 3655 return -EINVAL; 3656 } 3657 3658 static const struct vm_operations_struct special_mapping_vmops = { 3659 .close = special_mapping_close, 3660 .fault = special_mapping_fault, 3661 .mremap = special_mapping_mremap, 3662 .name = special_mapping_name, 3663 /* vDSO code relies that VVAR can't be accessed remotely */ 3664 .access = NULL, 3665 .may_split = special_mapping_split, 3666 }; 3667 3668 static const struct vm_operations_struct legacy_special_mapping_vmops = { 3669 .close = special_mapping_close, 3670 .fault = special_mapping_fault, 3671 }; 3672 3673 static vm_fault_t special_mapping_fault(struct vm_fault *vmf) 3674 { 3675 struct vm_area_struct *vma = vmf->vma; 3676 pgoff_t pgoff; 3677 struct page **pages; 3678 3679 if (vma->vm_ops == &legacy_special_mapping_vmops) { 3680 pages = vma->vm_private_data; 3681 } else { 3682 struct vm_special_mapping *sm = vma->vm_private_data; 3683 3684 if (sm->fault) 3685 return sm->fault(sm, vmf->vma, vmf); 3686 3687 pages = sm->pages; 3688 } 3689 3690 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) 3691 pgoff--; 3692 3693 if (*pages) { 3694 struct page *page = *pages; 3695 get_page(page); 3696 vmf->page = page; 3697 return 0; 3698 } 3699 3700 return VM_FAULT_SIGBUS; 3701 } 3702 3703 static struct vm_area_struct *__install_special_mapping( 3704 struct mm_struct *mm, 3705 unsigned long addr, unsigned long len, 3706 unsigned long vm_flags, void *priv, 3707 const struct vm_operations_struct *ops) 3708 { 3709 int ret; 3710 struct vm_area_struct *vma; 3711 3712 vma = vm_area_alloc(mm); 3713 if (unlikely(vma == NULL)) 3714 return ERR_PTR(-ENOMEM); 3715 3716 vma_set_range(vma, addr, addr + len, 0); 3717 vm_flags_init(vma, (vm_flags | mm->def_flags | 3718 VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); 3719 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 3720 3721 vma->vm_ops = ops; 3722 vma->vm_private_data = priv; 3723 3724 ret = insert_vm_struct(mm, vma); 3725 if (ret) 3726 goto out; 3727 3728 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); 3729 3730 perf_event_mmap(vma); 3731 3732 return vma; 3733 3734 out: 3735 vm_area_free(vma); 3736 return ERR_PTR(ret); 3737 } 3738 3739 bool vma_is_special_mapping(const struct vm_area_struct *vma, 3740 const struct vm_special_mapping *sm) 3741 { 3742 return vma->vm_private_data == sm && 3743 (vma->vm_ops == &special_mapping_vmops || 3744 vma->vm_ops == &legacy_special_mapping_vmops); 3745 } 3746 3747 /* 3748 * Called with mm->mmap_lock held for writing. 3749 * Insert a new vma covering the given region, with the given flags. 3750 * Its pages are supplied by the given array of struct page *. 
3751 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. 3752 * The region past the last page supplied will always produce SIGBUS. 3753 * The array pointer and the pages it points to are assumed to stay alive 3754 * for as long as this mapping might exist. 3755 */ 3756 struct vm_area_struct *_install_special_mapping( 3757 struct mm_struct *mm, 3758 unsigned long addr, unsigned long len, 3759 unsigned long vm_flags, const struct vm_special_mapping *spec) 3760 { 3761 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, 3762 &special_mapping_vmops); 3763 } 3764 3765 int install_special_mapping(struct mm_struct *mm, 3766 unsigned long addr, unsigned long len, 3767 unsigned long vm_flags, struct page **pages) 3768 { 3769 struct vm_area_struct *vma = __install_special_mapping( 3770 mm, addr, len, vm_flags, (void *)pages, 3771 &legacy_special_mapping_vmops); 3772 3773 return PTR_ERR_OR_ZERO(vma); 3774 } 3775 3776 static DEFINE_MUTEX(mm_all_locks_mutex); 3777 3778 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) 3779 { 3780 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 3781 /* 3782 * The LSB of head.next can't change from under us 3783 * because we hold the mm_all_locks_mutex. 3784 */ 3785 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); 3786 /* 3787 * We can safely modify head.next after taking the 3788 * anon_vma->root->rwsem. If some other vma in this mm shares 3789 * the same anon_vma we won't take it again. 3790 * 3791 * No need of atomic instructions here, head.next 3792 * can't change from under us thanks to the 3793 * anon_vma->root->rwsem. 3794 */ 3795 if (__test_and_set_bit(0, (unsigned long *) 3796 &anon_vma->root->rb_root.rb_root.rb_node)) 3797 BUG(); 3798 } 3799 } 3800 3801 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) 3802 { 3803 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 3804 /* 3805 * AS_MM_ALL_LOCKS can't change from under us because 3806 * we hold the mm_all_locks_mutex. 3807 * 3808 * Operations on ->flags have to be atomic because 3809 * even if AS_MM_ALL_LOCKS is stable thanks to the 3810 * mm_all_locks_mutex, there may be other cpus 3811 * changing other bitflags in parallel to us. 3812 */ 3813 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) 3814 BUG(); 3815 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); 3816 } 3817 } 3818 3819 /* 3820 * This operation locks against the VM for all pte/vma/mm related 3821 * operations that could ever happen on a certain mm. This includes 3822 * vmtruncate, try_to_unmap, and all page faults. 3823 * 3824 * The caller must take the mmap_lock in write mode before calling 3825 * mm_take_all_locks(). The caller isn't allowed to release the 3826 * mmap_lock until mm_drop_all_locks() returns. 3827 * 3828 * mmap_lock in write mode is required in order to block all operations 3829 * that could modify pagetables and free pages without need of 3830 * altering the vma layout. It's also needed in write mode to avoid new 3831 * anon_vmas to be associated with existing vmas. 3832 * 3833 * A single task can't take more than one mm_take_all_locks() in a row 3834 * or it would deadlock. 3835 * 3836 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in 3837 * mapping->flags avoid to take the same lock twice, if more than one 3838 * vma in this mm is backed by the same anon_vma or address_space. 
/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_lock in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
 * mmap_lock until mm_drop_all_locks() returns.
 *
 * mmap_lock in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout. It's also needed in write mode to avoid new
 * anon_vmas being associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags ensure that the same lock is not taken twice if more than
 * one vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in the following order, according to the comment at the
 * beginning of mm/rmap.c:
 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *   hugetlb mappings);
 * - all vmas marked locked
 * - all i_mmap_rwsem locks;
 * - all anon_vma->rwsems
 *
 * We can take locks within each class in any order because the VM code
 * doesn't nest them and we are protected from parallel mm_take_all_locks()
 * by mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);

	mutex_lock(&mm_all_locks_mutex);

	/*
	 * vma_start_write() does not have a complement in mm_drop_all_locks()
	 * because vma_start_write() is always asymmetrical; it marks a VMA as
	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
	 * is reached.
	 */
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		vma_start_write(vma);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				!is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}

static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change to 0 from under
		 * us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}

static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}

/*
 * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for_each_vma(vmi, vma) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}
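/*
 * Sketch of the caller contract documented above: mmap_lock is taken for
 * writing first, the finer-grained locks are then taken with
 * mm_take_all_locks(), and everything is released in the reverse order.
 * example_update_whole_mm() is an illustrative placeholder, not a function
 * defined in this file (the mmu_notifier registration path is one real
 * user of this pattern).
 */
static int example_update_whole_mm(struct mm_struct *mm)
{
	int ret;

	mmap_write_lock(mm);
	ret = mm_take_all_locks(mm);
	if (ret) {
		/* -EINTR: a signal arrived while the locks were being taken */
		mmap_write_unlock(mm);
		return ret;
	}

	/* ... operate on the whole address space with everything locked ... */

	mm_drop_all_locks(mm);
	mmap_write_unlock(mm);
	return 0;
}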
/*
 * initialise the percpu counter for VM
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process in a way that leaves them unable to recover (kill the hog) in
 * OVERCOMMIT_NEVER mode.
 *
 * The default value is min(3% of free memory, 128MB).
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K);
	return 0;
}
subsys_initcall(init_user_reserve);

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K);
	return 0;
}
subsys_initcall(init_admin_reserve);
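/*
 * Worked example for the two defaults above (numbers are illustrative):
 * with 16 GiB free, free_kbytes / 32 is 524288 kB (512 MiB), so both
 * reserves are clamped to their caps of 128 MiB (SZ_128K kB) and 8 MiB
 * (SZ_8K kB).  On a 2 GiB machine the ~3% term wins for the user reserve
 * (2097152 / 32 = 65536 kB, i.e. 64 MiB) while the admin reserve still
 * caps at 8 MiB; only below 256 MiB free does the admin reserve also drop
 * under its cap.
 */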
/*
 * Reinitialise user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB. These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top. It may make sense to increase or even disable the
 * reserve depending on the existence of swap or variations in the recovery
 * tools. So, the admin may have changed them.
 *
 * If memory is added and the reserves have been eliminated or increased above
 * the default max, then we'll trust the admin.
 *
 * If memory is removed and there isn't enough free memory, then we
 * need to reset the reserves.
 *
 * Otherwise keep the reserve set by the admin.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	unsigned long tmp, free_kbytes;

	switch (action) {
	case MEM_ONLINE:
		/* Default max is 128MB. Leave alone if modified by operator. */
		tmp = sysctl_user_reserve_kbytes;
		if (tmp > 0 && tmp < SZ_128K)
			init_user_reserve();

		/* Default max is 8MB. Leave alone if modified by operator. */
		tmp = sysctl_admin_reserve_kbytes;
		if (tmp > 0 && tmp < SZ_8K)
			init_admin_reserve();

		break;
	case MEM_OFFLINE:
		free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

		if (sysctl_user_reserve_kbytes > free_kbytes) {
			init_user_reserve();
			pr_info("vm.user_reserve_kbytes reset to %lu\n",
				sysctl_user_reserve_kbytes);
		}

		if (sysctl_admin_reserve_kbytes > free_kbytes) {
			init_admin_reserve();
			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
				sysctl_admin_reserve_kbytes);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __meminit init_reserve_notifier(void)
{
	if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
		pr_err("Failed registering memory add/remove notifier for admin reserve\n");

	return 0;
}
subsys_initcall(init_reserve_notifier);