// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		struct vm_area_struct *next, unsigned long start,
		unsigned long end, unsigned long tree_end, bool mm_wr_locked);

static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_SHARED)
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, file, mapping);
		i_mmap_unlock_write(mapping);
	}
}

/*
 * Close a vm structure and free it.
 */
static void remove_vma(struct vm_area_struct *vma, bool unreachable)
{
	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	if (unreachable)
		__vm_area_free(vma);
	else
		vm_area_free(vma);
}

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * check_brk_limits() - Use platform specific check of range & verify mlock
 * limits.
 * @addr: The address to check
 * @len: The size of increase.
 *
 * Return: 0 on success.
 */
static int check_brk_limits(unsigned long addr, unsigned long len)
{
	unsigned long mapped_addr;

	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (IS_ERR_VALUE(mapped_addr))
		return mapped_addr;

	return mlock_future_ok(current->mm, current->mm->def_flags, len)
		? 0 : -EAGAIN;
}
static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		unsigned long addr, unsigned long request, unsigned long flags);
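/*
 * For illustration only (not part of the kernel's own code): userspace
 * typically reaches the syscall below through libc's sbrk(), which can be
 * built on the fact that sys_brk() returns the new break on success and the
 * old, unchanged break on failure.  A minimal sketch, using a hypothetical
 * sbrk_like() helper:
 *
 *	void *sbrk_like(intptr_t increment)
 *	{
 *		unsigned long old = syscall(SYS_brk, 0UL);	// query current break
 *
 *		if (!increment)
 *			return (void *)old;
 *		if ((unsigned long)syscall(SYS_brk, old + increment) != old + increment)
 *			return (void *)-1;			// break left unchanged
 *		return (void *)old;
 *	}
 */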
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *brkvma, *next = NULL;
	unsigned long min_brk;
	bool populate = false;
	LIST_HEAD(uf);
	struct vma_iterator vmi;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit in the case where the limit is
	 * not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		/* Search one past newbrk */
		vma_iter_init(&vmi, mm, newbrk);
		brkvma = vma_find(&vmi, oldbrk);
		if (!brkvma || brkvma->vm_start >= oldbrk)
			goto out; /* mapping intersects with an existing non-brk vma. */
		/*
		 * mm->brk must be protected by write mmap_lock.
		 * do_vma_munmap() will drop the lock on success, so update it
		 * before calling do_vma_munmap().
		 */
		mm->brk = brk;
		if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
			goto out;

		goto success_unlocked;
	}

	if (check_brk_limits(oldbrk, newbrk - oldbrk))
		goto out;

	/*
	 * Only check if the next VMA is within the stack_guard_gap of the
	 * expansion area
	 */
	vma_iter_init(&vmi, mm, oldbrk);
	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	brkvma = vma_prev_limit(&vmi, mm->start_brk);
	/* Ok, looks good - let it rip. */
	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
		goto out;

	mm->brk = brk;
	if (mm->def_flags & VM_LOCKED)
		populate = true;

success:
	mmap_write_unlock(mm);
success_unlocked:
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	mm->brk = origbrk;
	mmap_write_unlock(mm);
	return origbrk;
}

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
static void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}
#endif
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}

#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
#define validate_mm(mm) do { } while (0)
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
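/*
 * For illustration only: the maple tree iteration used by validate_mm()
 * above (and by count_vma_pages_range() below) is the usual way to walk
 * every VMA of an mm while the mmap_lock is held, e.g.:
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	struct vm_area_struct *vma;
 *
 *	mmap_read_lock(mm);
 *	for_each_vma(vmi, vma)
 *		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
 *	mmap_read_unlock(mm);
 */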
/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

static unsigned long count_vma_pages_range(struct mm_struct *mm,
		unsigned long addr, unsigned long end)
{
	VMA_ITERATOR(vmi, mm, addr);
	struct vm_area_struct *vma;
	unsigned long nr_pages = 0;

	for_each_vma_range(vmi, vma, end) {
		unsigned long vm_start = max(addr, vma->vm_start);
		unsigned long vm_end = min(end, vma->vm_end);

		nr_pages += PHYS_PFN(vm_end - vm_start);
	}

	return nr_pages;
}

static void __vma_link_file(struct vm_area_struct *vma,
			    struct address_space *mapping)
{
	if (vma->vm_flags & VM_SHARED)
		mapping_allow_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct address_space *mapping = NULL;

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	vma_start_write(vma);

	vma_iter_store(&vmi, vma);

	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;
		i_mmap_lock_write(mapping);
		__vma_link_file(vma, mapping);
		i_mmap_unlock_write(mapping);
	}

	mm->map_count++;
	validate_mm(mm);
	return 0;
}

/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @next: The next vma if it is to be adjusted
 * @remove: The first vma to be removed
 * @remove2: The second vma to be removed
 */
static inline void init_multi_vma_prep(struct vma_prepare *vp,
		struct vm_area_struct *vma, struct vm_area_struct *next,
		struct vm_area_struct *remove, struct vm_area_struct *remove2)
{
	memset(vp, 0, sizeof(struct vma_prepare));
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;
	vp->remove = remove;
	vp->remove2 = remove2;
	vp->adj_next = next;
	if (!vp->anon_vma && next)
		vp->anon_vma = next->anon_vma;

	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;

}

/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
static inline void init_vma_prep(struct vma_prepare *vp,
				 struct vm_area_struct *vma)
{
	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}


/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering
 * @vp: The initialized vma_prepare struct
 */
static inline void vma_prepare(struct vma_prepare *vp)
{
	if (vp->file) {
		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

		if (vp->adj_next)
			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
				      vp->adj_next->vm_end);

		i_mmap_lock_write(vp->mapping);
		if (vp->insert && vp->insert->vm_file) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(vp->insert,
					vp->insert->vm_file->f_mapping);
		}
	}

	if (vp->anon_vma) {
		anon_vma_lock_write(vp->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
	}

	if (vp->file) {
		flush_dcache_mmap_lock(vp->mapping);
		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
		if (vp->adj_next)
			vma_interval_tree_remove(vp->adj_next,
						 &vp->mapping->i_mmap);
	}

}

/*
 * vma_complete- Helper function for handling the unlocking after altering VMAs,
 * or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
static inline void vma_complete(struct vma_prepare *vp,
				struct vma_iterator *vmi, struct mm_struct *mm)
{
	if (vp->file) {
		if (vp->adj_next)
			vma_interval_tree_insert(vp->adj_next,
						 &vp->mapping->i_mmap);
		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
		flush_dcache_mmap_unlock(vp->mapping);
	}

	if (vp->remove && vp->file) {
		__remove_shared_vm_struct(vp->remove, vp->file, vp->mapping);
		if (vp->remove2)
			__remove_shared_vm_struct(vp->remove2, vp->file,
						  vp->mapping);
	} else if (vp->insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		vma_iter_store(vmi, vp->insert);
		mm->map_count++;
	}

	if (vp->anon_vma) {
		anon_vma_interval_tree_post_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_post_update_vma(vp->adj_next);
		anon_vma_unlock_write(vp->anon_vma);
	}

	if (vp->file) {
		i_mmap_unlock_write(vp->mapping);
		uprobe_mmap(vp->vma);

		if (vp->adj_next)
			uprobe_mmap(vp->adj_next);
	}

	if (vp->remove) {
again:
		vma_mark_detached(vp->remove, true);
		if (vp->file) {
			uprobe_munmap(vp->remove, vp->remove->vm_start,
				      vp->remove->vm_end);
			fput(vp->file);
		}
		if (vp->remove->anon_vma)
			anon_vma_merge(vp->vma, vp->remove);
		mm->map_count--;
		mpol_put(vma_policy(vp->remove));
		if (!vp->remove2)
			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
		vm_area_free(vp->remove);

		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we are removing both mid and next vmas
		 */
		if (vp->remove2) {
			vp->remove = vp->remove2;
			vp->remove2 = NULL;
			goto again;
		}
	}
	if (vp->insert && vp->file)
		uprobe_mmap(vp->insert);
	validate_mm(mm);
}
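/*
 * For illustration only, a simplified sketch of how the helpers above are
 * combined by vma_expand() and vma_shrink() below (error handling and
 * vma_adjust_trans_huge() omitted):
 *
 *	init_vma_prep(&vp, vma);		// or init_multi_vma_prep()
 *	vma_iter_config(vmi, start, end);
 *	if (vma_iter_prealloc(vmi, vma))	// reserve maple tree nodes
 *		return -ENOMEM;
 *	vma_prepare(&vp);			// take rmap/i_mmap locks
 *	vma->vm_start = start;			// modify the VMA
 *	vma->vm_end = end;
 *	vma->vm_pgoff = pgoff;
 *	vma_iter_store(vmi, vma);		// update the maple tree
 *	vma_complete(&vp, vmi, vma->vm_mm);	// drop locks, free removed vmas
 */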
589 */ 590 static inline int dup_anon_vma(struct vm_area_struct *dst, 591 struct vm_area_struct *src, struct vm_area_struct **dup) 592 { 593 /* 594 * Easily overlooked: when mprotect shifts the boundary, make sure the 595 * expanding vma has anon_vma set if the shrinking vma had, to cover any 596 * anon pages imported. 597 */ 598 if (src->anon_vma && !dst->anon_vma) { 599 int ret; 600 601 vma_assert_write_locked(dst); 602 dst->anon_vma = src->anon_vma; 603 ret = anon_vma_clone(dst, src); 604 if (ret) 605 return ret; 606 607 *dup = dst; 608 } 609 610 return 0; 611 } 612 613 /* 614 * vma_expand - Expand an existing VMA 615 * 616 * @vmi: The vma iterator 617 * @vma: The vma to expand 618 * @start: The start of the vma 619 * @end: The exclusive end of the vma 620 * @pgoff: The page offset of vma 621 * @next: The current of next vma. 622 * 623 * Expand @vma to @start and @end. Can expand off the start and end. Will 624 * expand over @next if it's different from @vma and @end == @next->vm_end. 625 * Checking if the @vma can expand and merge with @next needs to be handled by 626 * the caller. 627 * 628 * Returns: 0 on success 629 */ 630 int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, 631 unsigned long start, unsigned long end, pgoff_t pgoff, 632 struct vm_area_struct *next) 633 { 634 struct vm_area_struct *anon_dup = NULL; 635 bool remove_next = false; 636 struct vma_prepare vp; 637 638 vma_start_write(vma); 639 if (next && (vma != next) && (end == next->vm_end)) { 640 int ret; 641 642 remove_next = true; 643 vma_start_write(next); 644 ret = dup_anon_vma(vma, next, &anon_dup); 645 if (ret) 646 return ret; 647 } 648 649 init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL); 650 /* Not merging but overwriting any part of next is not handled. 
	VM_WARN_ON(next && !vp.remove &&
		   next != vma && end > next->vm_start);
	/* Only handles expanding */
	VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);

	/* Note: vma iterator must be pointing to 'start' */
	vma_iter_config(vmi, start, end);
	if (vma_iter_prealloc(vmi, vma))
		goto nomem;

	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
	vma_iter_store(vmi, vma);

	vma_complete(&vp, vmi, vma->vm_mm);
	return 0;

nomem:
	if (anon_dup)
		unlink_anon_vmas(anon_dup);
	return -ENOMEM;
}

/*
 * vma_shrink() - Reduce an existing VMAs memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

	if (vma->vm_start < start)
		vma_iter_config(vmi, vma->vm_start, start);
	else
		vma_iter_config(vmi, end, vma->vm_end);

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;

	vma_start_write(vma);

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);

	vma_iter_clear(vmi);
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
	vma_complete(&vp, vmi, vma->vm_mm);
	return 0;
}

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those if the caller indicates
 * the current vma may be removed as part of the merge.
 */
static inline bool is_mergeable_vma(struct vm_area_struct *vma,
		struct file *file, unsigned long vm_flags,
		struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name, bool may_remove_vma)
{
	/*
	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
	 * match the flags but dirty bit -- the caller should mark
	 * merged VMA as dirty. If dirty bit won't be excluded from
	 * comparison, we increase pressure on the memory system forcing
	 * the kernel to generate new VMAs when old one could be
	 * extended instead.
	 */
	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
		return false;
	if (vma->vm_file != file)
		return false;
	if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
		return false;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
		return false;
	if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
		return false;
	return true;
}

static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
		struct anon_vma *anon_vma2, struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMA cloned from
	 * parents. This can improve scalability caused by anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return true;
	return anon_vma1 == anon_vma2;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
static bool
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return true;
	}
	return false;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
static bool
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		pgoff_t vm_pglen;
		vm_pglen = vma_pages(vma);
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return true;
	}
	return false;
}
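/*
 * Worked example of the vm_pgoff checks above, assuming 4KiB pages: a
 * file-backed vma covering [0x100000, 0x104000) with vm_pgoff 16 maps file
 * pages 16-19.  A request for the same file at [0x104000, 0x108000) can
 * merge after that vma only if its pgoff is 16 + vma_pages(vma) = 20, and it
 * can merge before a following vma at 0x108000 only if that vma's vm_pgoff
 * is 20 + 4 = 24, i.e. only when the file offsets stay contiguous with the
 * virtual addresses.
 */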
/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
 * figure out whether that can be merged with its predecessor or its
 * successor.  Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where **** is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
 * at the same address as **** and is of the same or larger span, and
 * NNNN the next vma after ****:
 *
 *         ****               ****                   ****
 *     PPPPPPNNNNNN       PPPPPPNNNNNN       PPPPPPCCCCCC
 *     cannot merge       might become       might become
 *                        PPNNNNNNNNNN       PPPPPPPPPPCC
 *     mmap, brk or       case 4 below       case 5 below
 *     mremap move:
 *                            ****               ****
 *                        PPPP    NNNN       PPPPCCCCNNNN
 *                        might become       might become
 *                        PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
 *                        PPPPPPPPNNNN 2 or  PPPPPPPPNNNN 7 or
 *                        PPPPNNNNNNNN 3     PPPPNNNNNNNN 8
 *
 * It is important for case 8 that the vma CCCC overlapping the
 * region **** is never going to be extended over NNNN. Instead NNNN must
 * be extended in region **** and CCCC must be removed. This way in
 * all cases where vma_merge succeeds, the moment vma_merge drops the
 * rmap_locks, the properties of the merged vma will be already
 * correct for the whole merged range. Some of those properties like
 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
 * be correct for the whole merged range immediately after the
 * rmap_locks are released. Otherwise if NNNN would be removed and
 * CCCC would be extended over the NNNN range, remove_migration_ptes
 * or other rmap walkers (if working on addresses beyond the "end"
 * parameter) may establish ptes with the wrong permissions of CCCC
 * instead of the right permissions of NNNN.
 *
 * In the code below:
 * PPPP is represented by *prev
 * CCCC is represented by *curr or not represented at all (NULL)
 * NNNN is represented by *next or not represented at all (NULL)
 * **** is not represented - it will be merged and the vma containing the
 *      area is returned, or the function will return NULL
 */
struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy,
			struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
			struct anon_vma_name *anon_name)
{
	struct vm_area_struct *curr, *next, *res;
	struct vm_area_struct *vma, *adjust, *remove, *remove2;
	struct vm_area_struct *anon_dup = NULL;
	struct vma_prepare vp;
	pgoff_t vma_pgoff;
	int err = 0;
	bool merge_prev = false;
	bool merge_next = false;
	bool vma_expanded = false;
	unsigned long vma_start = addr;
	unsigned long vma_end = end;
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	long adj_start = 0;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	/* Does the input range span an existing VMA? (cases 5 - 8) */
	curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);

	if (!curr ||			/* cases 1 - 4 */
	    end == curr->vm_end)	/* cases 6 - 8, adjacent VMA */
		next = vma_lookup(mm, end);
	else
		next = NULL;		/* case 5 */

	if (prev) {
		vma_start = prev->vm_start;
		vma_pgoff = prev->vm_pgoff;

		/* Can we merge the predecessor? */
		if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
		    && can_vma_merge_after(prev, vm_flags, anon_vma, file,
					   pgoff, vm_userfaultfd_ctx, anon_name)) {
			merge_prev = true;
			vma_prev(vmi);
		}
	}

	/* Can we merge the successor? */
	if (next && mpol_equal(policy, vma_policy(next)) &&
	    can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
				 vm_userfaultfd_ctx, anon_name)) {
		merge_next = true;
	}

	/* Verify some invariant that must be enforced by the caller. */
	VM_WARN_ON(prev && addr <= prev->vm_start);
	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
	VM_WARN_ON(addr >= end);

	if (!merge_prev && !merge_next)
		return NULL; /* Not mergeable. */

	if (merge_prev)
		vma_start_write(prev);

	res = vma = prev;
	remove = remove2 = adjust = NULL;

	/* Can we merge both the predecessor and the successor? */
	if (merge_prev && merge_next &&
	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
		vma_start_write(next);
		remove = next;				/* case 1 */
		vma_end = next->vm_end;
		err = dup_anon_vma(prev, next, &anon_dup);
		if (curr) {				/* case 6 */
			vma_start_write(curr);
			remove = curr;
			remove2 = next;
			/*
			 * Note that the dup_anon_vma below cannot overwrite err
			 * since the first caller would do nothing unless next
			 * has an anon_vma.
			 */
			if (!next->anon_vma)
				err = dup_anon_vma(prev, curr, &anon_dup);
		}
	} else if (merge_prev) {			/* case 2 */
		if (curr) {
			vma_start_write(curr);
			err = dup_anon_vma(prev, curr, &anon_dup);
			if (end == curr->vm_end) {	/* case 7 */
				remove = curr;
			} else {			/* case 5 */
				adjust = curr;
				adj_start = (end - curr->vm_start);
			}
		}
	} else { /* merge_next */
		vma_start_write(next);
		res = next;
		if (prev && addr < prev->vm_end) {	/* case 4 */
			vma_start_write(prev);
			vma_end = addr;
			adjust = next;
			adj_start = -(prev->vm_end - addr);
			err = dup_anon_vma(next, prev, &anon_dup);
		} else {
			/*
			 * Note that cases 3 and 8 are the ONLY ones where prev
			 * is permitted to be (but is not necessarily) NULL.
			 */
			vma = next;			/* case 3 */
			vma_start = addr;
			vma_end = next->vm_end;
			vma_pgoff = next->vm_pgoff - pglen;
			if (curr) {			/* case 8 */
				vma_pgoff = curr->vm_pgoff;
				vma_start_write(curr);
				remove = curr;
				err = dup_anon_vma(next, curr, &anon_dup);
			}
		}
	}

	/* Error in anon_vma clone. */
	if (err)
		goto anon_vma_fail;

	if (vma_start < vma->vm_start || vma_end > vma->vm_end)
		vma_expanded = true;

	if (vma_expanded) {
		vma_iter_config(vmi, vma_start, vma_end);
	} else {
		vma_iter_config(vmi, adjust->vm_start + adj_start,
				adjust->vm_end);
	}

	if (vma_iter_prealloc(vmi, vma))
		goto prealloc_fail;

	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
		   vp.anon_vma != adjust->anon_vma);

	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);

	vma->vm_start = vma_start;
	vma->vm_end = vma_end;
	vma->vm_pgoff = vma_pgoff;

	if (vma_expanded)
		vma_iter_store(vmi, vma);

	if (adj_start) {
		adjust->vm_start += adj_start;
		adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
		if (adj_start < 0) {
			WARN_ON(vma_expanded);
			vma_iter_store(vmi, next);
		}
	}

	vma_complete(&vp, vmi, mm);
	khugepaged_enter_vma(res, vm_flags);
	return res;

prealloc_fail:
	if (anon_dup)
		unlink_anon_vmas(anon_dup);

anon_vma_fail:
	vma_iter_set(vmi, addr);
	vma_iter_load(vmi);
	return NULL;
}
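/*
 * For illustration only, the typical calling pattern (used, roughly, by the
 * mmap, brk and mprotect paths): try the merge first and fall back to
 * splitting or allocating a new VMA only when it fails:
 *
 *	vma = vma_merge(vmi, mm, prev, addr, end, vm_flags, anon_vma, file,
 *			pgoff, policy, vm_userfaultfd_ctx, anon_name);
 *	if (vma)
 *		return vma;	// merged into prev and/or next
 *	// otherwise allocate/split and insert a separate VMA
 */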
/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_lock held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that READ_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mmap_lock.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);
	struct anon_vma *anon_vma = NULL;
	struct vm_area_struct *prev, *next;

	/* Try next first. */
	next = mas_walk(&mas);
	if (next) {
		anon_vma = reusable_anon_vma(next, vma, next);
		if (anon_vma)
			return anon_vma;
	}

	prev = mas_prev(&mas, 0);
	VM_BUG_ON_VMA(prev != vma, vma);
	prev = mas_prev(&mas, 0);
	/* Try prev next. */
	if (prev)
		anon_vma = reusable_anon_vma(prev, prev, vma);

	/*
	 * We might reach here with anon_vma == NULL if we can't find
	 * any reusable anon_vma.
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return anon_vma;
}

/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}

bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
		     unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISBLK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISSOCK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	/* Special "we do even unsigned file positions" case */
	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
		return 0;

	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
	return ULONG_MAX;
}

static inline bool file_mmap_ok(struct file *file, struct inode *inode,
				unsigned long pgoff, unsigned long len)
{
	u64 maxsize = file_mmap_size_max(file, inode);

	if (maxsize && len > maxsize)
		return false;
	maxsize -= len;
	if (pgoff > maxsize >> PAGE_SHIFT)
		return false;
	return true;
}

/*
 * The caller must write-lock current->mm->mmap_lock.
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, vm_flags_t vm_flags,
			unsigned long pgoff, unsigned long *populate,
			struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	int pkey = 0;

	*populate = 0;

	if (!len)
		return -EINVAL;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we don't add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && path_noexec(&file->f_path)))
			prot |= PROT_EXEC;

	/* force arch specific MAP_FIXED handling in get_unmapped_area */
	if (flags & MAP_FIXED_NOREPLACE)
		flags |= MAP_FIXED;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/* Obtain the address to map to. we verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (flags & MAP_FIXED_NOREPLACE) {
		if (find_vma_intersection(mm, addr, addr + len))
			return -EEXIST;
	}

	if (prot == PROT_EXEC) {
		pkey = execute_only_pkey(mm);
		if (pkey < 0)
			pkey = 0;
	}

	/* Do simple checking here so the lower-level routines won't have
	 * to. we assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	if (flags & MAP_LOCKED)
		if (!can_do_mlock())
			return -EPERM;

	if (!mlock_future_ok(mm, vm_flags, len))
		return -EAGAIN;

	if (file) {
		struct inode *inode = file_inode(file);
		unsigned long flags_mask;

		if (!file_mmap_ok(file, inode, pgoff, len))
			return -EOVERFLOW;

		flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;

		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/*
			 * Force use of MAP_SHARED_VALIDATE with non-legacy
			 * flags. E.g. MAP_SYNC is dangerous to use with
			 * MAP_SHARED as you don't know which consistency model
			 * you will get. We silently ignore unsupported flags
			 * with MAP_SHARED to preserve backward compatibility.
			 */
			flags &= LEGACY_MAP_MASK;
			fallthrough;
		case MAP_SHARED_VALIDATE:
			if (flags & ~flags_mask)
				return -EOPNOTSUPP;
			if (prot & PROT_WRITE) {
				if (!(file->f_mode & FMODE_WRITE))
					return -EACCES;
				if (IS_SWAPFILE(file->f_mapping->host))
					return -ETXTBSY;
			}

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
			fallthrough;
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (path_noexec(&file->f_path)) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!file->f_op->mmap)
				return -ENODEV;
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if (flags & MAP_NORESERVE) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}

unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		file = fget(fd);
		if (!file)
			return -EBADF;
		if (is_file_hugepages(file)) {
			len = ALIGN(len, huge_page_size(hstate_file(file)));
		} else if (unlikely(flags & MAP_HUGETLB)) {
			retval = -EINVAL;
			goto out_fput;
		}
	} else if (flags & MAP_HUGETLB) {
		struct hstate *hs;

		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (!hs)
			return -EINVAL;

		len = ALIGN(len, huge_page_size(hs));
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
	if (file)
		fput(file);
	return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
{
	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
}

static bool vma_is_shared_writable(struct vm_area_struct *vma)
{
	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
		(VM_WRITE | VM_SHARED);
}

static bool vma_fs_can_writeback(struct vm_area_struct *vma)
{
	/* No managed pages to writeback. */
	if (vma->vm_flags & VM_PFNMAP)
		return false;

	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_can_writeback(vma->vm_file->f_mapping);
}
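/*
 * For example, a MAP_SHARED, PROT_WRITE mapping of a regular file on a
 * filesystem that can write back pages needs dirty tracking below, while a
 * MAP_PRIVATE or read-only shared mapping does not.
 */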
/*
 * Does this VMA require the underlying folios to have their dirty state
 * tracked?
 */
bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
{
	/* Only shared, writable VMAs require dirty tracking. */
	if (!vma_is_shared_writable(vma))
		return false;

	/* Does the filesystem need to be notified? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/*
	 * Even if the filesystem doesn't indicate a need for writenotify, if it
	 * can writeback, dirty tracking is still required.
	 */
	return vma_fs_can_writeback(vma);
}

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	/* If it was private or non-writable, the write bit is already clear */
	if (!vma_is_shared_writable(vma))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return 1;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
		return 0;

	/*
	 * Do we need to track softdirty? hugetlb does not support softdirty
	 * tracking yet.
	 */
	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
		return 1;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_wp(vma))
		return 1;

	/* Can the mapping track the dirty pages? */
	return vma_fs_can_writeback(vma);
}

/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return 0;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

/**
 * unmapped_area() - Find an area between the low_limit and the high_limit with
 * the correct alignment and offset, all from @info. Note: current->mm is used
 * for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;

	MA_STATE(mas, &current->mm->mm_mt, 0, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
		return -ENOMEM;

	gap = mas.index;
	gap += (info->align_offset - gap) & info->align_mask;
	tmp = mas_next(&mas, ULONG_MAX);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap + length - 1) {
			low_limit = tmp->vm_end;
			mas_reset(&mas);
			goto retry;
		}
	} else {
		tmp = mas_prev(&mas, 0);
		if (tmp && vm_end_gap(tmp) > gap) {
			low_limit = vm_end_gap(tmp);
			mas_reset(&mas);
			goto retry;
		}
	}

	return gap;
}

/**
 * unmapped_area_topdown() - Find an area between the low_limit and the
 * high_limit with the correct alignment and offset at the highest available
 * address, all from @info. Note: current->mm is used for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap, gap_end;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;

	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
		return -ENOMEM;

	gap = mas.last + 1 - info->length;
	gap -= (gap - info->align_offset) & info->align_mask;
	gap_end = mas.last;
	tmp = mas_next(&mas, ULONG_MAX);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) <= gap_end) {
			high_limit = vm_start_gap(tmp);
			mas_reset(&mas);
			goto retry;
		}
	} else {
		tmp = mas_prev(&mas, 0);
		if (tmp && vm_end_gap(tmp) > gap) {
			high_limit = tmp->vm_start;
			mas_reset(&mas);
			goto retry;
		}
	}

	return gap;
}
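/*
 * Worked example of the alignment step in unmapped_area() above: with
 * info->align_mask == 0x1fffff (2 MiB alignment) and info->align_offset == 0,
 * a gap found at 0x7f1234567000 is rounded up by
 *
 *	gap += (info->align_offset - gap) & info->align_mask;
 *
 * to 0x7f1234600000, the next 2 MiB boundary, so that
 * (addr & align_mask) == (align_offset & align_mask) as documented below.
 */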
/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */
unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long addr;

	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		addr = unmapped_area_topdown(info);
	else
		addr = unmapped_area(info);

	trace_vm_unmapped_area(addr, info);
	return addr;
}

/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct vm_unmapped_area_info info;
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = mmap_end;
	info.align_mask = 0;
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff,
		       unsigned long flags)
{
	return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
}
#endif

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	/* requested length too big for entire address space */
	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
	info.align_mask = 0;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = mmap_end;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags)
{
	return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
}
#endif

unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	unsigned long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	if (file) {
		if (file->f_op->get_unmapped_area)
			get_area = file->f_op->get_unmapped_area;
	} else if (flags & MAP_SHARED) {
		/*
		 * mmap_region() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge.
		 * do_mmap() will clear pgoff, so match alignment.
		 */
		pgoff = 0;
		get_area = shmem_get_unmapped_area;
	}

	addr = get_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (offset_in_page(addr))
		return -EINVAL;

	error = security_mmap_addr(addr);
	return error ? error : addr;
}

EXPORT_SYMBOL(get_unmapped_area);

/**
 * find_vma_intersection() - Look up the first VMA which intersects the interval
 * @mm: The process address space.
 * @start_addr: The inclusive start user address.
 * @end_addr: The exclusive end user address.
 *
 * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
 * start_addr < end_addr.
 */
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
					     unsigned long start_addr,
					     unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
EXPORT_SYMBOL(find_vma_intersection);

/**
 * find_vma() - Find the VMA for a given address, or the next VMA.
 * @mm: The mm_struct to check
 * @addr: The address
 *
 * Returns: The VMA associated with addr, or the next VMA.
 * May return %NULL in the case of no VMA at addr or above.
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	unsigned long index = addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, ULONG_MAX);
}
EXPORT_SYMBOL(find_vma);
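/*
 * Because find_vma() may return the VMA *after* @addr, callers that need a
 * mapping which actually contains the address use the classic pattern:
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, addr);
 *	if (!vma || addr < vma->vm_start)
 *		;	// no VMA covers addr
 *	mmap_read_unlock(mm);
 */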
1898 */ 1899 struct vm_area_struct * 1900 find_vma_prev(struct mm_struct *mm, unsigned long addr, 1901 struct vm_area_struct **pprev) 1902 { 1903 struct vm_area_struct *vma; 1904 MA_STATE(mas, &mm->mm_mt, addr, addr); 1905 1906 vma = mas_walk(&mas); 1907 *pprev = mas_prev(&mas, 0); 1908 if (!vma) 1909 vma = mas_next(&mas, ULONG_MAX); 1910 return vma; 1911 } 1912 1913 /* 1914 * Verify that the stack growth is acceptable and 1915 * update accounting. This is shared with both the 1916 * grow-up and grow-down cases. 1917 */ 1918 static int acct_stack_growth(struct vm_area_struct *vma, 1919 unsigned long size, unsigned long grow) 1920 { 1921 struct mm_struct *mm = vma->vm_mm; 1922 unsigned long new_start; 1923 1924 /* address space limit tests */ 1925 if (!may_expand_vm(mm, vma->vm_flags, grow)) 1926 return -ENOMEM; 1927 1928 /* Stack limit test */ 1929 if (size > rlimit(RLIMIT_STACK)) 1930 return -ENOMEM; 1931 1932 /* mlock limit tests */ 1933 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) 1934 return -ENOMEM; 1935 1936 /* Check to ensure the stack will not grow into a hugetlb-only region */ 1937 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : 1938 vma->vm_end - size; 1939 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) 1940 return -EFAULT; 1941 1942 /* 1943 * Overcommit.. This must be the final test, as it will 1944 * update security statistics. 1945 */ 1946 if (security_vm_enough_memory_mm(mm, grow)) 1947 return -ENOMEM; 1948 1949 return 0; 1950 } 1951 1952 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) 1953 /* 1954 * PA-RISC uses this for its stack; IA64 for its Register Backing Store. 1955 * vma is the last one with address > vma->vm_end. Have to extend vma. 1956 */ 1957 static int expand_upwards(struct vm_area_struct *vma, unsigned long address) 1958 { 1959 struct mm_struct *mm = vma->vm_mm; 1960 struct vm_area_struct *next; 1961 unsigned long gap_addr; 1962 int error = 0; 1963 MA_STATE(mas, &mm->mm_mt, vma->vm_start, address); 1964 1965 if (!(vma->vm_flags & VM_GROWSUP)) 1966 return -EFAULT; 1967 1968 /* Guard against exceeding limits of the address space. */ 1969 address &= PAGE_MASK; 1970 if (address >= (TASK_SIZE & PAGE_MASK)) 1971 return -ENOMEM; 1972 address += PAGE_SIZE; 1973 1974 /* Enforce stack_guard_gap */ 1975 gap_addr = address + stack_guard_gap; 1976 1977 /* Guard against overflow */ 1978 if (gap_addr < address || gap_addr > TASK_SIZE) 1979 gap_addr = TASK_SIZE; 1980 1981 next = find_vma_intersection(mm, vma->vm_end, gap_addr); 1982 if (next && vma_is_accessible(next)) { 1983 if (!(next->vm_flags & VM_GROWSUP)) 1984 return -ENOMEM; 1985 /* Check that both stack segments have the same anon_vma? */ 1986 } 1987 1988 if (next) 1989 mas_prev_range(&mas, address); 1990 1991 __mas_set_range(&mas, vma->vm_start, address - 1); 1992 if (mas_preallocate(&mas, vma, GFP_KERNEL)) 1993 return -ENOMEM; 1994 1995 /* We must make sure the anon_vma is allocated. */ 1996 if (unlikely(anon_vma_prepare(vma))) { 1997 mas_destroy(&mas); 1998 return -ENOMEM; 1999 } 2000 2001 /* Lock the VMA before expanding to prevent concurrent page faults */ 2002 vma_start_write(vma); 2003 /* 2004 * vma->vm_start/vm_end cannot change under us because the caller 2005 * is required to hold the mmap_lock in read mode. We need the 2006 * anon_vma lock to serialize against concurrent expand_stacks. 
2007 */ 2008 anon_vma_lock_write(vma->anon_vma); 2009 2010 /* Somebody else might have raced and expanded it already */ 2011 if (address > vma->vm_end) { 2012 unsigned long size, grow; 2013 2014 size = address - vma->vm_start; 2015 grow = (address - vma->vm_end) >> PAGE_SHIFT; 2016 2017 error = -ENOMEM; 2018 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { 2019 error = acct_stack_growth(vma, size, grow); 2020 if (!error) { 2021 /* 2022 * We only hold a shared mmap_lock lock here, so 2023 * we need to protect against concurrent vma 2024 * expansions. anon_vma_lock_write() doesn't 2025 * help here, as we don't guarantee that all 2026 * growable vmas in a mm share the same root 2027 * anon vma. So, we reuse mm->page_table_lock 2028 * to guard against concurrent vma expansions. 2029 */ 2030 spin_lock(&mm->page_table_lock); 2031 if (vma->vm_flags & VM_LOCKED) 2032 mm->locked_vm += grow; 2033 vm_stat_account(mm, vma->vm_flags, grow); 2034 anon_vma_interval_tree_pre_update_vma(vma); 2035 vma->vm_end = address; 2036 /* Overwrite old entry in mtree. */ 2037 mas_store_prealloc(&mas, vma); 2038 anon_vma_interval_tree_post_update_vma(vma); 2039 spin_unlock(&mm->page_table_lock); 2040 2041 perf_event_mmap(vma); 2042 } 2043 } 2044 } 2045 anon_vma_unlock_write(vma->anon_vma); 2046 khugepaged_enter_vma(vma, vma->vm_flags); 2047 mas_destroy(&mas); 2048 validate_mm(mm); 2049 return error; 2050 } 2051 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ 2052 2053 /* 2054 * vma is the first one with address < vma->vm_start. Have to extend vma. 2055 * mmap_lock held for writing. 2056 */ 2057 int expand_downwards(struct vm_area_struct *vma, unsigned long address) 2058 { 2059 struct mm_struct *mm = vma->vm_mm; 2060 MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start); 2061 struct vm_area_struct *prev; 2062 int error = 0; 2063 2064 if (!(vma->vm_flags & VM_GROWSDOWN)) 2065 return -EFAULT; 2066 2067 address &= PAGE_MASK; 2068 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) 2069 return -EPERM; 2070 2071 /* Enforce stack_guard_gap */ 2072 prev = mas_prev(&mas, 0); 2073 /* Check that both stack segments have the same anon_vma? */ 2074 if (prev) { 2075 if (!(prev->vm_flags & VM_GROWSDOWN) && 2076 vma_is_accessible(prev) && 2077 (address - prev->vm_end < stack_guard_gap)) 2078 return -ENOMEM; 2079 } 2080 2081 if (prev) 2082 mas_next_range(&mas, vma->vm_start); 2083 2084 __mas_set_range(&mas, address, vma->vm_end - 1); 2085 if (mas_preallocate(&mas, vma, GFP_KERNEL)) 2086 return -ENOMEM; 2087 2088 /* We must make sure the anon_vma is allocated. */ 2089 if (unlikely(anon_vma_prepare(vma))) { 2090 mas_destroy(&mas); 2091 return -ENOMEM; 2092 } 2093 2094 /* Lock the VMA before expanding to prevent concurrent page faults */ 2095 vma_start_write(vma); 2096 /* 2097 * vma->vm_start/vm_end cannot change under us because the caller 2098 * is required to hold the mmap_lock in read mode. We need the 2099 * anon_vma lock to serialize against concurrent expand_stacks. 2100 */ 2101 anon_vma_lock_write(vma->anon_vma); 2102 2103 /* Somebody else might have raced and expanded it already */ 2104 if (address < vma->vm_start) { 2105 unsigned long size, grow; 2106 2107 size = vma->vm_end - address; 2108 grow = (vma->vm_start - address) >> PAGE_SHIFT; 2109 2110 error = -ENOMEM; 2111 if (grow <= vma->vm_pgoff) { 2112 error = acct_stack_growth(vma, size, grow); 2113 if (!error) { 2114 /* 2115 * We only hold a shared mmap_lock lock here, so 2116 * we need to protect against concurrent vma 2117 * expansions. 
anon_vma_lock_write() doesn't 2118 * help here, as we don't guarantee that all 2119 * growable vmas in a mm share the same root 2120 * anon vma. So, we reuse mm->page_table_lock 2121 * to guard against concurrent vma expansions. 2122 */ 2123 spin_lock(&mm->page_table_lock); 2124 if (vma->vm_flags & VM_LOCKED) 2125 mm->locked_vm += grow; 2126 vm_stat_account(mm, vma->vm_flags, grow); 2127 anon_vma_interval_tree_pre_update_vma(vma); 2128 vma->vm_start = address; 2129 vma->vm_pgoff -= grow; 2130 /* Overwrite old entry in mtree. */ 2131 mas_store_prealloc(&mas, vma); 2132 anon_vma_interval_tree_post_update_vma(vma); 2133 spin_unlock(&mm->page_table_lock); 2134 2135 perf_event_mmap(vma); 2136 } 2137 } 2138 } 2139 anon_vma_unlock_write(vma->anon_vma); 2140 khugepaged_enter_vma(vma, vma->vm_flags); 2141 mas_destroy(&mas); 2142 validate_mm(mm); 2143 return error; 2144 } 2145 2146 /* enforced gap between the expanding stack and other mappings. */ 2147 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; 2148 2149 static int __init cmdline_parse_stack_guard_gap(char *p) 2150 { 2151 unsigned long val; 2152 char *endptr; 2153 2154 val = simple_strtoul(p, &endptr, 10); 2155 if (!*endptr) 2156 stack_guard_gap = val << PAGE_SHIFT; 2157 2158 return 1; 2159 } 2160 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); 2161 2162 #ifdef CONFIG_STACK_GROWSUP 2163 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) 2164 { 2165 return expand_upwards(vma, address); 2166 } 2167 2168 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) 2169 { 2170 struct vm_area_struct *vma, *prev; 2171 2172 addr &= PAGE_MASK; 2173 vma = find_vma_prev(mm, addr, &prev); 2174 if (vma && (vma->vm_start <= addr)) 2175 return vma; 2176 if (!prev) 2177 return NULL; 2178 if (expand_stack_locked(prev, addr)) 2179 return NULL; 2180 if (prev->vm_flags & VM_LOCKED) 2181 populate_vma_page_range(prev, addr, prev->vm_end, NULL); 2182 return prev; 2183 } 2184 #else 2185 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) 2186 { 2187 return expand_downwards(vma, address); 2188 } 2189 2190 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) 2191 { 2192 struct vm_area_struct *vma; 2193 unsigned long start; 2194 2195 addr &= PAGE_MASK; 2196 vma = find_vma(mm, addr); 2197 if (!vma) 2198 return NULL; 2199 if (vma->vm_start <= addr) 2200 return vma; 2201 start = vma->vm_start; 2202 if (expand_stack_locked(vma, addr)) 2203 return NULL; 2204 if (vma->vm_flags & VM_LOCKED) 2205 populate_vma_page_range(vma, addr, start, NULL); 2206 return vma; 2207 } 2208 #endif 2209 2210 /* 2211 * IA64 has some horrid mapping rules: it can expand both up and down, 2212 * but with various special rules. 2213 * 2214 * We'll get rid of this architecture eventually, so the ugliness is 2215 * temporary. 2216 */ 2217 #ifdef CONFIG_IA64 2218 static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr) 2219 { 2220 return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) && 2221 REGION_OFFSET(addr) < RGN_MAP_LIMIT; 2222 } 2223 2224 /* 2225 * IA64 stacks grow down, but there's a special register backing store 2226 * that can grow up. Only sequentially, though, so the new address must 2227 * match vm_end. 
2228 */ 2229 static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr) 2230 { 2231 if (!vma_expand_ok(vma, addr)) 2232 return -EFAULT; 2233 if (vma->vm_end != (addr & PAGE_MASK)) 2234 return -EFAULT; 2235 return expand_upwards(vma, addr); 2236 } 2237 2238 static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr) 2239 { 2240 if (!vma_expand_ok(vma, addr)) 2241 return -EFAULT; 2242 return expand_downwards(vma, addr); 2243 } 2244 2245 #elif defined(CONFIG_STACK_GROWSUP) 2246 2247 #define vma_expand_up(vma,addr) expand_upwards(vma, addr) 2248 #define vma_expand_down(vma, addr) (-EFAULT) 2249 2250 #else 2251 2252 #define vma_expand_up(vma,addr) (-EFAULT) 2253 #define vma_expand_down(vma, addr) expand_downwards(vma, addr) 2254 2255 #endif 2256 2257 /* 2258 * expand_stack(): legacy interface for page faulting. Don't use unless 2259 * you have to. 2260 * 2261 * This is called with the mm locked for reading, drops the lock, takes 2262 * the lock for writing, tries to look up a vma again, expands it if 2263 * necessary, and downgrades the lock to reading again. 2264 * 2265 * If no vma is found or it can't be expanded, it returns NULL and has 2266 * dropped the lock. 2267 */ 2268 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) 2269 { 2270 struct vm_area_struct *vma, *prev; 2271 2272 mmap_read_unlock(mm); 2273 if (mmap_write_lock_killable(mm)) 2274 return NULL; 2275 2276 vma = find_vma_prev(mm, addr, &prev); 2277 if (vma && vma->vm_start <= addr) 2278 goto success; 2279 2280 if (prev && !vma_expand_up(prev, addr)) { 2281 vma = prev; 2282 goto success; 2283 } 2284 2285 if (vma && !vma_expand_down(vma, addr)) 2286 goto success; 2287 2288 mmap_write_unlock(mm); 2289 return NULL; 2290 2291 success: 2292 mmap_write_downgrade(mm); 2293 return vma; 2294 } 2295 2296 /* 2297 * Ok - we have the memory areas we should free on a maple tree so release them, 2298 * and do the vma updates. 2299 * 2300 * Called with the mm semaphore held. 2301 */ 2302 static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas) 2303 { 2304 unsigned long nr_accounted = 0; 2305 struct vm_area_struct *vma; 2306 2307 /* Update high watermark before we lower total_vm */ 2308 update_hiwater_vm(mm); 2309 mas_for_each(mas, vma, ULONG_MAX) { 2310 long nrpages = vma_pages(vma); 2311 2312 if (vma->vm_flags & VM_ACCOUNT) 2313 nr_accounted += nrpages; 2314 vm_stat_account(mm, vma->vm_flags, -nrpages); 2315 remove_vma(vma, false); 2316 } 2317 vm_unacct_memory(nr_accounted); 2318 } 2319 2320 /* 2321 * Get rid of page table information in the indicated region. 2322 * 2323 * Called with the mm semaphore held. 2324 */ 2325 static void unmap_region(struct mm_struct *mm, struct ma_state *mas, 2326 struct vm_area_struct *vma, struct vm_area_struct *prev, 2327 struct vm_area_struct *next, unsigned long start, 2328 unsigned long end, unsigned long tree_end, bool mm_wr_locked) 2329 { 2330 struct mmu_gather tlb; 2331 unsigned long mt_start = mas->index; 2332 2333 lru_add_drain(); 2334 tlb_gather_mmu(&tlb, mm); 2335 update_hiwater_rss(mm); 2336 unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked); 2337 mas_set(mas, mt_start); 2338 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, 2339 next ? next->vm_start : USER_PGTABLES_CEILING, 2340 mm_wr_locked); 2341 tlb_finish_mmu(&tlb); 2342 } 2343 2344 /* 2345 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it 2346 * has already been checked or doesn't make sense to fail. 
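 * (A hedged split_vma() usage sketch sits just below; __split_vma() itself
 * follows it.)
 */

/*
 * Illustrative sketch only -- not part of mm/mmap.c's interfaces. It shows
 * how a caller holding the mmap_lock for writing might split a VMA at a
 * page-aligned address; example_split_below() is a hypothetical name.
 */
#if 0
static int example_split_below(struct vm_area_struct *vma, unsigned long addr)
{
	VMA_ITERATOR(vmi, vma->vm_mm, addr);

	/* new_below == 1: the newly allocated VMA covers [vm_start, addr). */
	return split_vma(&vmi, vma, addr, 1);
}
#endif

/*
 * Iterator state after __split_vma():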
2347 * VMA Iterator will point to the end VMA. 2348 */ 2349 int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, 2350 unsigned long addr, int new_below) 2351 { 2352 struct vma_prepare vp; 2353 struct vm_area_struct *new; 2354 int err; 2355 2356 WARN_ON(vma->vm_start >= addr); 2357 WARN_ON(vma->vm_end <= addr); 2358 2359 if (vma->vm_ops && vma->vm_ops->may_split) { 2360 err = vma->vm_ops->may_split(vma, addr); 2361 if (err) 2362 return err; 2363 } 2364 2365 new = vm_area_dup(vma); 2366 if (!new) 2367 return -ENOMEM; 2368 2369 if (new_below) { 2370 new->vm_end = addr; 2371 } else { 2372 new->vm_start = addr; 2373 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); 2374 } 2375 2376 err = -ENOMEM; 2377 vma_iter_config(vmi, new->vm_start, new->vm_end); 2378 if (vma_iter_prealloc(vmi, new)) 2379 goto out_free_vma; 2380 2381 err = vma_dup_policy(vma, new); 2382 if (err) 2383 goto out_free_vmi; 2384 2385 err = anon_vma_clone(new, vma); 2386 if (err) 2387 goto out_free_mpol; 2388 2389 if (new->vm_file) 2390 get_file(new->vm_file); 2391 2392 if (new->vm_ops && new->vm_ops->open) 2393 new->vm_ops->open(new); 2394 2395 vma_start_write(vma); 2396 vma_start_write(new); 2397 2398 init_vma_prep(&vp, vma); 2399 vp.insert = new; 2400 vma_prepare(&vp); 2401 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); 2402 2403 if (new_below) { 2404 vma->vm_start = addr; 2405 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT; 2406 } else { 2407 vma->vm_end = addr; 2408 } 2409 2410 /* vma_complete stores the new vma */ 2411 vma_complete(&vp, vmi, vma->vm_mm); 2412 2413 /* Success. */ 2414 if (new_below) 2415 vma_next(vmi); 2416 return 0; 2417 2418 out_free_mpol: 2419 mpol_put(vma_policy(new)); 2420 out_free_vmi: 2421 vma_iter_free(vmi); 2422 out_free_vma: 2423 vm_area_free(new); 2424 return err; 2425 } 2426 2427 /* 2428 * Split a vma into two pieces at address 'addr', a new vma is allocated 2429 * either for the first part or the tail. 2430 */ 2431 int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, 2432 unsigned long addr, int new_below) 2433 { 2434 if (vma->vm_mm->map_count >= sysctl_max_map_count) 2435 return -ENOMEM; 2436 2437 return __split_vma(vmi, vma, addr, new_below); 2438 } 2439 2440 /* 2441 * do_vmi_align_munmap() - munmap the aligned region from @start to @end. 2442 * @vmi: The vma iterator 2443 * @vma: The starting vm_area_struct 2444 * @mm: The mm_struct 2445 * @start: The aligned start address to munmap. 2446 * @end: The aligned end address to munmap. 2447 * @uf: The userfaultfd list_head 2448 * @unlock: Set to true to drop the mmap_lock. unlocking only happens on 2449 * success. 2450 * 2451 * Return: 0 on success and drops the lock if so directed, error and leaves the 2452 * lock held otherwise. 2453 */ 2454 static int 2455 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, 2456 struct mm_struct *mm, unsigned long start, 2457 unsigned long end, struct list_head *uf, bool unlock) 2458 { 2459 struct vm_area_struct *prev, *next = NULL; 2460 struct maple_tree mt_detach; 2461 int count = 0; 2462 int error = -ENOMEM; 2463 unsigned long locked_vm = 0; 2464 MA_STATE(mas_detach, &mt_detach, 0, 0); 2465 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); 2466 mt_on_stack(mt_detach); 2467 2468 /* 2469 * If we need to split any vma, do it now to save pain later. 
2470 * 2471 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially 2472 * unmapped vm_area_struct will remain in use: so lower split_vma 2473 * places tmp vma above, and higher split_vma places tmp vma below. 2474 */ 2475 2476 /* Does it split the first one? */ 2477 if (start > vma->vm_start) { 2478 2479 /* 2480 * Make sure that map_count on return from munmap() will 2481 * not exceed its limit; but let map_count go just above 2482 * its limit temporarily, to help free resources as expected. 2483 */ 2484 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) 2485 goto map_count_exceeded; 2486 2487 error = __split_vma(vmi, vma, start, 1); 2488 if (error) 2489 goto start_split_failed; 2490 } 2491 2492 /* 2493 * Detach a range of VMAs from the mm. Using next as a temp variable as 2494 * it is always overwritten. 2495 */ 2496 next = vma; 2497 do { 2498 /* Does it split the end? */ 2499 if (next->vm_end > end) { 2500 error = __split_vma(vmi, next, end, 0); 2501 if (error) 2502 goto end_split_failed; 2503 } 2504 vma_start_write(next); 2505 mas_set(&mas_detach, count); 2506 error = mas_store_gfp(&mas_detach, next, GFP_KERNEL); 2507 if (error) 2508 goto munmap_gather_failed; 2509 vma_mark_detached(next, true); 2510 if (next->vm_flags & VM_LOCKED) 2511 locked_vm += vma_pages(next); 2512 2513 count++; 2514 if (unlikely(uf)) { 2515 /* 2516 * If userfaultfd_unmap_prep returns an error the vmas 2517 * will remain split, but userland will get a 2518 * highly unexpected error anyway. This is no 2519 * different than the case where the first of the two 2520 * __split_vma fails, but we don't undo the first 2521 * split, despite we could. This is unlikely enough 2522 * failure that it's not worth optimizing it for. 2523 */ 2524 error = userfaultfd_unmap_prep(next, start, end, uf); 2525 2526 if (error) 2527 goto userfaultfd_error; 2528 } 2529 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE 2530 BUG_ON(next->vm_start < start); 2531 BUG_ON(next->vm_start > end); 2532 #endif 2533 } for_each_vma_range(*vmi, next, end); 2534 2535 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) 2536 /* Make sure no VMAs are about to be lost. */ 2537 { 2538 MA_STATE(test, &mt_detach, 0, 0); 2539 struct vm_area_struct *vma_mas, *vma_test; 2540 int test_count = 0; 2541 2542 vma_iter_set(vmi, start); 2543 rcu_read_lock(); 2544 vma_test = mas_find(&test, count - 1); 2545 for_each_vma_range(*vmi, vma_mas, end) { 2546 BUG_ON(vma_mas != vma_test); 2547 test_count++; 2548 vma_test = mas_next(&test, count - 1); 2549 } 2550 rcu_read_unlock(); 2551 BUG_ON(count != test_count); 2552 } 2553 #endif 2554 2555 while (vma_iter_addr(vmi) > start) 2556 vma_iter_prev_range(vmi); 2557 2558 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL); 2559 if (error) 2560 goto clear_tree_failed; 2561 2562 /* Point of no return */ 2563 mm->locked_vm -= locked_vm; 2564 mm->map_count -= count; 2565 if (unlock) 2566 mmap_write_downgrade(mm); 2567 2568 prev = vma_iter_prev_range(vmi); 2569 next = vma_next(vmi); 2570 if (next) 2571 vma_iter_prev_range(vmi); 2572 2573 /* 2574 * We can free page tables without write-locking mmap_lock because VMAs 2575 * were isolated before we downgraded mmap_lock. 
2576 */ 2577 mas_set(&mas_detach, 1); 2578 unmap_region(mm, &mas_detach, vma, prev, next, start, end, count, 2579 !unlock); 2580 /* Statistics and freeing VMAs */ 2581 mas_set(&mas_detach, 0); 2582 remove_mt(mm, &mas_detach); 2583 validate_mm(mm); 2584 if (unlock) 2585 mmap_read_unlock(mm); 2586 2587 __mt_destroy(&mt_detach); 2588 return 0; 2589 2590 clear_tree_failed: 2591 userfaultfd_error: 2592 munmap_gather_failed: 2593 end_split_failed: 2594 mas_set(&mas_detach, 0); 2595 mas_for_each(&mas_detach, next, end) 2596 vma_mark_detached(next, false); 2597 2598 __mt_destroy(&mt_detach); 2599 start_split_failed: 2600 map_count_exceeded: 2601 validate_mm(mm); 2602 return error; 2603 } 2604 2605 /* 2606 * do_vmi_munmap() - munmap a given range. 2607 * @vmi: The vma iterator 2608 * @mm: The mm_struct 2609 * @start: The start address to munmap 2610 * @len: The length of the range to munmap 2611 * @uf: The userfaultfd list_head 2612 * @unlock: set to true if the user wants to drop the mmap_lock on success 2613 * 2614 * This function takes a @mas that is either pointing to the previous VMA or set 2615 * to MA_START and sets it up to remove the mapping(s). The @len will be 2616 * aligned and any arch_unmap work will be preformed. 2617 * 2618 * Return: 0 on success and drops the lock if so directed, error and leaves the 2619 * lock held otherwise. 2620 */ 2621 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, 2622 unsigned long start, size_t len, struct list_head *uf, 2623 bool unlock) 2624 { 2625 unsigned long end; 2626 struct vm_area_struct *vma; 2627 2628 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) 2629 return -EINVAL; 2630 2631 end = start + PAGE_ALIGN(len); 2632 if (end == start) 2633 return -EINVAL; 2634 2635 /* arch_unmap() might do unmaps itself. */ 2636 arch_unmap(mm, start, end); 2637 2638 /* Find the first overlapping VMA */ 2639 vma = vma_find(vmi, end); 2640 if (!vma) { 2641 if (unlock) 2642 mmap_write_unlock(mm); 2643 return 0; 2644 } 2645 2646 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); 2647 } 2648 2649 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls. 2650 * @mm: The mm_struct 2651 * @start: The start address to munmap 2652 * @len: The length to be munmapped. 2653 * @uf: The userfaultfd list_head 2654 * 2655 * Return: 0 on success, error otherwise. 2656 */ 2657 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, 2658 struct list_head *uf) 2659 { 2660 VMA_ITERATOR(vmi, mm, start); 2661 2662 return do_vmi_munmap(&vmi, mm, start, len, uf, false); 2663 } 2664 2665 unsigned long mmap_region(struct file *file, unsigned long addr, 2666 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 2667 struct list_head *uf) 2668 { 2669 struct mm_struct *mm = current->mm; 2670 struct vm_area_struct *vma = NULL; 2671 struct vm_area_struct *next, *prev, *merge; 2672 pgoff_t pglen = len >> PAGE_SHIFT; 2673 unsigned long charged = 0; 2674 unsigned long end = addr + len; 2675 unsigned long merge_start = addr, merge_end = end; 2676 pgoff_t vm_pgoff; 2677 int error; 2678 VMA_ITERATOR(vmi, mm, addr); 2679 2680 /* Check against address space limit. */ 2681 if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { 2682 unsigned long nr_pages; 2683 2684 /* 2685 * MAP_FIXED may remove pages of mappings that intersects with 2686 * requested mapping. Account for the pages it would unmap. 
2687 */ 2688 nr_pages = count_vma_pages_range(mm, addr, end); 2689 2690 if (!may_expand_vm(mm, vm_flags, 2691 (len >> PAGE_SHIFT) - nr_pages)) 2692 return -ENOMEM; 2693 } 2694 2695 /* Unmap any existing mapping in the area */ 2696 if (do_vmi_munmap(&vmi, mm, addr, len, uf, false)) 2697 return -ENOMEM; 2698 2699 /* 2700 * Private writable mapping: check memory availability 2701 */ 2702 if (accountable_mapping(file, vm_flags)) { 2703 charged = len >> PAGE_SHIFT; 2704 if (security_vm_enough_memory_mm(mm, charged)) 2705 return -ENOMEM; 2706 vm_flags |= VM_ACCOUNT; 2707 } 2708 2709 next = vma_next(&vmi); 2710 prev = vma_prev(&vmi); 2711 if (vm_flags & VM_SPECIAL) { 2712 if (prev) 2713 vma_iter_next_range(&vmi); 2714 goto cannot_expand; 2715 } 2716 2717 /* Attempt to expand an old mapping */ 2718 /* Check next */ 2719 if (next && next->vm_start == end && !vma_policy(next) && 2720 can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen, 2721 NULL_VM_UFFD_CTX, NULL)) { 2722 merge_end = next->vm_end; 2723 vma = next; 2724 vm_pgoff = next->vm_pgoff - pglen; 2725 } 2726 2727 /* Check prev */ 2728 if (prev && prev->vm_end == addr && !vma_policy(prev) && 2729 (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file, 2730 pgoff, vma->vm_userfaultfd_ctx, NULL) : 2731 can_vma_merge_after(prev, vm_flags, NULL, file, pgoff, 2732 NULL_VM_UFFD_CTX, NULL))) { 2733 merge_start = prev->vm_start; 2734 vma = prev; 2735 vm_pgoff = prev->vm_pgoff; 2736 } else if (prev) { 2737 vma_iter_next_range(&vmi); 2738 } 2739 2740 /* Actually expand, if possible */ 2741 if (vma && 2742 !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) { 2743 khugepaged_enter_vma(vma, vm_flags); 2744 goto expanded; 2745 } 2746 2747 if (vma == prev) 2748 vma_iter_set(&vmi, addr); 2749 cannot_expand: 2750 2751 /* 2752 * Determine the object being mapped and call the appropriate 2753 * specific mapper. the address has already been validated, but 2754 * not unmapped, but the maps are removed from the list. 2755 */ 2756 vma = vm_area_alloc(mm); 2757 if (!vma) { 2758 error = -ENOMEM; 2759 goto unacct_error; 2760 } 2761 2762 vma_iter_config(&vmi, addr, end); 2763 vma->vm_start = addr; 2764 vma->vm_end = end; 2765 vm_flags_init(vma, vm_flags); 2766 vma->vm_page_prot = vm_get_page_prot(vm_flags); 2767 vma->vm_pgoff = pgoff; 2768 2769 if (file) { 2770 if (vm_flags & VM_SHARED) { 2771 error = mapping_map_writable(file->f_mapping); 2772 if (error) 2773 goto free_vma; 2774 } 2775 2776 vma->vm_file = get_file(file); 2777 error = call_mmap(file, vma); 2778 if (error) 2779 goto unmap_and_free_vma; 2780 2781 /* 2782 * Expansion is handled above, merging is handled below. 2783 * Drivers should not alter the address of the VMA. 2784 */ 2785 error = -EINVAL; 2786 if (WARN_ON((addr != vma->vm_start))) 2787 goto close_and_free_vma; 2788 2789 vma_iter_config(&vmi, addr, end); 2790 /* 2791 * If vm_flags changed after call_mmap(), we should try merge 2792 * vma again as we may succeed this time. 2793 */ 2794 if (unlikely(vm_flags != vma->vm_flags && prev)) { 2795 merge = vma_merge(&vmi, mm, prev, vma->vm_start, 2796 vma->vm_end, vma->vm_flags, NULL, 2797 vma->vm_file, vma->vm_pgoff, NULL, 2798 NULL_VM_UFFD_CTX, NULL); 2799 if (merge) { 2800 /* 2801 * ->mmap() can change vma->vm_file and fput 2802 * the original file. So fput the vma->vm_file 2803 * here or we would add an extra fput for file 2804 * and cause general protection fault 2805 * ultimately. 
2806 */ 2807 fput(vma->vm_file); 2808 vm_area_free(vma); 2809 vma = merge; 2810 /* Update vm_flags to pick up the change. */ 2811 vm_flags = vma->vm_flags; 2812 goto unmap_writable; 2813 } 2814 } 2815 2816 vm_flags = vma->vm_flags; 2817 } else if (vm_flags & VM_SHARED) { 2818 error = shmem_zero_setup(vma); 2819 if (error) 2820 goto free_vma; 2821 } else { 2822 vma_set_anonymous(vma); 2823 } 2824 2825 if (map_deny_write_exec(vma, vma->vm_flags)) { 2826 error = -EACCES; 2827 goto close_and_free_vma; 2828 } 2829 2830 /* Allow architectures to sanity-check the vm_flags */ 2831 error = -EINVAL; 2832 if (!arch_validate_flags(vma->vm_flags)) 2833 goto close_and_free_vma; 2834 2835 error = -ENOMEM; 2836 if (vma_iter_prealloc(&vmi, vma)) 2837 goto close_and_free_vma; 2838 2839 /* Lock the VMA since it is modified after insertion into VMA tree */ 2840 vma_start_write(vma); 2841 vma_iter_store(&vmi, vma); 2842 mm->map_count++; 2843 if (vma->vm_file) { 2844 i_mmap_lock_write(vma->vm_file->f_mapping); 2845 if (vma->vm_flags & VM_SHARED) 2846 mapping_allow_writable(vma->vm_file->f_mapping); 2847 2848 flush_dcache_mmap_lock(vma->vm_file->f_mapping); 2849 vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap); 2850 flush_dcache_mmap_unlock(vma->vm_file->f_mapping); 2851 i_mmap_unlock_write(vma->vm_file->f_mapping); 2852 } 2853 2854 /* 2855 * vma_merge() calls khugepaged_enter_vma() either, the below 2856 * call covers the non-merge case. 2857 */ 2858 khugepaged_enter_vma(vma, vma->vm_flags); 2859 2860 /* Once vma denies write, undo our temporary denial count */ 2861 unmap_writable: 2862 if (file && vm_flags & VM_SHARED) 2863 mapping_unmap_writable(file->f_mapping); 2864 file = vma->vm_file; 2865 ksm_add_vma(vma); 2866 expanded: 2867 perf_event_mmap(vma); 2868 2869 vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); 2870 if (vm_flags & VM_LOCKED) { 2871 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || 2872 is_vm_hugetlb_page(vma) || 2873 vma == get_gate_vma(current->mm)) 2874 vm_flags_clear(vma, VM_LOCKED_MASK); 2875 else 2876 mm->locked_vm += (len >> PAGE_SHIFT); 2877 } 2878 2879 if (file) 2880 uprobe_mmap(vma); 2881 2882 /* 2883 * New (or expanded) vma always get soft dirty status. 2884 * Otherwise user-space soft-dirty page tracker won't 2885 * be able to distinguish situation when vma area unmapped, 2886 * then new mapped in-place (which must be aimed as 2887 * a completely new data area). 2888 */ 2889 vm_flags_set(vma, VM_SOFTDIRTY); 2890 2891 vma_set_page_prot(vma); 2892 2893 validate_mm(mm); 2894 return addr; 2895 2896 close_and_free_vma: 2897 if (file && vma->vm_ops && vma->vm_ops->close) 2898 vma->vm_ops->close(vma); 2899 2900 if (file || vma->vm_file) { 2901 unmap_and_free_vma: 2902 fput(vma->vm_file); 2903 vma->vm_file = NULL; 2904 2905 vma_iter_set(&vmi, vma->vm_end); 2906 /* Undo any partial mapping done by a device driver. 
*/ 2907 unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start, 2908 vma->vm_end, vma->vm_end, true); 2909 } 2910 if (file && (vm_flags & VM_SHARED)) 2911 mapping_unmap_writable(file->f_mapping); 2912 free_vma: 2913 vm_area_free(vma); 2914 unacct_error: 2915 if (charged) 2916 vm_unacct_memory(charged); 2917 validate_mm(mm); 2918 return error; 2919 } 2920 2921 static int __vm_munmap(unsigned long start, size_t len, bool unlock) 2922 { 2923 int ret; 2924 struct mm_struct *mm = current->mm; 2925 LIST_HEAD(uf); 2926 VMA_ITERATOR(vmi, mm, start); 2927 2928 if (mmap_write_lock_killable(mm)) 2929 return -EINTR; 2930 2931 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); 2932 if (ret || !unlock) 2933 mmap_write_unlock(mm); 2934 2935 userfaultfd_unmap_complete(mm, &uf); 2936 return ret; 2937 } 2938 2939 int vm_munmap(unsigned long start, size_t len) 2940 { 2941 return __vm_munmap(start, len, false); 2942 } 2943 EXPORT_SYMBOL(vm_munmap); 2944 2945 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 2946 { 2947 addr = untagged_addr(addr); 2948 return __vm_munmap(addr, len, true); 2949 } 2950 2951 2952 /* 2953 * Emulation of deprecated remap_file_pages() syscall. 2954 */ 2955 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, 2956 unsigned long, prot, unsigned long, pgoff, unsigned long, flags) 2957 { 2958 2959 struct mm_struct *mm = current->mm; 2960 struct vm_area_struct *vma; 2961 unsigned long populate = 0; 2962 unsigned long ret = -EINVAL; 2963 struct file *file; 2964 2965 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n", 2966 current->comm, current->pid); 2967 2968 if (prot) 2969 return ret; 2970 start = start & PAGE_MASK; 2971 size = size & PAGE_MASK; 2972 2973 if (start + size <= start) 2974 return ret; 2975 2976 /* Does pgoff wrap? */ 2977 if (pgoff + (size >> PAGE_SHIFT) < pgoff) 2978 return ret; 2979 2980 if (mmap_write_lock_killable(mm)) 2981 return -EINTR; 2982 2983 vma = vma_lookup(mm, start); 2984 2985 if (!vma || !(vma->vm_flags & VM_SHARED)) 2986 goto out; 2987 2988 if (start + size > vma->vm_end) { 2989 VMA_ITERATOR(vmi, mm, vma->vm_end); 2990 struct vm_area_struct *next, *prev = vma; 2991 2992 for_each_vma_range(vmi, next, start + size) { 2993 /* hole between vmas ? */ 2994 if (next->vm_start != prev->vm_end) 2995 goto out; 2996 2997 if (next->vm_file != vma->vm_file) 2998 goto out; 2999 3000 if (next->vm_flags != vma->vm_flags) 3001 goto out; 3002 3003 if (start + size <= next->vm_end) 3004 break; 3005 3006 prev = next; 3007 } 3008 3009 if (!next) 3010 goto out; 3011 } 3012 3013 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; 3014 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; 3015 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; 3016 3017 flags &= MAP_NONBLOCK; 3018 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; 3019 if (vma->vm_flags & VM_LOCKED) 3020 flags |= MAP_LOCKED; 3021 3022 file = get_file(vma->vm_file); 3023 ret = do_mmap(vma->vm_file, start, size, 3024 prot, flags, 0, pgoff, &populate, NULL); 3025 fput(file); 3026 out: 3027 mmap_write_unlock(mm); 3028 if (populate) 3029 mm_populate(ret, populate); 3030 if (!IS_ERR_VALUE(ret)) 3031 ret = 0; 3032 return ret; 3033 } 3034 3035 /* 3036 * do_vma_munmap() - Unmap a full or partial vma. 
3037 * @vmi: The vma iterator pointing at the vma
3038 * @vma: The first vma to be munmapped
3039 * @start: The start of the address to unmap
3040 * @end: The end of the address to unmap
3041 * @uf: The userfaultfd list_head
3042 * @unlock: Drop the lock on success
3043 *
3044 * Unmaps a VMA mapping when the vma iterator is already in position.
3045 * Does not handle alignment.
3046 *
3047 * Return: 0 on success and drops the lock if so directed, error on failure
3048 * and will still hold the lock.
3049 */
3050 int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3051 		unsigned long start, unsigned long end, struct list_head *uf,
3052 		bool unlock)
3053 {
3054 	struct mm_struct *mm = vma->vm_mm;
3055 
3056 	arch_unmap(mm, start, end);
3057 	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
3058 }
3059 
3060 /*
3061  * do_brk_flags() - Increase the brk vma if the flags match.
3062  * @vmi: The vma iterator
3063  * @addr: The start address
3064  * @len: The length of the increase
3065  * @vma: The brk vma to extend, if possible (may be NULL)
3066  * @flags: The VMA flags
3067  *
3068  * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
3069  * do not match then create a new anonymous VMA. Eventually we may be able to
3070  * do some brk-specific accounting here.
3071  */
3072 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
3073 		unsigned long addr, unsigned long len, unsigned long flags)
3074 {
3075 	struct mm_struct *mm = current->mm;
3076 	struct vma_prepare vp;
3077 
3078 	/*
3079 	 * Check the changed size against address space limits.
3080 	 * Note: This happens *after* clearing old mappings in some code paths.
3081 	 */
3082 	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
3083 	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
3084 		return -ENOMEM;
3085 
3086 	if (mm->map_count > sysctl_max_map_count)
3087 		return -ENOMEM;
3088 
3089 	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
3090 		return -ENOMEM;
3091 
3092 	/*
3093 	 * Expand the existing vma if possible; note that singular lists do not
3094 	 * occur after forking, so the expand will only happen on new VMAs.
3095 */ 3096 if (vma && vma->vm_end == addr && !vma_policy(vma) && 3097 can_vma_merge_after(vma, flags, NULL, NULL, 3098 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) { 3099 vma_iter_config(vmi, vma->vm_start, addr + len); 3100 if (vma_iter_prealloc(vmi, vma)) 3101 goto unacct_fail; 3102 3103 vma_start_write(vma); 3104 3105 init_vma_prep(&vp, vma); 3106 vma_prepare(&vp); 3107 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); 3108 vma->vm_end = addr + len; 3109 vm_flags_set(vma, VM_SOFTDIRTY); 3110 vma_iter_store(vmi, vma); 3111 3112 vma_complete(&vp, vmi, mm); 3113 khugepaged_enter_vma(vma, flags); 3114 goto out; 3115 } 3116 3117 if (vma) 3118 vma_iter_next_range(vmi); 3119 /* create a vma struct for an anonymous mapping */ 3120 vma = vm_area_alloc(mm); 3121 if (!vma) 3122 goto unacct_fail; 3123 3124 vma_set_anonymous(vma); 3125 vma->vm_start = addr; 3126 vma->vm_end = addr + len; 3127 vma->vm_pgoff = addr >> PAGE_SHIFT; 3128 vm_flags_init(vma, flags); 3129 vma->vm_page_prot = vm_get_page_prot(flags); 3130 vma_start_write(vma); 3131 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) 3132 goto mas_store_fail; 3133 3134 mm->map_count++; 3135 validate_mm(mm); 3136 ksm_add_vma(vma); 3137 out: 3138 perf_event_mmap(vma); 3139 mm->total_vm += len >> PAGE_SHIFT; 3140 mm->data_vm += len >> PAGE_SHIFT; 3141 if (flags & VM_LOCKED) 3142 mm->locked_vm += (len >> PAGE_SHIFT); 3143 vm_flags_set(vma, VM_SOFTDIRTY); 3144 return 0; 3145 3146 mas_store_fail: 3147 vm_area_free(vma); 3148 unacct_fail: 3149 vm_unacct_memory(len >> PAGE_SHIFT); 3150 return -ENOMEM; 3151 } 3152 3153 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) 3154 { 3155 struct mm_struct *mm = current->mm; 3156 struct vm_area_struct *vma = NULL; 3157 unsigned long len; 3158 int ret; 3159 bool populate; 3160 LIST_HEAD(uf); 3161 VMA_ITERATOR(vmi, mm, addr); 3162 3163 len = PAGE_ALIGN(request); 3164 if (len < request) 3165 return -ENOMEM; 3166 if (!len) 3167 return 0; 3168 3169 /* Until we need other flags, refuse anything except VM_EXEC. */ 3170 if ((flags & (~VM_EXEC)) != 0) 3171 return -EINVAL; 3172 3173 if (mmap_write_lock_killable(mm)) 3174 return -EINTR; 3175 3176 ret = check_brk_limits(addr, len); 3177 if (ret) 3178 goto limits_failed; 3179 3180 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); 3181 if (ret) 3182 goto munmap_failed; 3183 3184 vma = vma_prev(&vmi); 3185 ret = do_brk_flags(&vmi, vma, addr, len, flags); 3186 populate = ((mm->def_flags & VM_LOCKED) != 0); 3187 mmap_write_unlock(mm); 3188 userfaultfd_unmap_complete(mm, &uf); 3189 if (populate && !ret) 3190 mm_populate(addr, len); 3191 return ret; 3192 3193 munmap_failed: 3194 limits_failed: 3195 mmap_write_unlock(mm); 3196 return ret; 3197 } 3198 EXPORT_SYMBOL(vm_brk_flags); 3199 3200 int vm_brk(unsigned long addr, unsigned long len) 3201 { 3202 return vm_brk_flags(addr, len, 0); 3203 } 3204 EXPORT_SYMBOL(vm_brk); 3205 3206 /* Release all mmaps. 
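 * (A hedged vm_brk_flags() usage sketch follows; exit_mmap() comes after it.)
 */

/*
 * Illustrative sketch only -- not part of mm/mmap.c's interfaces. Binary
 * loaders are the typical in-kernel callers of vm_brk_flags(); the name
 * example_map_zero_segment() is hypothetical.
 */
#if 0
static int example_map_zero_segment(unsigned long start, unsigned long len,
				    bool executable)
{
	/* VM_EXEC is the only flag vm_brk_flags() currently accepts. */
	return vm_brk_flags(start, len, executable ? VM_EXEC : 0);
}
#endif

/*
 * exit_mmap() - release every mapping of an mm that is being torn down.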
*/ 3207 void exit_mmap(struct mm_struct *mm) 3208 { 3209 struct mmu_gather tlb; 3210 struct vm_area_struct *vma; 3211 unsigned long nr_accounted = 0; 3212 MA_STATE(mas, &mm->mm_mt, 0, 0); 3213 int count = 0; 3214 3215 /* mm's last user has gone, and its about to be pulled down */ 3216 mmu_notifier_release(mm); 3217 3218 mmap_read_lock(mm); 3219 arch_exit_mmap(mm); 3220 3221 vma = mas_find(&mas, ULONG_MAX); 3222 if (!vma) { 3223 /* Can happen if dup_mmap() received an OOM */ 3224 mmap_read_unlock(mm); 3225 return; 3226 } 3227 3228 lru_add_drain(); 3229 flush_cache_mm(mm); 3230 tlb_gather_mmu_fullmm(&tlb, mm); 3231 /* update_hiwater_rss(mm) here? but nobody should be looking */ 3232 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ 3233 unmap_vmas(&tlb, &mas, vma, 0, ULONG_MAX, ULONG_MAX, false); 3234 mmap_read_unlock(mm); 3235 3236 /* 3237 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper 3238 * because the memory has been already freed. 3239 */ 3240 set_bit(MMF_OOM_SKIP, &mm->flags); 3241 mmap_write_lock(mm); 3242 mt_clear_in_rcu(&mm->mm_mt); 3243 mas_set(&mas, vma->vm_end); 3244 free_pgtables(&tlb, &mas, vma, FIRST_USER_ADDRESS, 3245 USER_PGTABLES_CEILING, true); 3246 tlb_finish_mmu(&tlb); 3247 3248 /* 3249 * Walk the list again, actually closing and freeing it, with preemption 3250 * enabled, without holding any MM locks besides the unreachable 3251 * mmap_write_lock. 3252 */ 3253 mas_set(&mas, vma->vm_end); 3254 do { 3255 if (vma->vm_flags & VM_ACCOUNT) 3256 nr_accounted += vma_pages(vma); 3257 remove_vma(vma, true); 3258 count++; 3259 cond_resched(); 3260 } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL); 3261 3262 BUG_ON(count != mm->map_count); 3263 3264 trace_exit_mmap(mm); 3265 __mt_destroy(&mm->mm_mt); 3266 mmap_write_unlock(mm); 3267 vm_unacct_memory(nr_accounted); 3268 } 3269 3270 /* Insert vm structure into process list sorted by address 3271 * and into the inode's i_mmap tree. If vm_file is non-NULL 3272 * then i_mmap_rwsem is taken here. 3273 */ 3274 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) 3275 { 3276 unsigned long charged = vma_pages(vma); 3277 3278 3279 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) 3280 return -ENOMEM; 3281 3282 if ((vma->vm_flags & VM_ACCOUNT) && 3283 security_vm_enough_memory_mm(mm, charged)) 3284 return -ENOMEM; 3285 3286 /* 3287 * The vm_pgoff of a purely anonymous vma should be irrelevant 3288 * until its first write fault, when page's anon_vma and index 3289 * are set. But now set the vm_pgoff it will almost certainly 3290 * end up with (unless mremap moves it elsewhere before that 3291 * first wfault), so /proc/pid/maps tells a consistent story. 3292 * 3293 * By setting it to reflect the virtual start address of the 3294 * vma, merges and splits can happen in a seamless way, just 3295 * using the existing file pgoff checks and manipulations. 3296 * Similarly in do_mmap and in do_brk_flags. 3297 */ 3298 if (vma_is_anonymous(vma)) { 3299 BUG_ON(vma->anon_vma); 3300 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; 3301 } 3302 3303 if (vma_link(mm, vma)) { 3304 if (vma->vm_flags & VM_ACCOUNT) 3305 vm_unacct_memory(charged); 3306 return -ENOMEM; 3307 } 3308 3309 return 0; 3310 } 3311 3312 /* 3313 * Copy the vma structure to a new location in the same mm, 3314 * prior to moving page table entries, to effect an mremap move. 
3315 */ 3316 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 3317 unsigned long addr, unsigned long len, pgoff_t pgoff, 3318 bool *need_rmap_locks) 3319 { 3320 struct vm_area_struct *vma = *vmap; 3321 unsigned long vma_start = vma->vm_start; 3322 struct mm_struct *mm = vma->vm_mm; 3323 struct vm_area_struct *new_vma, *prev; 3324 bool faulted_in_anon_vma = true; 3325 VMA_ITERATOR(vmi, mm, addr); 3326 3327 /* 3328 * If anonymous vma has not yet been faulted, update new pgoff 3329 * to match new location, to increase its chance of merging. 3330 */ 3331 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { 3332 pgoff = addr >> PAGE_SHIFT; 3333 faulted_in_anon_vma = false; 3334 } 3335 3336 new_vma = find_vma_prev(mm, addr, &prev); 3337 if (new_vma && new_vma->vm_start < addr + len) 3338 return NULL; /* should never get here */ 3339 3340 new_vma = vma_merge(&vmi, mm, prev, addr, addr + len, vma->vm_flags, 3341 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), 3342 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); 3343 if (new_vma) { 3344 /* 3345 * Source vma may have been merged into new_vma 3346 */ 3347 if (unlikely(vma_start >= new_vma->vm_start && 3348 vma_start < new_vma->vm_end)) { 3349 /* 3350 * The only way we can get a vma_merge with 3351 * self during an mremap is if the vma hasn't 3352 * been faulted in yet and we were allowed to 3353 * reset the dst vma->vm_pgoff to the 3354 * destination address of the mremap to allow 3355 * the merge to happen. mremap must change the 3356 * vm_pgoff linearity between src and dst vmas 3357 * (in turn preventing a vma_merge) to be 3358 * safe. It is only safe to keep the vm_pgoff 3359 * linear if there are no pages mapped yet. 3360 */ 3361 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); 3362 *vmap = vma = new_vma; 3363 } 3364 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); 3365 } else { 3366 new_vma = vm_area_dup(vma); 3367 if (!new_vma) 3368 goto out; 3369 new_vma->vm_start = addr; 3370 new_vma->vm_end = addr + len; 3371 new_vma->vm_pgoff = pgoff; 3372 if (vma_dup_policy(vma, new_vma)) 3373 goto out_free_vma; 3374 if (anon_vma_clone(new_vma, vma)) 3375 goto out_free_mempol; 3376 if (new_vma->vm_file) 3377 get_file(new_vma->vm_file); 3378 if (new_vma->vm_ops && new_vma->vm_ops->open) 3379 new_vma->vm_ops->open(new_vma); 3380 if (vma_link(mm, new_vma)) 3381 goto out_vma_link; 3382 *need_rmap_locks = false; 3383 } 3384 return new_vma; 3385 3386 out_vma_link: 3387 if (new_vma->vm_ops && new_vma->vm_ops->close) 3388 new_vma->vm_ops->close(new_vma); 3389 3390 if (new_vma->vm_file) 3391 fput(new_vma->vm_file); 3392 3393 unlink_anon_vmas(new_vma); 3394 out_free_mempol: 3395 mpol_put(vma_policy(new_vma)); 3396 out_free_vma: 3397 vm_area_free(new_vma); 3398 out: 3399 return NULL; 3400 } 3401 3402 /* 3403 * Return true if the calling process may expand its vm space by the passed 3404 * number of pages 3405 */ 3406 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) 3407 { 3408 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) 3409 return false; 3410 3411 if (is_data_mapping(flags) && 3412 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { 3413 /* Workaround for Valgrind */ 3414 if (rlimit(RLIMIT_DATA) == 0 && 3415 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) 3416 return true; 3417 3418 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. 
Update limits%s.\n", 3419 current->comm, current->pid, 3420 (mm->data_vm + npages) << PAGE_SHIFT, 3421 rlimit(RLIMIT_DATA), 3422 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); 3423 3424 if (!ignore_rlimit_data) 3425 return false; 3426 } 3427 3428 return true; 3429 } 3430 3431 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) 3432 { 3433 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); 3434 3435 if (is_exec_mapping(flags)) 3436 mm->exec_vm += npages; 3437 else if (is_stack_mapping(flags)) 3438 mm->stack_vm += npages; 3439 else if (is_data_mapping(flags)) 3440 mm->data_vm += npages; 3441 } 3442 3443 static vm_fault_t special_mapping_fault(struct vm_fault *vmf); 3444 3445 /* 3446 * Having a close hook prevents vma merging regardless of flags. 3447 */ 3448 static void special_mapping_close(struct vm_area_struct *vma) 3449 { 3450 } 3451 3452 static const char *special_mapping_name(struct vm_area_struct *vma) 3453 { 3454 return ((struct vm_special_mapping *)vma->vm_private_data)->name; 3455 } 3456 3457 static int special_mapping_mremap(struct vm_area_struct *new_vma) 3458 { 3459 struct vm_special_mapping *sm = new_vma->vm_private_data; 3460 3461 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) 3462 return -EFAULT; 3463 3464 if (sm->mremap) 3465 return sm->mremap(sm, new_vma); 3466 3467 return 0; 3468 } 3469 3470 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) 3471 { 3472 /* 3473 * Forbid splitting special mappings - kernel has expectations over 3474 * the number of pages in mapping. Together with VM_DONTEXPAND 3475 * the size of vma should stay the same over the special mapping's 3476 * lifetime. 3477 */ 3478 return -EINVAL; 3479 } 3480 3481 static const struct vm_operations_struct special_mapping_vmops = { 3482 .close = special_mapping_close, 3483 .fault = special_mapping_fault, 3484 .mremap = special_mapping_mremap, 3485 .name = special_mapping_name, 3486 /* vDSO code relies that VVAR can't be accessed remotely */ 3487 .access = NULL, 3488 .may_split = special_mapping_split, 3489 }; 3490 3491 static const struct vm_operations_struct legacy_special_mapping_vmops = { 3492 .close = special_mapping_close, 3493 .fault = special_mapping_fault, 3494 }; 3495 3496 static vm_fault_t special_mapping_fault(struct vm_fault *vmf) 3497 { 3498 struct vm_area_struct *vma = vmf->vma; 3499 pgoff_t pgoff; 3500 struct page **pages; 3501 3502 if (vma->vm_ops == &legacy_special_mapping_vmops) { 3503 pages = vma->vm_private_data; 3504 } else { 3505 struct vm_special_mapping *sm = vma->vm_private_data; 3506 3507 if (sm->fault) 3508 return sm->fault(sm, vmf->vma, vmf); 3509 3510 pages = sm->pages; 3511 } 3512 3513 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) 3514 pgoff--; 3515 3516 if (*pages) { 3517 struct page *page = *pages; 3518 get_page(page); 3519 vmf->page = page; 3520 return 0; 3521 } 3522 3523 return VM_FAULT_SIGBUS; 3524 } 3525 3526 static struct vm_area_struct *__install_special_mapping( 3527 struct mm_struct *mm, 3528 unsigned long addr, unsigned long len, 3529 unsigned long vm_flags, void *priv, 3530 const struct vm_operations_struct *ops) 3531 { 3532 int ret; 3533 struct vm_area_struct *vma; 3534 3535 vma = vm_area_alloc(mm); 3536 if (unlikely(vma == NULL)) 3537 return ERR_PTR(-ENOMEM); 3538 3539 vma->vm_start = addr; 3540 vma->vm_end = addr + len; 3541 3542 vm_flags_init(vma, (vm_flags | mm->def_flags | 3543 VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); 3544 vma->vm_page_prot = 
vm_get_page_prot(vma->vm_flags); 3545 3546 vma->vm_ops = ops; 3547 vma->vm_private_data = priv; 3548 3549 ret = insert_vm_struct(mm, vma); 3550 if (ret) 3551 goto out; 3552 3553 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); 3554 3555 perf_event_mmap(vma); 3556 3557 return vma; 3558 3559 out: 3560 vm_area_free(vma); 3561 return ERR_PTR(ret); 3562 } 3563 3564 bool vma_is_special_mapping(const struct vm_area_struct *vma, 3565 const struct vm_special_mapping *sm) 3566 { 3567 return vma->vm_private_data == sm && 3568 (vma->vm_ops == &special_mapping_vmops || 3569 vma->vm_ops == &legacy_special_mapping_vmops); 3570 } 3571 3572 /* 3573 * Called with mm->mmap_lock held for writing. 3574 * Insert a new vma covering the given region, with the given flags. 3575 * Its pages are supplied by the given array of struct page *. 3576 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. 3577 * The region past the last page supplied will always produce SIGBUS. 3578 * The array pointer and the pages it points to are assumed to stay alive 3579 * for as long as this mapping might exist. 3580 */ 3581 struct vm_area_struct *_install_special_mapping( 3582 struct mm_struct *mm, 3583 unsigned long addr, unsigned long len, 3584 unsigned long vm_flags, const struct vm_special_mapping *spec) 3585 { 3586 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, 3587 &special_mapping_vmops); 3588 } 3589 3590 int install_special_mapping(struct mm_struct *mm, 3591 unsigned long addr, unsigned long len, 3592 unsigned long vm_flags, struct page **pages) 3593 { 3594 struct vm_area_struct *vma = __install_special_mapping( 3595 mm, addr, len, vm_flags, (void *)pages, 3596 &legacy_special_mapping_vmops); 3597 3598 return PTR_ERR_OR_ZERO(vma); 3599 } 3600 3601 static DEFINE_MUTEX(mm_all_locks_mutex); 3602 3603 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) 3604 { 3605 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 3606 /* 3607 * The LSB of head.next can't change from under us 3608 * because we hold the mm_all_locks_mutex. 3609 */ 3610 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); 3611 /* 3612 * We can safely modify head.next after taking the 3613 * anon_vma->root->rwsem. If some other vma in this mm shares 3614 * the same anon_vma we won't take it again. 3615 * 3616 * No need of atomic instructions here, head.next 3617 * can't change from under us thanks to the 3618 * anon_vma->root->rwsem. 3619 */ 3620 if (__test_and_set_bit(0, (unsigned long *) 3621 &anon_vma->root->rb_root.rb_root.rb_node)) 3622 BUG(); 3623 } 3624 } 3625 3626 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) 3627 { 3628 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 3629 /* 3630 * AS_MM_ALL_LOCKS can't change from under us because 3631 * we hold the mm_all_locks_mutex. 3632 * 3633 * Operations on ->flags have to be atomic because 3634 * even if AS_MM_ALL_LOCKS is stable thanks to the 3635 * mm_all_locks_mutex, there may be other cpus 3636 * changing other bitflags in parallel to us. 3637 */ 3638 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) 3639 BUG(); 3640 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); 3641 } 3642 } 3643 3644 /* 3645 * This operation locks against the VM for all pte/vma/mm related 3646 * operations that could ever happen on a certain mm. This includes 3647 * vmtruncate, try_to_unmap, and all page faults. 
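 * (A hedged mm_take_all_locks()/mm_drop_all_locks() usage sketch is placed
 * just below; the locking rules continue after it.)
 */

/*
 * Illustrative sketch only -- not part of mm/mmap.c's interfaces. It shows
 * the expected bracketing of mm_take_all_locks() and mm_drop_all_locks();
 * example_freeze_address_space() is a hypothetical name.
 */
#if 0
static int example_freeze_address_space(struct mm_struct *mm)
{
	int err;

	mmap_write_lock(mm);
	err = mm_take_all_locks(mm);	/* fails with -EINTR on a pending signal */
	if (!err) {
		/* Every i_mmap_rwsem and anon_vma lock in @mm is now held. */
		mm_drop_all_locks(mm);
	}
	mmap_write_unlock(mm);

	return err;
}
#endif

/*
 * Locking rules for mm_take_all_locks():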
3648 * 3649 * The caller must take the mmap_lock in write mode before calling 3650 * mm_take_all_locks(). The caller isn't allowed to release the 3651 * mmap_lock until mm_drop_all_locks() returns. 3652 * 3653 * mmap_lock in write mode is required in order to block all operations 3654 * that could modify pagetables and free pages without need of 3655 * altering the vma layout. It's also needed in write mode to avoid new 3656 * anon_vmas to be associated with existing vmas. 3657 * 3658 * A single task can't take more than one mm_take_all_locks() in a row 3659 * or it would deadlock. 3660 * 3661 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in 3662 * mapping->flags avoid to take the same lock twice, if more than one 3663 * vma in this mm is backed by the same anon_vma or address_space. 3664 * 3665 * We take locks in following order, accordingly to comment at beginning 3666 * of mm/rmap.c: 3667 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for 3668 * hugetlb mapping); 3669 * - all vmas marked locked 3670 * - all i_mmap_rwsem locks; 3671 * - all anon_vma->rwseml 3672 * 3673 * We can take all locks within these types randomly because the VM code 3674 * doesn't nest them and we protected from parallel mm_take_all_locks() by 3675 * mm_all_locks_mutex. 3676 * 3677 * mm_take_all_locks() and mm_drop_all_locks are expensive operations 3678 * that may have to take thousand of locks. 3679 * 3680 * mm_take_all_locks() can fail if it's interrupted by signals. 3681 */ 3682 int mm_take_all_locks(struct mm_struct *mm) 3683 { 3684 struct vm_area_struct *vma; 3685 struct anon_vma_chain *avc; 3686 MA_STATE(mas, &mm->mm_mt, 0, 0); 3687 3688 mmap_assert_write_locked(mm); 3689 3690 mutex_lock(&mm_all_locks_mutex); 3691 3692 /* 3693 * vma_start_write() does not have a complement in mm_drop_all_locks() 3694 * because vma_start_write() is always asymmetrical; it marks a VMA as 3695 * being written to until mmap_write_unlock() or mmap_write_downgrade() 3696 * is reached. 3697 */ 3698 mas_for_each(&mas, vma, ULONG_MAX) { 3699 if (signal_pending(current)) 3700 goto out_unlock; 3701 vma_start_write(vma); 3702 } 3703 3704 mas_set(&mas, 0); 3705 mas_for_each(&mas, vma, ULONG_MAX) { 3706 if (signal_pending(current)) 3707 goto out_unlock; 3708 if (vma->vm_file && vma->vm_file->f_mapping && 3709 is_vm_hugetlb_page(vma)) 3710 vm_lock_mapping(mm, vma->vm_file->f_mapping); 3711 } 3712 3713 mas_set(&mas, 0); 3714 mas_for_each(&mas, vma, ULONG_MAX) { 3715 if (signal_pending(current)) 3716 goto out_unlock; 3717 if (vma->vm_file && vma->vm_file->f_mapping && 3718 !is_vm_hugetlb_page(vma)) 3719 vm_lock_mapping(mm, vma->vm_file->f_mapping); 3720 } 3721 3722 mas_set(&mas, 0); 3723 mas_for_each(&mas, vma, ULONG_MAX) { 3724 if (signal_pending(current)) 3725 goto out_unlock; 3726 if (vma->anon_vma) 3727 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 3728 vm_lock_anon_vma(mm, avc->anon_vma); 3729 } 3730 3731 return 0; 3732 3733 out_unlock: 3734 mm_drop_all_locks(mm); 3735 return -EINTR; 3736 } 3737 3738 static void vm_unlock_anon_vma(struct anon_vma *anon_vma) 3739 { 3740 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 3741 /* 3742 * The LSB of head.next can't change to 0 from under 3743 * us because we hold the mm_all_locks_mutex. 3744 * 3745 * We must however clear the bitflag before unlocking 3746 * the vma so the users using the anon_vma->rb_root will 3747 * never see our bitflag. 
3748 * 3749 * No need of atomic instructions here, head.next 3750 * can't change from under us until we release the 3751 * anon_vma->root->rwsem. 3752 */ 3753 if (!__test_and_clear_bit(0, (unsigned long *) 3754 &anon_vma->root->rb_root.rb_root.rb_node)) 3755 BUG(); 3756 anon_vma_unlock_write(anon_vma); 3757 } 3758 } 3759 3760 static void vm_unlock_mapping(struct address_space *mapping) 3761 { 3762 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 3763 /* 3764 * AS_MM_ALL_LOCKS can't change to 0 from under us 3765 * because we hold the mm_all_locks_mutex. 3766 */ 3767 i_mmap_unlock_write(mapping); 3768 if (!test_and_clear_bit(AS_MM_ALL_LOCKS, 3769 &mapping->flags)) 3770 BUG(); 3771 } 3772 } 3773 3774 /* 3775 * The mmap_lock cannot be released by the caller until 3776 * mm_drop_all_locks() returns. 3777 */ 3778 void mm_drop_all_locks(struct mm_struct *mm) 3779 { 3780 struct vm_area_struct *vma; 3781 struct anon_vma_chain *avc; 3782 MA_STATE(mas, &mm->mm_mt, 0, 0); 3783 3784 mmap_assert_write_locked(mm); 3785 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); 3786 3787 mas_for_each(&mas, vma, ULONG_MAX) { 3788 if (vma->anon_vma) 3789 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 3790 vm_unlock_anon_vma(avc->anon_vma); 3791 if (vma->vm_file && vma->vm_file->f_mapping) 3792 vm_unlock_mapping(vma->vm_file->f_mapping); 3793 } 3794 3795 mutex_unlock(&mm_all_locks_mutex); 3796 } 3797 3798 /* 3799 * initialise the percpu counter for VM 3800 */ 3801 void __init mmap_init(void) 3802 { 3803 int ret; 3804 3805 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); 3806 VM_BUG_ON(ret); 3807 } 3808 3809 /* 3810 * Initialise sysctl_user_reserve_kbytes. 3811 * 3812 * This is intended to prevent a user from starting a single memory hogging 3813 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER 3814 * mode. 3815 * 3816 * The default value is min(3% of free memory, 128MB) 3817 * 128MB is enough to recover with sshd/login, bash, and top/kill. 3818 */ 3819 static int init_user_reserve(void) 3820 { 3821 unsigned long free_kbytes; 3822 3823 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); 3824 3825 sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); 3826 return 0; 3827 } 3828 subsys_initcall(init_user_reserve); 3829 3830 /* 3831 * Initialise sysctl_admin_reserve_kbytes. 3832 * 3833 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin 3834 * to log in and kill a memory hogging process. 3835 * 3836 * Systems with more than 256MB will reserve 8MB, enough to recover 3837 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will 3838 * only reserve 3% of free pages by default. 3839 */ 3840 static int init_admin_reserve(void) 3841 { 3842 unsigned long free_kbytes; 3843 3844 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); 3845 3846 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); 3847 return 0; 3848 } 3849 subsys_initcall(init_admin_reserve); 3850 3851 /* 3852 * Reinititalise user and admin reserves if memory is added or removed. 3853 * 3854 * The default user reserve max is 128MB, and the default max for the 3855 * admin reserve is 8MB. These are usually, but not always, enough to 3856 * enable recovery from a memory hogging process using login/sshd, a shell, 3857 * and tools like top. It may make sense to increase or even disable the 3858 * reserve depending on the existence of swap or variations in the recovery 3859 * tools. So, the admin may have changed them. 
3860 * 3861 * If memory is added and the reserves have been eliminated or increased above 3862 * the default max, then we'll trust the admin. 3863 * 3864 * If memory is removed and there isn't enough free memory, then we 3865 * need to reset the reserves. 3866 * 3867 * Otherwise keep the reserve set by the admin. 3868 */ 3869 static int reserve_mem_notifier(struct notifier_block *nb, 3870 unsigned long action, void *data) 3871 { 3872 unsigned long tmp, free_kbytes; 3873 3874 switch (action) { 3875 case MEM_ONLINE: 3876 /* Default max is 128MB. Leave alone if modified by operator. */ 3877 tmp = sysctl_user_reserve_kbytes; 3878 if (0 < tmp && tmp < (1UL << 17)) 3879 init_user_reserve(); 3880 3881 /* Default max is 8MB. Leave alone if modified by operator. */ 3882 tmp = sysctl_admin_reserve_kbytes; 3883 if (0 < tmp && tmp < (1UL << 13)) 3884 init_admin_reserve(); 3885 3886 break; 3887 case MEM_OFFLINE: 3888 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); 3889 3890 if (sysctl_user_reserve_kbytes > free_kbytes) { 3891 init_user_reserve(); 3892 pr_info("vm.user_reserve_kbytes reset to %lu\n", 3893 sysctl_user_reserve_kbytes); 3894 } 3895 3896 if (sysctl_admin_reserve_kbytes > free_kbytes) { 3897 init_admin_reserve(); 3898 pr_info("vm.admin_reserve_kbytes reset to %lu\n", 3899 sysctl_admin_reserve_kbytes); 3900 } 3901 break; 3902 default: 3903 break; 3904 } 3905 return NOTIFY_OK; 3906 } 3907 3908 static int __meminit init_reserve_notifier(void) 3909 { 3910 if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI)) 3911 pr_err("Failed registering memory add/remove notifier for admin reserve\n"); 3912 3913 return 0; 3914 } 3915 subsys_initcall(init_reserve_notifier); 3916