// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm_inline.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
#include "swap.h"

/*
 * Maximum number of attempts we make to install guard pages before we give up
 * and return -ERESTARTNOINTR to have userspace try again.
 */
#define MAX_MADVISE_GUARD_RETRIES 3

struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
	case MADV_COLLAPSE:
	case MADV_GUARD_INSTALL:
	case MADV_GUARD_REMOVE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

#ifdef CONFIG_ANON_VMA_NAME
struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	struct anon_vma_name *anon_name;
	size_t count;

	/* Add 1 for NUL terminator at the end of the anon_name->name */
	count = strlen(name) + 1;
	anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
	if (anon_name) {
		kref_init(&anon_name->kref);
		memcpy(anon_name->name, name, count);
	}

	return anon_name;
}

void anon_vma_name_free(struct kref *kref)
{
	struct anon_vma_name *anon_name =
			container_of(kref, struct anon_vma_name, kref);
	kfree(anon_name);
}

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);

	return vma->anon_name;
}

/* mmap_lock should be write-locked */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	struct anon_vma_name *orig_name = anon_vma_name(vma);

	if (!anon_name) {
		vma->anon_name = NULL;
		anon_vma_name_put(orig_name);
		return 0;
	}

	if (anon_vma_name_eq(orig_name, anon_name))
		return 0;

	vma->anon_name = anon_vma_name_reuse(anon_name);
	anon_vma_name_put(orig_name);

	return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	if (anon_name)
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_ANON_VMA_NAME */

/*
 * Update the vm_flags on a region of a vma, splitting it or merging it as
 * necessary. Must be called with mmap_lock held for writing.
 * Caller should ensure anon_name stability by raising its refcount even when
 * anon_name belongs to a valid vma because this function might free that vma.
 */
static int madvise_update_vma(struct vm_area_struct *vma,
			      struct vm_area_struct **prev, unsigned long start,
			      unsigned long end, unsigned long new_flags,
			      struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;
	VMA_ITERATOR(vmi, mm, start);

	if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
		*prev = vma;
		return 0;
	}

	vma = vma_modify_flags_name(&vmi, *prev, vma, start, end, new_flags,
				    anon_name);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	*prev = vma;

	/* vm_flags is protected by the mmap_lock held in write mode. */
	vma_start_write(vma);
	vm_flags_reset(vma, new_flags);
	if (!vma->vm_file || vma_is_anon_shmem(vma)) {
		error = replace_anon_vma_name(vma, anon_name);
		if (error)
			return error;
	}

	return 0;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
		unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	struct swap_iocb *splug = NULL;
	pte_t *ptep = NULL;
	spinlock_t *ptl;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct folio *folio;

		if (!ptep++) {
			ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
			if (!ptep)
				break;
		}

		pte = ptep_get(ptep);
		if (!is_swap_pte(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		pte_unmap_unlock(ptep, ptl);
		ptep = NULL;

		folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					      vma, addr, &splug);
		if (folio)
			folio_put(folio);
	}

	if (ptep)
		pte_unmap_unlock(ptep, ptl);
	swap_read_unplug(splug);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry = swapin_walk_pmd_entry,
	.walk_lock = PGWALK_RDLOCK,
};

static void shmem_swapin_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end) - 1;
	struct folio *folio;
	struct swap_iocb *splug = NULL;

	rcu_read_lock();
	xas_for_each(&xas, folio, end_index) {
		unsigned long addr;
		swp_entry_t entry;

		if (!xa_is_value(folio))
			continue;
		entry = radix_to_swp_entry(folio);
		/* There might be swapin error entries in shmem mapping. */
		if (non_swap_entry(entry))
			continue;

		addr = vma->vm_start +
			((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT);
		xas_pause(&xas);
		rcu_read_unlock();

		folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
					      vma, addr, &splug);
		if (folio)
			folio_put(folio);

		rcu_read_lock();
	}
	rcu_read_unlock();
	swap_read_unplug(splug);
}
#endif /* CONFIG_SWAP */

/*
 * Schedule all required I/O operations. Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		shmem_swapin_range(vma, start, end, file->f_mapping);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks. We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(mm);
	return 0;
}

static inline bool can_do_file_pageout(struct vm_area_struct *vma)
{
	if (!vma->vm_file)
		return false;
	/*
	 * paging out pagecache only for non-anonymous mappings that correspond
	 * to the files the calling process could (if it tried) open for
	 * writing; otherwise we'd be including shared non-exclusive mappings,
	 * which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
					  struct folio *folio, pte_t *ptep,
					  pte_t pte, bool *any_young,
					  bool *any_dirty)
{
	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
	int max_nr = (end - addr) / PAGE_SIZE;

	return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
			       any_young, any_dirty);
}

static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *start_pte, *pte, ptent;
	spinlock_t *ptl;
	struct folio *folio = NULL;
	LIST_HEAD(folio_list);
	bool pageout_anon_only_filter;
	unsigned int batch_count = 0;
	int nr;

	if (fatal_signal_pending(current))
		return -EINTR;

	pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
					!can_do_file_pageout(vma);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		folio = pmd_folio(orig_pmd);

		/* Do not interfere with other mappings of this folio */
		if (folio_maybe_mapped_shared(folio))
			goto huge_unlock;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			folio_get(folio);
			spin_unlock(ptl);
			folio_lock(folio);
			err = split_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
			if (!err)
				goto regular_folio;
			return 0;
		}

		if (!pageout && pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (folio_test_active(folio))
			folio_set_workingset(folio);
		if (pageout) {
			if (folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&folio_list);
		return 0;
	}

regular_folio:
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
restart:
	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!start_pte)
		return 0;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) {
		nr = 1;
		ptent = ptep_get(pte);

		if (++batch_count == SWAP_CLUSTER_MAX) {
			batch_count = 0;
			if (need_resched()) {
				arch_leave_lazy_mmu_mode();
				pte_unmap_unlock(start_pte, ptl);
				cond_resched();
				goto restart;
			}
		}

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * If we encounter a large folio, only split it if it is not
		 * fully mapped within the range we are operating on. Otherwise
		 * leave it as is so that it can be swapped out whole. If we
		 * fail to split a folio, leave it in place and advance to the
		 * next pte in the range.
		 */
		if (folio_test_large(folio)) {
			bool any_young;

			nr = madvise_folio_pte_batch(addr, end, folio, pte,
						     ptent, &any_young, NULL);
			if (any_young)
				ptent = pte_mkyoung(ptent);

			if (nr < folio_nr_pages(folio)) {
				int err;

				if (folio_maybe_mapped_shared(folio))
					continue;
				if (pageout_anon_only_filter && !folio_test_anon(folio))
					continue;
				if (!folio_trylock(folio))
					continue;
				folio_get(folio);
				arch_leave_lazy_mmu_mode();
				pte_unmap_unlock(start_pte, ptl);
				start_pte = NULL;
				err = split_folio(folio);
				folio_unlock(folio);
				folio_put(folio);
				start_pte = pte =
					pte_offset_map_lock(mm, pmd, addr, &ptl);
				if (!start_pte)
					break;
				arch_enter_lazy_mmu_mode();
				if (!err)
					nr = 0;
				continue;
			}
		}

		/*
		 * Do not interfere with other mappings of this folio and
		 * non-LRU folio. If we have a large folio at this point, we
		 * know it is fully mapped so if its mapcount is the same as its
		 * number of pages, it must be exclusive.
		 */
		if (!folio_test_lru(folio) ||
		    folio_mapcount(folio) != folio_nr_pages(folio))
			continue;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			continue;

		if (!pageout && pte_young(ptent)) {
			clear_young_dirty_ptes(vma, addr, pte, nr,
					       CYDP_CLEAR_YOUNG);
			tlb_remove_tlb_entries(tlb, pte, nr, addr);
		}

		/*
		 * We are deactivating a folio for accelerating reclaiming.
		 * The VM cannot reclaim the folio unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking, which
		 * will miss the recent reference history.
		 */
		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (folio_test_active(folio))
			folio_set_workingset(folio);
		if (pageout) {
			if (folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
	}

	if (start_pte) {
		arch_leave_lazy_mmu_mode();
		pte_unmap_unlock(start_pte, ptl);
	}
	if (pageout)
		reclaim_pages(&folio_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_or_pageout_pte_range,
	.walk_lock = PGWALK_RDLOCK,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
}

static long madvise_cold(struct vm_area_struct *vma,
			 struct vm_area_struct **prev,
			 unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static long madvise_pageout(struct vm_area_struct *vma,
			    struct vm_area_struct **prev,
			    unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	/*
	 * If the VMA belongs to a private file mapping, there can be private
	 * dirty pages which can be paged out even if this process is neither
	 * the owner of nor write-capable on the file. So we still allow
	 * private file mappings to page out their dirty anon pages.
	 */
	if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
				(vma->vm_flags & VM_MAYSHARE)))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)

{
	const cydp_t cydp_flags = CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY;
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte, ptent;
	struct folio *folio;
	int nr_swap = 0;
	unsigned long next;
	int nr, max_nr;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!start_pte)
		return 0;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) {
		nr = 1;
		ptent = ptep_get(pte);

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte has a swp_entry, just clear the page table to
		 * prevent swap-in, which is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (!non_swap_entry(entry)) {
				max_nr = (end - addr) / PAGE_SIZE;
				nr = swap_pte_batch(pte, max_nr, ptent);
				nr_swap -= nr;
				free_swap_and_cache_nr(entry, nr);
				clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm);
			} else if (is_hwpoison_entry(entry) ||
				   is_poisoned_swp_entry(entry)) {
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			}
			continue;
		}

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * If we encounter a large folio, only split it if it is not
		 * fully mapped within the range we are operating on. Otherwise
		 * leave it as is so that it can be marked as lazyfree. If we
		 * fail to split a folio, leave it in place and advance to the
		 * next pte in the range.
		 */
		if (folio_test_large(folio)) {
			bool any_young, any_dirty;

			nr = madvise_folio_pte_batch(addr, end, folio, pte,
						     ptent, &any_young, &any_dirty);

			if (nr < folio_nr_pages(folio)) {
				int err;

				if (folio_maybe_mapped_shared(folio))
					continue;
				if (!folio_trylock(folio))
					continue;
				folio_get(folio);
				arch_leave_lazy_mmu_mode();
				pte_unmap_unlock(start_pte, ptl);
				start_pte = NULL;
				err = split_folio(folio);
				folio_unlock(folio);
				folio_put(folio);
				pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				start_pte = pte;
				if (!start_pte)
					break;
				arch_enter_lazy_mmu_mode();
				if (!err)
					nr = 0;
				continue;
			}

			if (any_young)
				ptent = pte_mkyoung(ptent);
			if (any_dirty)
				ptent = pte_mkdirty(ptent);
		}

		if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
			if (!folio_trylock(folio))
				continue;
			/*
			 * If we have a large folio at this point, we know it is
			 * fully mapped so if its mapcount is the same as its
			 * number of pages, it must be exclusive.
			 */
			if (folio_mapcount(folio) != folio_nr_pages(folio)) {
				folio_unlock(folio);
				continue;
			}

			if (folio_test_swapcache(folio) &&
			    !folio_free_swap(folio)) {
				folio_unlock(folio);
				continue;
			}

			folio_clear_dirty(folio);
			folio_unlock(folio);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			clear_young_dirty_ptes(vma, addr, pte, nr, cydp_flags);
			tlb_remove_tlb_entries(tlb, pte, nr, addr);
		}
		folio_mark_lazyfree(folio);
	}

	if (nr_swap)
		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	if (start_pte) {
		arch_leave_lazy_mmu_mode();
		pte_unmap_unlock(start_pte, ptl);
	}
	cond_resched();

	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry = madvise_free_pte_range,
	.walk_lock = PGWALK_RDLOCK,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);

	return 0;
}

/*
 * Application no longer needs these pages. If the pages are dirty,
 * it's OK to just throw them away. The app will be more careful about
 * data it wants to keep. Be sure to free swap resources too. The
 * zap_page_range_single call sets things up for shrink_active_list to actually
 * free these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do. This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them. There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	struct zap_details details = {
		.reclaim_pt = true,
		.even_cows = true,
	};

	zap_page_range_single(vma, start, end - start, &details);
	return 0;
}

static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long *end,
					    int behavior)
{
	if (!is_vm_hugetlb_page(vma)) {
		unsigned int forbidden = VM_PFNMAP;

		if (behavior != MADV_DONTNEED_LOCKED)
			forbidden |= VM_LOCKED;

		return !(vma->vm_flags & forbidden);
	}

	if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
		return false;
	if (start & ~huge_page_mask(hstate_vma(vma)))
		return false;

	/*
	 * Madvise callers expect the length to be rounded up to PAGE_SIZE
	 * boundaries, and may be unaware that this VMA uses huge pages.
	 * Avoid unexpected data loss by rounding down the number of
	 * huge pages freed.
	 */
	*end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));

	return true;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	struct mm_struct *mm = vma->vm_mm;

	*prev = vma;
	if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
		return -EINVAL;

	if (start == end)
		return 0;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(mm);
		vma = vma_lookup(mm, start);
		if (!vma)
			return -ENOMEM;
		/*
		 * Potential end adjustment for hugetlb vma is OK as
		 * the check below keeps end within vma.
		 */
		if (!madvise_dontneed_free_valid_vma(vma, start, &end,
						     behavior))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_lock was
			 * released, the concurrent operation does not
			 * leave madvise() with an undefined result;
			 * there may be an adjacent next vma that we'll
			 * walk next. userfaultfd_remove() will generate
			 * an UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		/*
		 * If the memory region between start and end was
		 * originally backed by 4kB pages and then remapped to
		 * be backed by hugepages while mmap_lock was dropped,
		 * the adjustment for hugetlb vma above may have rounded
		 * end down to the start address.
		 */
		if (start == end)
			return 0;
		VM_WARN_ON(start > end);
	}

	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

static long madvise_populate(struct mm_struct *mm, unsigned long start,
		unsigned long end, int behavior)
{
	const bool write = behavior == MADV_POPULATE_WRITE;
	int locked = 1;
	long pages;

	while (start < end) {
		/* Populate (prefault) page tables readable/writable. */
		pages = faultin_page_range(mm, start, end, write, &locked);
		if (!locked) {
			mmap_read_lock(mm);
			locked = 1;
		}
		if (pages < 0) {
			switch (pages) {
			case -EINTR:
				return -EINTR;
			case -EINVAL: /* Incompatible mappings / permissions. */
				return -EINVAL;
			case -EHWPOISON:
				return -EHWPOISON;
			case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
				return -EFAULT;
			default:
				pr_warn_once("%s: unhandled return value: %ld\n",
					     __func__, pages);
				fallthrough;
			case -ENOMEM: /* No VMA or out of memory. */
				return -ENOMEM;
			}
		}
		start += pages * PAGE_SIZE;
	}
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;
	struct mm_struct *mm = vma->vm_mm;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if (!vma_is_shared_maywrite(vma))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_rwsem. We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(mm);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	mmap_read_lock(mm);
	return error;
}

static bool is_valid_guard_vma(struct vm_area_struct *vma, bool allow_locked)
{
	vm_flags_t disallowed = VM_SPECIAL | VM_HUGETLB;

	/*
	 * A user could lock after setting a guard range but that's fine, as
	 * they'd not be able to fault in. The issue arises when we try to zap
	 * existing locked VMAs. We don't want to do that.
	 */
	if (!allow_locked)
		disallowed |= VM_LOCKED;

	return !(vma->vm_flags & disallowed);
}

static bool is_guard_pte_marker(pte_t ptent)
{
	return is_pte_marker(ptent) &&
		is_guard_swp_entry(pte_to_swp_entry(ptent));
}

static int guard_install_pud_entry(pud_t *pud, unsigned long addr,
				   unsigned long next, struct mm_walk *walk)
{
	pud_t pudval = pudp_get(pud);

	/* If huge return >0 so we abort the operation + zap. */
	return pud_trans_huge(pudval) || pud_devmap(pudval);
}

static int guard_install_pmd_entry(pmd_t *pmd, unsigned long addr,
				   unsigned long next, struct mm_walk *walk)
{
	pmd_t pmdval = pmdp_get(pmd);

	/* If huge return >0 so we abort the operation + zap. */
	return pmd_trans_huge(pmdval) || pmd_devmap(pmdval);
}

static int guard_install_pte_entry(pte_t *pte, unsigned long addr,
				   unsigned long next, struct mm_walk *walk)
{
	pte_t pteval = ptep_get(pte);
	unsigned long *nr_pages = (unsigned long *)walk->private;

	/* If there is already a guard page marker, we have nothing to do. */
	if (is_guard_pte_marker(pteval)) {
		(*nr_pages)++;

		return 0;
	}

	/* If populated return >0 so we abort the operation + zap. */
	return 1;
}

static int guard_install_set_pte(unsigned long addr, unsigned long next,
				 pte_t *ptep, struct mm_walk *walk)
{
	unsigned long *nr_pages = (unsigned long *)walk->private;

	/* Simply install a PTE marker, this causes segfault on access. */
	*ptep = make_pte_marker(PTE_MARKER_GUARD);
	(*nr_pages)++;

	return 0;
}

static const struct mm_walk_ops guard_install_walk_ops = {
	.pud_entry = guard_install_pud_entry,
	.pmd_entry = guard_install_pmd_entry,
	.pte_entry = guard_install_pte_entry,
	.install_pte = guard_install_set_pte,
	.walk_lock = PGWALK_RDLOCK,
};

static long madvise_guard_install(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end)
{
	long err;
	int i;

	*prev = vma;
	if (!is_valid_guard_vma(vma, /* allow_locked = */false))
		return -EINVAL;

	/*
	 * If we install guard markers, then the range is no longer
	 * empty from a page table perspective and therefore it's
	 * appropriate to have an anon_vma.
	 *
	 * This ensures that on fork, we copy page tables correctly.
	 */
	err = anon_vma_prepare(vma);
	if (err)
		return err;

	/*
	 * Optimistically try to install the guard marker pages first. If any
	 * non-guard pages are encountered, give up and zap the range before
	 * trying again.
	 *
	 * We try a few times before giving up and releasing back to userland
	 * to loop around, releasing locks in the process to avoid contention.
	 * This would only happen if there were a great many racing page faults.
	 *
	 * In most cases we should simply install the guard markers immediately
	 * with no zap or looping.
	 */
	for (i = 0; i < MAX_MADVISE_GUARD_RETRIES; i++) {
		unsigned long nr_pages = 0;

		/* Returns < 0 on error, == 0 if success, > 0 if zap needed. */
		err = walk_page_range_mm(vma->vm_mm, start, end,
					 &guard_install_walk_ops, &nr_pages);
		if (err < 0)
			return err;

		if (err == 0) {
			unsigned long nr_expected_pages = PHYS_PFN(end - start);

			VM_WARN_ON(nr_pages != nr_expected_pages);
			return 0;
		}

		/*
		 * OK, part of the range has non-guard pages mapped; zap
		 * them. This leaves existing guard pages in place.
		 */
		zap_page_range_single(vma, start, end - start, NULL);
	}

	/*
	 * We were unable to install the guard pages due to being raced by page
	 * faults. This should not happen ordinarily. We return to userspace and
	 * immediately retry, relieving lock contention.
	 */
	return restart_syscall();
}

static int guard_remove_pud_entry(pud_t *pud, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	pud_t pudval = pudp_get(pud);

	/* If huge, cannot have guard pages present, so no-op - skip. */
	if (pud_trans_huge(pudval) || pud_devmap(pudval))
		walk->action = ACTION_CONTINUE;

	return 0;
}

static int guard_remove_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	pmd_t pmdval = pmdp_get(pmd);

	/* If huge, cannot have guard pages present, so no-op - skip. */
	if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
		walk->action = ACTION_CONTINUE;

	return 0;
}

static int guard_remove_pte_entry(pte_t *pte, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	pte_t ptent = ptep_get(pte);

	if (is_guard_pte_marker(ptent)) {
		/* Simply clear the PTE marker. */
		pte_clear_not_present_full(walk->mm, addr, pte, false);
		update_mmu_cache(walk->vma, addr, pte);
	}

	return 0;
}

static const struct mm_walk_ops guard_remove_walk_ops = {
	.pud_entry = guard_remove_pud_entry,
	.pmd_entry = guard_remove_pmd_entry,
	.pte_entry = guard_remove_pte_entry,
	.walk_lock = PGWALK_RDLOCK,
};

static long madvise_guard_remove(struct vm_area_struct *vma,
				 struct vm_area_struct **prev,
				 unsigned long start, unsigned long end)
{
	*prev = vma;
	/*
	 * We're ok with removing guards in mlock()'d ranges, as this is a
	 * non-destructive action.
	 */
	if (!is_valid_guard_vma(vma, /* allow_locked = */true))
		return -EINVAL;

	return walk_page_range(vma->vm_mm, start, end,
			       &guard_remove_walk_ops, NULL);
}

/*
 * Apply an madvise behavior to a region of a vma. madvise_update_vma
 * will handle splitting a vm area into separate areas, each area with its own
 * behavior.
 */
static int madvise_vma_behavior(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end,
				unsigned long behavior)
{
	int error;
	struct anon_vma_name *anon_name;
	unsigned long new_flags = vma->vm_flags;

	if (unlikely(!can_modify_vma_madv(vma, behavior)))
		return -EPERM;

	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO)
			return -EINVAL;
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED)
			return -EINVAL;
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		if (vma->vm_flags & VM_DROPPABLE)
			return -EINVAL;
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if ((!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) ||
		    (vma->vm_flags & VM_DROPPABLE))
			return -EINVAL;
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	case MADV_COLLAPSE:
		return madvise_collapse(vma, prev, start, end);
	case MADV_GUARD_INSTALL:
		return madvise_guard_install(vma, prev, start, end);
	case MADV_GUARD_REMOVE:
		return madvise_guard_remove(vma, prev, start, end);
	}

	anon_name = anon_vma_name(vma);
	anon_vma_name_get(anon_name);
	error = madvise_update_vma(vma, prev, start, end, new_flags,
				   anon_name);
	anon_vma_name_put(anon_name);

out:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	unsigned long size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;


	for (; start < end; start += size) {
		unsigned long pfn;
		struct page *page;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page.
		 */
		size = page_size(compound_head(page));

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
		} else {
			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = memory_failure(pfn, MF_ACTION_REQUIRED | MF_COUNT_INCREASED | MF_SW_SIMULATED);
			if (ret == -EOPNOTSUPP)
				ret = 0;
		}

		if (ret)
			return ret;
	}

	return 0;
}

static bool is_memory_failure(int behavior)
{
	switch (behavior) {
	case MADV_HWPOISON:
	case MADV_SOFT_OFFLINE:
		return true;
	default:
		return false;
	}
}

#else

static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	return 0;
}

static bool is_memory_failure(int behavior)
{
	return false;
}

#endif /* CONFIG_MEMORY_FAILURE */

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_FREE:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
	case MADV_COLLAPSE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
	case MADV_GUARD_INSTALL:
	case MADV_GUARD_REMOVE:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

/* Can we invoke process_madvise() on a remote mm for the specified behavior? */
static bool process_madvise_remote_valid(int behavior)
{
	switch (behavior) {
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_WILLNEED:
	case MADV_COLLAPSE:
		return true;
	default:
		return false;
	}
}

/*
 * Walk the vmas in range [start,end), and call the visit function on each one.
 * The visit function will get start and end parameters that cover the overlap
 * between the current vma and the original range. Any unmapped regions in the
 * original range will result in this function returning -ENOMEM while still
 * calling the visit function on all of the existing vmas in the range.
 * Must be called with the mmap_lock held for reading or writing.
 */
static
int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
		      unsigned long end, unsigned long arg,
		      int (*visit)(struct vm_area_struct *vma,
				   struct vm_area_struct **prev, unsigned long start,
				   unsigned long end, unsigned long arg))
{
	struct vm_area_struct *vma;
	struct vm_area_struct *prev;
	unsigned long tmp;
	int unmapped_error = 0;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end
	 * (this differs from how mlock etc. handle it).
	 */
	vma = find_vma_prev(mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		int error;

		/* Still start < end. */
		if (!vma)
			return -ENOMEM;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				break;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = visit(vma, &prev, start, tmp, arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		if (start >= end)
			break;
		if (prev)
			vma = find_vma(mm, prev->vm_end);
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(mm, start);
	}

	return unmapped_error;
}

#ifdef CONFIG_ANON_VMA_NAME
static int madvise_vma_anon_name(struct vm_area_struct *vma,
				 struct vm_area_struct **prev,
				 unsigned long start, unsigned long end,
				 unsigned long anon_name)
{
	int error;

	/* Only anonymous mappings can be named */
	if (vma->vm_file && !vma_is_anon_shmem(vma))
		return -EBADF;

	error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
				   (struct anon_vma_name *)anon_name);

	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in, struct anon_vma_name *anon_name)
{
	unsigned long end;
	unsigned long len;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
				 madvise_vma_anon_name);
}
#endif /* CONFIG_ANON_VMA_NAME */

static int madvise_lock(struct mm_struct *mm, int behavior)
{
	if (is_memory_failure(behavior))
		return 0;

	if (madvise_need_mmap_write(behavior)) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
	} else {
		mmap_read_lock(mm);
	}
	return 0;
}

static void madvise_unlock(struct mm_struct *mm, int behavior)
{
	if (is_memory_failure(behavior))
		return;

	if (madvise_need_mmap_write(behavior))
		mmap_write_unlock(mm);
	else
		mmap_read_unlock(mm);
}

static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior)
{
	size_t len;

	if (!madvise_behavior_valid(behavior))
		return false;

	if (!PAGE_ALIGNED(start))
		return false;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return false;

	if (start + len < start)
		return false;

	return true;
}

/*
 * madvise_should_skip() - Return whether the request is invalid or a no-op.
 * @start:	Start address of madvise-requested address range.
 * @len_in:	Length of madvise-requested address range.
 * @behavior:	Requested madvise behavior.
 * @err:	Pointer to store an error code from the check.
 *
 * If the specified behaviour is invalid or nothing would occur, we skip the
 * operation. This function returns true in those cases, otherwise false.
 * When it returns true, the error code to report is stored in @err.
 */
static bool madvise_should_skip(unsigned long start, size_t len_in,
		int behavior, int *err)
{
	if (!is_valid_madvise(start, len_in, behavior)) {
		*err = -EINVAL;
		return true;
	}
	if (start + PAGE_ALIGN(len_in) == start) {
		*err = 0;
		return true;
	}
	return false;
}

static bool is_madvise_populate(int behavior)
{
	switch (behavior) {
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
		return true;
	default:
		return false;
	}
}

static int madvise_do_behavior(struct mm_struct *mm,
		unsigned long start, size_t len_in, int behavior)
{
	struct blk_plug plug;
	unsigned long end;
	int error;

	if (is_memory_failure(behavior))
		return madvise_inject_error(behavior, start, start + len_in);
	start = untagged_addr_remote(mm, start);
	end = start + PAGE_ALIGN(len_in);

	blk_start_plug(&plug);
	if (is_madvise_populate(behavior))
		error = madvise_populate(mm, start, end, behavior);
	else
		error = madvise_walk_vmas(mm, start, end, behavior,
					  madvise_vma_behavior);
	blk_finish_plug(&plug);
	return error;
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area. The idea is to help the kernel
 * use appropriate read-ahead and caching techniques. The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters. This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_COLLAPSE - synchronously coalesce pages into new THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *  MADV_POPULATE_READ - populate (prefault) page tables readable by
 *		triggering read faults if required
 *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
 *		triggering write faults if required
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 *  -EPERM  - memory is sealed.
 */
int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
{
	int error;

	if (madvise_should_skip(start, len_in, behavior, &error))
		return error;
	error = madvise_lock(mm, behavior);
	if (error)
		return error;
	error = madvise_do_behavior(mm, start, len_in, behavior);
	madvise_unlock(mm, behavior);

	return error;
}

SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	return do_madvise(current->mm, start, len_in, behavior);
}

/* Perform an madvise operation over a vector of addresses and lengths. */
static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
		int behavior)
{
	ssize_t ret = 0;
	size_t total_len;

	total_len = iov_iter_count(iter);

	ret = madvise_lock(mm, behavior);
	if (ret)
		return ret;

	while (iov_iter_count(iter)) {
		unsigned long start = (unsigned long)iter_iov_addr(iter);
		size_t len_in = iter_iov_len(iter);
		int error;

		if (madvise_should_skip(start, len_in, behavior, &error))
			ret = error;
		else
			ret = madvise_do_behavior(mm, start, len_in, behavior);
		/*
		 * An madvise operation is attempting to restart the syscall,
		 * but we cannot proceed as it would not be correct to repeat
		 * the operation in aggregate, and would be surprising to the
		 * user.
		 *
		 * We drop and reacquire locks so it is safe to just loop and
		 * try again. We check for fatal signals in case we need to
		 * exit early anyway.
		 */
		if (ret == -ERESTARTNOINTR) {
			if (fatal_signal_pending(current)) {
				ret = -EINTR;
				break;
			}

			/* Drop and reacquire lock to unwind race. */
			madvise_unlock(mm, behavior);
			madvise_lock(mm, behavior);
			continue;
		}
		if (ret < 0)
			break;
		iov_iter_advance(iter, iter_iov_len(iter));
	}
	madvise_unlock(mm, behavior);

	ret = (total_len - iov_iter_count(iter)) ? : ret;

	return ret;
}

SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
		size_t, vlen, int, behavior, unsigned int, flags)
{
	ssize_t ret;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned int f_flags;

	if (flags != 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		goto out;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto free_iov;
	}

	/* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR(mm)) {
		ret = PTR_ERR(mm);
		goto release_task;
	}

	/*
	 * We need only perform this check if we are attempting to manipulate a
	 * remote process's address space.
	 */
	if (mm != current->mm && !process_madvise_remote_valid(behavior)) {
		ret = -EINVAL;
		goto release_mm;
	}

	/*
	 * Require CAP_SYS_NICE for influencing process performance. Note that
	 * only non-destructive hints are currently supported for remote
	 * processes.
	 */
	if (mm != current->mm && !capable(CAP_SYS_NICE)) {
		ret = -EPERM;
		goto release_mm;
	}

	ret = vector_madvise(mm, &iter, behavior);

release_mm:
	mmput(mm);
release_task:
	put_task_struct(task);
free_iov:
	kfree(iov);
out:
	return ret;
}
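
/*
 * Illustrative userspace sketch (not part of the kernel build): a minimal
 * caller of the madvise(2) entry point implemented above, assuming only the
 * standard libc mmap()/madvise() wrappers and the definitions in
 * <sys/mman.h>. Error handling is trimmed for brevity.
 *
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		size_t len = 16 * page;
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return 1;
 *		p[0] = 1;
 *		// Discard the range: backing pages are freed and later
 *		// reads of p[] return zeroes (MADV_DONTNEED above).
 *		if (madvise(p, len, MADV_DONTNEED))
 *			return 1;
 *		return munmap(p, len);
 *	}
 */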