// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm_inline.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
#include "swap.h"

/*
 * Maximum number of attempts we make to install guard pages before we give up
 * and return -ERESTARTNOINTR to have userspace try again.
 */
#define MAX_MADVISE_GUARD_RETRIES 3

struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

struct madvise_behavior {
	int behavior;
	struct mmu_gather *tlb;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
	case MADV_COLLAPSE:
	case MADV_GUARD_INSTALL:
	case MADV_GUARD_REMOVE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

#ifdef CONFIG_ANON_VMA_NAME
struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	struct anon_vma_name *anon_name;
	size_t count;

	/* Add 1 for NUL terminator at the end of the anon_name->name */
	count = strlen(name) + 1;
	anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
	if (anon_name) {
		kref_init(&anon_name->kref);
		memcpy(anon_name->name, name, count);
	}

	return anon_name;
}

void anon_vma_name_free(struct kref *kref)
{
	struct anon_vma_name *anon_name =
			container_of(kref, struct anon_vma_name, kref);
	kfree(anon_name);
}

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);

	return vma->anon_name;
}

/* mmap_lock should be write-locked */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	struct anon_vma_name *orig_name = anon_vma_name(vma);

	if (!anon_name) {
		vma->anon_name = NULL;
		anon_vma_name_put(orig_name);
		return 0;
	}

	if (anon_vma_name_eq(orig_name, anon_name))
		return 0;

	vma->anon_name = anon_vma_name_reuse(anon_name);
	anon_vma_name_put(orig_name);

	return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	if (anon_name)
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_ANON_VMA_NAME */

/*
 * Update the vm_flags on region of a vma, splitting it or merging it as
 * necessary. Must be called with mmap_lock held for writing;
 * Caller should ensure anon_name stability by raising its refcount even when
 * anon_name belongs to a valid vma because this function might free that vma.
 */
static int madvise_update_vma(struct vm_area_struct *vma,
			      struct vm_area_struct **prev, unsigned long start,
			      unsigned long end, unsigned long new_flags,
			      struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;
	VMA_ITERATOR(vmi, mm, start);

	if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
		*prev = vma;
		return 0;
	}

	vma = vma_modify_flags_name(&vmi, *prev, vma, start, end, new_flags,
				    anon_name);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	*prev = vma;

	/* vm_flags is protected by the mmap_lock held in write mode. */
	vma_start_write(vma);
	vm_flags_reset(vma, new_flags);
	if (!vma->vm_file || vma_is_anon_shmem(vma)) {
		error = replace_anon_vma_name(vma, anon_name);
		if (error)
			return error;
	}

	return 0;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
		unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	struct swap_iocb *splug = NULL;
	pte_t *ptep = NULL;
	spinlock_t *ptl;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct folio *folio;

		if (!ptep++) {
			ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
			if (!ptep)
				break;
		}

		pte = ptep_get(ptep);
		if (!is_swap_pte(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		pte_unmap_unlock(ptep, ptl);
		ptep = NULL;

		folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					      vma, addr, &splug);
		if (folio)
			folio_put(folio);
	}

	if (ptep)
		pte_unmap_unlock(ptep, ptl);
	swap_read_unplug(splug);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry		= swapin_walk_pmd_entry,
	.walk_lock		= PGWALK_RDLOCK,
};

static void shmem_swapin_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end) - 1;
	struct folio *folio;
	struct swap_iocb *splug = NULL;

	rcu_read_lock();
	xas_for_each(&xas, folio, end_index) {
		unsigned long addr;
		swp_entry_t entry;

		if (!xa_is_value(folio))
			continue;
		entry = radix_to_swp_entry(folio);
		/* There might be swapin error entries in shmem mapping. */
		if (non_swap_entry(entry))
			continue;

		addr = vma->vm_start +
			((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT);
		xas_pause(&xas);
		rcu_read_unlock();

		folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
					      vma, addr, &splug);
		if (folio)
			folio_put(folio);

		rcu_read_lock();
	}
	rcu_read_unlock();
	swap_read_unplug(splug);
}
#endif	/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		shmem_swapin_range(vma, start, end, file->f_mapping);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(mm);
	return 0;
}

static inline bool can_do_file_pageout(struct vm_area_struct *vma)
{
	if (!vma->vm_file)
		return false;
	/*
	 * paging out pagecache only for non-anonymous mappings that correspond
	 * to the files the calling process could (if tried) open for writing;
	 * otherwise we'd be including shared non-exclusive mappings, which
	 * opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
					  struct folio *folio, pte_t *ptep,
					  pte_t pte, bool *any_young,
					  bool *any_dirty)
{
	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
	int max_nr = (end - addr) / PAGE_SIZE;

	return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
			       any_young, any_dirty);
}

static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *start_pte, *pte, ptent;
	spinlock_t *ptl;
	struct folio *folio = NULL;
	LIST_HEAD(folio_list);
	bool pageout_anon_only_filter;
	unsigned int batch_count = 0;
	int nr;

	if (fatal_signal_pending(current))
		return -EINTR;

	pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
					!can_do_file_pageout(vma);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		folio = pmd_folio(orig_pmd);

		/* Do not interfere with other mappings of this folio */
		if (folio_maybe_mapped_shared(folio))
			goto huge_unlock;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			folio_get(folio);
			spin_unlock(ptl);
			folio_lock(folio);
			err = split_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
			if (!err)
				goto regular_folio;
			return 0;
		}

		if (!pageout && pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (folio_test_active(folio))
			folio_set_workingset(folio);
		if (pageout) {
			if (folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&folio_list);
		return 0;
	}

regular_folio:
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
restart:
	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!start_pte)
		return 0;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) {
		nr = 1;
		ptent = ptep_get(pte);

		if (++batch_count == SWAP_CLUSTER_MAX) {
			batch_count = 0;
			if (need_resched()) {
				arch_leave_lazy_mmu_mode();
				pte_unmap_unlock(start_pte, ptl);
				cond_resched();
				goto restart;
			}
		}

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * If we encounter a large folio, only split it if it is not
		 * fully mapped within the range we are operating on. Otherwise
		 * leave it as is so that it can be swapped out whole. If we
		 * fail to split a folio, leave it in place and advance to the
		 * next pte in the range.
		 */
		if (folio_test_large(folio)) {
			bool any_young;

			nr = madvise_folio_pte_batch(addr, end, folio, pte,
						     ptent, &any_young, NULL);
			if (any_young)
				ptent = pte_mkyoung(ptent);

			if (nr < folio_nr_pages(folio)) {
				int err;

				if (folio_maybe_mapped_shared(folio))
					continue;
				if (pageout_anon_only_filter && !folio_test_anon(folio))
					continue;
				if (!folio_trylock(folio))
					continue;
				folio_get(folio);
				arch_leave_lazy_mmu_mode();
				pte_unmap_unlock(start_pte, ptl);
				start_pte = NULL;
				err = split_folio(folio);
				folio_unlock(folio);
				folio_put(folio);
				start_pte = pte =
					pte_offset_map_lock(mm, pmd, addr, &ptl);
				if (!start_pte)
					break;
				arch_enter_lazy_mmu_mode();
				if (!err)
					nr = 0;
				continue;
			}
		}

		/*
		 * Do not interfere with other mappings of this folio and
		 * non-LRU folio. If we have a large folio at this point, we
		 * know it is fully mapped so if its mapcount is the same as its
		 * number of pages, it must be exclusive.
		 */
		if (!folio_test_lru(folio) ||
		    folio_mapcount(folio) != folio_nr_pages(folio))
			continue;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			continue;

		if (!pageout && pte_young(ptent)) {
			clear_young_dirty_ptes(vma, addr, pte, nr,
					       CYDP_CLEAR_YOUNG);
			tlb_remove_tlb_entries(tlb, pte, nr, addr);
		}

		/*
		 * We are deactivating a folio to accelerate reclaiming.
		 * The VM cannot reclaim the folio unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking, which
		 * will miss the recent referenced history.
		 */
		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (folio_test_active(folio))
			folio_set_workingset(folio);
		if (pageout) {
			if (folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
	}

	if (start_pte) {
		arch_leave_lazy_mmu_mode();
		pte_unmap_unlock(start_pte, ptl);
	}
	if (pageout)
		reclaim_pages(&folio_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry		= madvise_cold_or_pageout_pte_range,
	.walk_lock		= PGWALK_RDLOCK,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
}

static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static long madvise_pageout(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	/*
	 * If the VMA belongs to a private file mapping, there can be private
	 * dirty pages which can be paged out even if this process is neither
	 * the owner of, nor write-capable of, the file. We therefore still
	 * allow private file mappings to page out dirty anon pages.
	 */
	if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
				(vma->vm_flags & VM_MAYSHARE)))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const cydp_t cydp_flags = CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY;
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte, ptent;
	struct folio *folio;
	int nr_swap = 0;
	unsigned long next;
	int nr, max_nr;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!start_pte)
		return 0;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) {
		nr = 1;
		ptent = ptep_get(pte);

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte has swp_entry, just clear page table to
		 * prevent swap-in which is more expensive rather than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (!non_swap_entry(entry)) {
				max_nr = (end - addr) / PAGE_SIZE;
				nr = swap_pte_batch(pte, max_nr, ptent);
				nr_swap -= nr;
				free_swap_and_cache_nr(entry, nr);
				clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm);
			} else if (is_hwpoison_entry(entry) ||
				   is_poisoned_swp_entry(entry)) {
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			}
			continue;
		}

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * If we encounter a large folio, only split it if it is not
		 * fully mapped within the range we are operating on. Otherwise
		 * leave it as is so that it can be marked as lazyfree. If we
		 * fail to split a folio, leave it in place and advance to the
		 * next pte in the range.
		 */
		if (folio_test_large(folio)) {
			bool any_young, any_dirty;

			nr = madvise_folio_pte_batch(addr, end, folio, pte,
						     ptent, &any_young, &any_dirty);

			if (nr < folio_nr_pages(folio)) {
				int err;

				if (folio_maybe_mapped_shared(folio))
					continue;
				if (!folio_trylock(folio))
					continue;
				folio_get(folio);
				arch_leave_lazy_mmu_mode();
				pte_unmap_unlock(start_pte, ptl);
				start_pte = NULL;
				err = split_folio(folio);
				folio_unlock(folio);
				folio_put(folio);
				pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				start_pte = pte;
				if (!start_pte)
					break;
				arch_enter_lazy_mmu_mode();
				if (!err)
					nr = 0;
				continue;
			}

			if (any_young)
				ptent = pte_mkyoung(ptent);
			if (any_dirty)
				ptent = pte_mkdirty(ptent);
		}

		if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
			if (!folio_trylock(folio))
				continue;
			/*
			 * If we have a large folio at this point, we know it is
			 * fully mapped so if its mapcount is the same as its
			 * number of pages, it must be exclusive.
			 */
			if (folio_mapcount(folio) != folio_nr_pages(folio)) {
				folio_unlock(folio);
				continue;
			}

			if (folio_test_swapcache(folio) &&
			    !folio_free_swap(folio)) {
				folio_unlock(folio);
				continue;
			}

			folio_clear_dirty(folio);
			folio_unlock(folio);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			clear_young_dirty_ptes(vma, addr, pte, nr, cydp_flags);
			tlb_remove_tlb_entries(tlb, pte, nr, addr);
		}
		folio_mark_lazyfree(folio);
	}

	if (nr_swap)
		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	if (start_pte) {
		arch_leave_lazy_mmu_mode();
		pte_unmap_unlock(start_pte, ptl);
	}
	cond_resched();

	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry		= madvise_free_pte_range,
	.walk_lock		= PGWALK_RDLOCK,
};

static int madvise_free_single_vma(struct madvise_behavior *madv_behavior,
		struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather *tlb = madv_behavior->tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				range.start, range.end);

	lru_add_drain();
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, tlb);
	tlb_end_vma(tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range_single call sets things up for shrink_active_list to actually
 * free these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct madvise_behavior *madv_behavior,
					struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	struct zap_details details = {
		.reclaim_pt = true,
		.even_cows = true,
	};

	zap_page_range_single_batched(
			madv_behavior->tlb, vma, start, end - start, &details);
	return 0;
}

static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long *end,
					    int behavior)
{
	if (!is_vm_hugetlb_page(vma)) {
		unsigned int forbidden = VM_PFNMAP;

		if (behavior != MADV_DONTNEED_LOCKED)
			forbidden |= VM_LOCKED;

		return !(vma->vm_flags & forbidden);
	}

	if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
		return false;
	if (start & ~huge_page_mask(hstate_vma(vma)))
		return false;

	/*
	 * Madvise callers expect the length to be rounded up to PAGE_SIZE
	 * boundaries, and may be unaware that this VMA uses huge pages.
	 * Avoid unexpected data loss by rounding down the number of
	 * huge pages freed.
	 */
	*end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));

	return true;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  struct madvise_behavior *madv_behavior)
{
	int behavior = madv_behavior->behavior;
	struct mm_struct *mm = vma->vm_mm;

	*prev = vma;
	if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
		return -EINVAL;

	if (start == end)
		return 0;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(mm);
		vma = vma_lookup(mm, start);
		if (!vma)
			return -ENOMEM;
		/*
		 * Potential end adjustment for hugetlb vma is OK as
		 * the check below keeps end within vma.
		 */
		if (!madvise_dontneed_free_valid_vma(vma, start, &end,
						     behavior))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_lock was
			 * released the effect of the concurrent
			 * operation may not cause madvise() to
			 * have an undefined result. There may be an
			 * adjacent next vma that we'll walk
			 * next. userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		/*
		 * If the memory region between start and end was
		 * originally backed by 4kB pages and then remapped to
		 * be backed by hugepages while mmap_lock was dropped,
		 * the adjustment for hugetlb vma above may have rounded
		 * end down to the start address.
		 */
		if (start == end)
			return 0;
		VM_WARN_ON(start > end);
	}

	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
		return madvise_dontneed_single_vma(
				madv_behavior, vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(madv_behavior, vma, start, end);
	else
		return -EINVAL;
}

static long madvise_populate(struct mm_struct *mm, unsigned long start,
		unsigned long end, int behavior)
{
	const bool write = behavior == MADV_POPULATE_WRITE;
	int locked = 1;
	long pages;

	while (start < end) {
		/* Populate (prefault) page tables readable/writable. */
		pages = faultin_page_range(mm, start, end, write, &locked);
		if (!locked) {
			mmap_read_lock(mm);
			locked = 1;
		}
		if (pages < 0) {
			switch (pages) {
			case -EINTR:
				return -EINTR;
			case -EINVAL: /* Incompatible mappings / permissions. */
				return -EINVAL;
			case -EHWPOISON:
				return -EHWPOISON;
			case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
				return -EFAULT;
			default:
				pr_warn_once("%s: unhandled return value: %ld\n",
					     __func__, pages);
				fallthrough;
			case -ENOMEM: /* No VMA or out of memory. */
				return -ENOMEM;
			}
		}
		start += pages * PAGE_SIZE;
	}
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;
	struct mm_struct *mm = vma->vm_mm;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if (!vma_is_shared_maywrite(vma))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_rwsem.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(mm);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	mmap_read_lock(mm);
	return error;
}

static bool is_valid_guard_vma(struct vm_area_struct *vma, bool allow_locked)
{
	vm_flags_t disallowed = VM_SPECIAL | VM_HUGETLB;

	/*
	 * A user could lock after setting a guard range but that's fine, as
	 * they'd not be able to fault in. The issue arises when we try to zap
	 * existing locked VMAs. We don't want to do that.
	 */
	if (!allow_locked)
		disallowed |= VM_LOCKED;

	return !(vma->vm_flags & disallowed);
}

static bool is_guard_pte_marker(pte_t ptent)
{
	return is_pte_marker(ptent) &&
		is_guard_swp_entry(pte_to_swp_entry(ptent));
}

static int guard_install_pud_entry(pud_t *pud, unsigned long addr,
				   unsigned long next, struct mm_walk *walk)
{
	pud_t pudval = pudp_get(pud);

	/* If huge return >0 so we abort the operation + zap. */
	return pud_trans_huge(pudval) || pud_devmap(pudval);
}

static int guard_install_pmd_entry(pmd_t *pmd, unsigned long addr,
				   unsigned long next, struct mm_walk *walk)
{
	pmd_t pmdval = pmdp_get(pmd);

	/* If huge return >0 so we abort the operation + zap. */
	return pmd_trans_huge(pmdval) || pmd_devmap(pmdval);
}

static int guard_install_pte_entry(pte_t *pte, unsigned long addr,
				   unsigned long next, struct mm_walk *walk)
{
	pte_t pteval = ptep_get(pte);
	unsigned long *nr_pages = (unsigned long *)walk->private;

	/* If there is already a guard page marker, we have nothing to do. */
	if (is_guard_pte_marker(pteval)) {
		(*nr_pages)++;

		return 0;
	}

	/* If populated return >0 so we abort the operation + zap. */
	return 1;
}

static int guard_install_set_pte(unsigned long addr, unsigned long next,
				 pte_t *ptep, struct mm_walk *walk)
{
	unsigned long *nr_pages = (unsigned long *)walk->private;

	/* Simply install a PTE marker, this causes segfault on access. */
	*ptep = make_pte_marker(PTE_MARKER_GUARD);
	(*nr_pages)++;

	return 0;
}

static const struct mm_walk_ops guard_install_walk_ops = {
	.pud_entry		= guard_install_pud_entry,
	.pmd_entry		= guard_install_pmd_entry,
	.pte_entry		= guard_install_pte_entry,
	.install_pte		= guard_install_set_pte,
	.walk_lock		= PGWALK_RDLOCK,
};

static long madvise_guard_install(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end)
{
	long err;
	int i;

	*prev = vma;
	if (!is_valid_guard_vma(vma, /* allow_locked = */false))
		return -EINVAL;

	/*
	 * If we install guard markers, then the range is no longer
	 * empty from a page table perspective and therefore it's
	 * appropriate to have an anon_vma.
	 *
	 * This ensures that on fork, we copy page tables correctly.
	 */
	err = anon_vma_prepare(vma);
	if (err)
		return err;

	/*
	 * Optimistically try to install the guard marker pages first. If any
	 * non-guard pages are encountered, give up and zap the range before
	 * trying again.
	 *
	 * We try a few times before giving up and releasing back to userland
	 * to loop around, releasing locks in the process to avoid contention.
	 * This would only happen if there were a great many racing page
	 * faults.
	 *
	 * In most cases we should simply install the guard markers immediately
	 * with no zap or looping.
	 */
	for (i = 0; i < MAX_MADVISE_GUARD_RETRIES; i++) {
		unsigned long nr_pages = 0;

		/* Returns < 0 on error, == 0 if success, > 0 if zap needed. */
		err = walk_page_range_mm(vma->vm_mm, start, end,
					 &guard_install_walk_ops, &nr_pages);
		if (err < 0)
			return err;

		if (err == 0) {
			unsigned long nr_expected_pages = PHYS_PFN(end - start);

			VM_WARN_ON(nr_pages != nr_expected_pages);
			return 0;
		}

		/*
		 * OK, some of the range has non-guard pages mapped; zap
		 * them. This leaves existing guard pages in place.
		 */
		zap_page_range_single(vma, start, end - start, NULL);
	}

	/*
	 * We were unable to install the guard pages due to being raced by page
	 * faults. This should not happen ordinarily. We return to userspace and
	 * immediately retry, relieving lock contention.
	 */
	return restart_syscall();
}

static int guard_remove_pud_entry(pud_t *pud, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	pud_t pudval = pudp_get(pud);

	/* If huge, cannot have guard pages present, so no-op - skip. */
	if (pud_trans_huge(pudval) || pud_devmap(pudval))
		walk->action = ACTION_CONTINUE;

	return 0;
}

static int guard_remove_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	pmd_t pmdval = pmdp_get(pmd);

	/* If huge, cannot have guard pages present, so no-op - skip. */
	if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
		walk->action = ACTION_CONTINUE;

	return 0;
}

static int guard_remove_pte_entry(pte_t *pte, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	pte_t ptent = ptep_get(pte);

	if (is_guard_pte_marker(ptent)) {
		/* Simply clear the PTE marker. */
		pte_clear_not_present_full(walk->mm, addr, pte, false);
		update_mmu_cache(walk->vma, addr, pte);
	}

	return 0;
}

static const struct mm_walk_ops guard_remove_walk_ops = {
	.pud_entry		= guard_remove_pud_entry,
	.pmd_entry		= guard_remove_pmd_entry,
	.pte_entry		= guard_remove_pte_entry,
	.walk_lock		= PGWALK_RDLOCK,
};

static long madvise_guard_remove(struct vm_area_struct *vma,
				 struct vm_area_struct **prev,
				 unsigned long start, unsigned long end)
{
	*prev = vma;
	/*
	 * We're ok with removing guards in mlock()'d ranges, as this is a
	 * non-destructive action.
	 */
	if (!is_valid_guard_vma(vma, /* allow_locked = */true))
		return -EINVAL;

	return walk_page_range(vma->vm_mm, start, end,
			       &guard_remove_walk_ops, NULL);
}

/*
 * Apply an madvise behavior to a region of a vma.  madvise_update_vma
 * will handle splitting a vm area into separate areas, each area with its own
 * behavior.
 */
static int madvise_vma_behavior(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end,
				void *behavior_arg)
{
	struct madvise_behavior *arg = behavior_arg;
	int behavior = arg->behavior;
	int error;
	struct anon_vma_name *anon_name;
	unsigned long new_flags = vma->vm_flags;

	if (unlikely(!can_modify_vma_madv(vma, behavior)))
		return -EPERM;

	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
		return madvise_dontneed_free(vma, prev, start, end, arg);
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO)
			return -EINVAL;
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED)
			return -EINVAL;
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		if (vma->vm_flags & VM_DROPPABLE)
			return -EINVAL;
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if ((!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) ||
		    (vma->vm_flags & VM_DROPPABLE))
			return -EINVAL;
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	case MADV_COLLAPSE:
		return madvise_collapse(vma, prev, start, end);
	case MADV_GUARD_INSTALL:
		return madvise_guard_install(vma, prev, start, end);
	case MADV_GUARD_REMOVE:
		return madvise_guard_remove(vma, prev, start, end);
	}

	anon_name = anon_vma_name(vma);
	anon_vma_name_get(anon_name);
	error = madvise_update_vma(vma, prev, start, end, new_flags,
				   anon_name);
	anon_vma_name_put(anon_name);

out:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	unsigned long size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += size) {
		unsigned long pfn;
		struct page *page;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page.
		 */
		size = page_size(compound_head(page));

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
		} else {
			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = memory_failure(pfn, MF_ACTION_REQUIRED | MF_COUNT_INCREASED | MF_SW_SIMULATED);
			if (ret == -EOPNOTSUPP)
				ret = 0;
		}

		if (ret)
			return ret;
	}

	return 0;
}

static bool is_memory_failure(int behavior)
{
	switch (behavior) {
	case MADV_HWPOISON:
	case MADV_SOFT_OFFLINE:
		return true;
	default:
		return false;
	}
}

#else

static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	return 0;
}

static bool is_memory_failure(int behavior)
{
	return false;
}

#endif	/* CONFIG_MEMORY_FAILURE */

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_FREE:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
	case MADV_COLLAPSE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
	case MADV_GUARD_INSTALL:
	case MADV_GUARD_REMOVE:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

/* Can we invoke process_madvise() on a remote mm for the specified behavior? */
static bool process_madvise_remote_valid(int behavior)
{
	switch (behavior) {
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_WILLNEED:
	case MADV_COLLAPSE:
		return true;
	default:
		return false;
	}
}

/*
 * Walk the vmas in range [start,end), and call the visit function on each one.
 * The visit function will get start and end parameters that cover the overlap
 * between the current vma and the original range.  Any unmapped regions in the
 * original range will result in this function returning -ENOMEM while still
 * calling the visit function on all of the existing vmas in the range.
 * Must be called with the mmap_lock held for reading or writing.
 */
static
int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
		      unsigned long end, void *arg,
		      int (*visit)(struct vm_area_struct *vma,
				   struct vm_area_struct **prev, unsigned long start,
				   unsigned long end, void *arg))
{
	struct vm_area_struct *vma;
	struct vm_area_struct *prev;
	unsigned long tmp;
	int unmapped_error = 0;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		int error;

		/* Still start < end. */
		if (!vma)
			return -ENOMEM;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				break;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = visit(vma, &prev, start, tmp, arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		if (start >= end)
			break;
		if (prev)
			vma = find_vma(mm, prev->vm_end);
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(mm, start);
	}

	return unmapped_error;
}

#ifdef CONFIG_ANON_VMA_NAME
static int madvise_vma_anon_name(struct vm_area_struct *vma,
				 struct vm_area_struct **prev,
				 unsigned long start, unsigned long end,
				 void *anon_name)
{
	int error;

	/* Only anonymous mappings can be named */
	if (vma->vm_file && !vma_is_anon_shmem(vma))
		return -EBADF;

	error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
				   anon_name);

	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in, struct anon_vma_name *anon_name)
{
	unsigned long end;
	unsigned long len;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	return madvise_walk_vmas(mm, start, end, anon_name,
				 madvise_vma_anon_name);
}
#endif /* CONFIG_ANON_VMA_NAME */

static int madvise_lock(struct mm_struct *mm, int behavior)
{
	if (is_memory_failure(behavior))
		return 0;

	if (madvise_need_mmap_write(behavior)) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
	} else {
		mmap_read_lock(mm);
	}
	return 0;
}

static void madvise_unlock(struct mm_struct *mm, int behavior)
{
	if (is_memory_failure(behavior))
		return;

	if (madvise_need_mmap_write(behavior))
		mmap_write_unlock(mm);
	else
		mmap_read_unlock(mm);
}

static bool madvise_batch_tlb_flush(int behavior)
{
	switch (behavior) {
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_FREE:
		return true;
	default:
		return false;
	}
}

static void madvise_init_tlb(struct madvise_behavior *madv_behavior,
		struct mm_struct *mm)
{
	if (madvise_batch_tlb_flush(madv_behavior->behavior))
		tlb_gather_mmu(madv_behavior->tlb, mm);
}

static void madvise_finish_tlb(struct madvise_behavior *madv_behavior)
{
	if (madvise_batch_tlb_flush(madv_behavior->behavior))
		tlb_finish_mmu(madv_behavior->tlb);
}

static bool is_valid_madvise(unsigned long start, size_t len_in, int behavior)
{
	size_t len;

	if (!madvise_behavior_valid(behavior))
		return false;

	if (!PAGE_ALIGNED(start))
		return false;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return false;

	if (start + len < start)
		return false;

	return true;
}

/*
 * madvise_should_skip() - Return whether the request is invalid or a no-op.
 * @start:	Start address of madvise-requested address range.
 * @len_in:	Length of madvise-requested address range.
 * @behavior:	Requested madvise behavior.
 * @err:	Pointer to store an error code from the check.
 *
 * If the specified behaviour is invalid or nothing would occur, we skip the
 * operation.  This function returns true in those cases, otherwise false.  In
 * the former case we store an error on @err.
 */
static bool madvise_should_skip(unsigned long start, size_t len_in,
		int behavior, int *err)
{
	if (!is_valid_madvise(start, len_in, behavior)) {
		*err = -EINVAL;
		return true;
	}
	if (start + PAGE_ALIGN(len_in) == start) {
		*err = 0;
		return true;
	}
	return false;
}

static bool is_madvise_populate(int behavior)
{
	switch (behavior) {
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
		return true;
	default:
		return false;
	}
}

static int madvise_do_behavior(struct mm_struct *mm,
		unsigned long start, size_t len_in,
		struct madvise_behavior *madv_behavior)
{
	int behavior = madv_behavior->behavior;
	struct blk_plug plug;
	unsigned long end;
	int error;

	if (is_memory_failure(behavior))
		return madvise_inject_error(behavior, start, start + len_in);
	start = untagged_addr_remote(mm, start);
	end = start + PAGE_ALIGN(len_in);

	blk_start_plug(&plug);
	if (is_madvise_populate(behavior))
		error = madvise_populate(mm, start, end, behavior);
	else
		error = madvise_walk_vmas(mm, start, end, madv_behavior,
					  madvise_vma_behavior);
	blk_finish_plug(&plug);
	return error;
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_COLLAPSE - synchronously coalesce pages into new THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *  MADV_POPULATE_READ - populate (prefault) page tables readable by
 *		triggering read faults if required.
 *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
 *		triggering write faults if required.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 *  -EPERM  - memory is sealed.
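 *
 * An illustrative userspace sketch of requesting the advice documented above
 * (not kernel code; buf and length are hypothetical and error handling is
 * abbreviated):
 *
 *	size_t length = 16 * 4096;
 *	char *buf = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	...use buf...
 *	if (madvise(buf, length, MADV_DONTNEED))
 *		perror("madvise");
 *	...subsequent reads of buf observe zero-filled pages...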
1808 */ 1809 int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior) 1810 { 1811 int error; 1812 struct mmu_gather tlb; 1813 struct madvise_behavior madv_behavior = { 1814 .behavior = behavior, 1815 .tlb = &tlb, 1816 }; 1817 1818 if (madvise_should_skip(start, len_in, behavior, &error)) 1819 return error; 1820 error = madvise_lock(mm, behavior); 1821 if (error) 1822 return error; 1823 madvise_init_tlb(&madv_behavior, mm); 1824 error = madvise_do_behavior(mm, start, len_in, &madv_behavior); 1825 madvise_finish_tlb(&madv_behavior); 1826 madvise_unlock(mm, behavior); 1827 1828 return error; 1829 } 1830 1831 SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) 1832 { 1833 return do_madvise(current->mm, start, len_in, behavior); 1834 } 1835 1836 /* Perform an madvise operation over a vector of addresses and lengths. */ 1837 static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter, 1838 int behavior) 1839 { 1840 ssize_t ret = 0; 1841 size_t total_len; 1842 struct mmu_gather tlb; 1843 struct madvise_behavior madv_behavior = { 1844 .behavior = behavior, 1845 .tlb = &tlb, 1846 }; 1847 1848 total_len = iov_iter_count(iter); 1849 1850 ret = madvise_lock(mm, behavior); 1851 if (ret) 1852 return ret; 1853 madvise_init_tlb(&madv_behavior, mm); 1854 1855 while (iov_iter_count(iter)) { 1856 unsigned long start = (unsigned long)iter_iov_addr(iter); 1857 size_t len_in = iter_iov_len(iter); 1858 int error; 1859 1860 if (madvise_should_skip(start, len_in, behavior, &error)) 1861 ret = error; 1862 else 1863 ret = madvise_do_behavior(mm, start, len_in, 1864 &madv_behavior); 1865 /* 1866 * An madvise operation is attempting to restart the syscall, 1867 * but we cannot proceed as it would not be correct to repeat 1868 * the operation in aggregate, and would be surprising to the 1869 * user. 1870 * 1871 * We drop and reacquire locks so it is safe to just loop and 1872 * try again. We check for fatal signals in case we need exit 1873 * early anyway. 1874 */ 1875 if (ret == -ERESTARTNOINTR) { 1876 if (fatal_signal_pending(current)) { 1877 ret = -EINTR; 1878 break; 1879 } 1880 1881 /* Drop and reacquire lock to unwind race. */ 1882 madvise_finish_tlb(&madv_behavior); 1883 madvise_unlock(mm, behavior); 1884 ret = madvise_lock(mm, behavior); 1885 if (ret) 1886 goto out; 1887 madvise_init_tlb(&madv_behavior, mm); 1888 continue; 1889 } 1890 if (ret < 0) 1891 break; 1892 iov_iter_advance(iter, iter_iov_len(iter)); 1893 } 1894 madvise_finish_tlb(&madv_behavior); 1895 madvise_unlock(mm, behavior); 1896 1897 out: 1898 ret = (total_len - iov_iter_count(iter)) ? : ret; 1899 1900 return ret; 1901 } 1902 1903 SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, 1904 size_t, vlen, int, behavior, unsigned int, flags) 1905 { 1906 ssize_t ret; 1907 struct iovec iovstack[UIO_FASTIOV]; 1908 struct iovec *iov = iovstack; 1909 struct iov_iter iter; 1910 struct task_struct *task; 1911 struct mm_struct *mm; 1912 unsigned int f_flags; 1913 1914 if (flags != 0) { 1915 ret = -EINVAL; 1916 goto out; 1917 } 1918 1919 ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); 1920 if (ret < 0) 1921 goto out; 1922 1923 task = pidfd_get_task(pidfd, &f_flags); 1924 if (IS_ERR(task)) { 1925 ret = PTR_ERR(task); 1926 goto free_iov; 1927 } 1928 1929 /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. 
	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR(mm)) {
		ret = PTR_ERR(mm);
		goto release_task;
	}

	/*
	 * We need only perform this check if we are attempting to manipulate a
	 * remote process's address space.
	 */
	if (mm != current->mm && !process_madvise_remote_valid(behavior)) {
		ret = -EINVAL;
		goto release_mm;
	}

	/*
	 * Require CAP_SYS_NICE for influencing process performance. Note that
	 * only non-destructive hints are currently supported for remote
	 * processes.
	 */
	if (mm != current->mm && !capable(CAP_SYS_NICE)) {
		ret = -EPERM;
		goto release_mm;
	}

	ret = vector_madvise(mm, &iter, behavior);

release_mm:
	mmput(mm);
release_task:
	put_task_struct(task);
free_iov:
	kfree(iov);
out:
	return ret;
}
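
/*
 * Illustrative userspace sketch of driving the process_madvise() syscall
 * defined above (not kernel code; pid, addr and len are hypothetical, and a
 * raw syscall() is used since a libc wrapper may not be available):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct iovec vec = { .iov_base = addr, .iov_len = len };
 *
 *	if (syscall(SYS_process_madvise, pidfd, &vec, 1, MADV_COLD, 0) < 0)
 *		perror("process_madvise");
 */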