// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm_inline.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
#include "swap.h"

struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
	case MADV_COLLAPSE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

#ifdef CONFIG_ANON_VMA_NAME
struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	struct anon_vma_name *anon_name;
	size_t count;

	/* Add 1 for NUL terminator at the end of the anon_name->name */
	count = strlen(name) + 1;
	anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
	if (anon_name) {
		kref_init(&anon_name->kref);
		memcpy(anon_name->name, name, count);
	}

	return anon_name;
}

void anon_vma_name_free(struct kref *kref)
{
	struct anon_vma_name *anon_name =
			container_of(kref, struct anon_vma_name, kref);
	kfree(anon_name);
}

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);

	return vma->anon_name;
}

/* mmap_lock should be write-locked */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	struct anon_vma_name *orig_name = anon_vma_name(vma);

	if (!anon_name) {
		vma->anon_name = NULL;
		anon_vma_name_put(orig_name);
		return 0;
	}

	if (anon_vma_name_eq(orig_name, anon_name))
		return 0;

	vma->anon_name = anon_vma_name_reuse(anon_name);
	anon_vma_name_put(orig_name);

	return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	if (anon_name)
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_ANON_VMA_NAME */
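/*
 * Illustrative userspace counterpart (not kernel code): on kernels built
 * with CONFIG_ANON_VMA_NAME, replace_anon_vma_name() above is reached via
 * prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...), not via madvise() itself.
 * A minimal sketch; the mapping size and the name string are arbitrary:
 *
 *	#include <sys/mman.h>
 *	#include <sys/prctl.h>
 *
 *	int name_region(void)
 *	{
 *		size_t len = 16 * 4096;
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return -1;
 *		// Appears as "[anon:my pool]" in /proc/<pid>/maps.
 *		return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 *			     (unsigned long)p, len, (unsigned long)"my pool");
 *	}
 */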
/*
 * Update the vm_flags on a region of a vma, splitting it or merging it as
 * necessary. Must be called with mmap_lock held for writing; the caller
 * should ensure anon_name stability by raising its refcount, even when
 * anon_name belongs to a valid vma, because this function might free that
 * vma.
 */
static int madvise_update_vma(struct vm_area_struct *vma,
			      struct vm_area_struct **prev, unsigned long start,
			      unsigned long end, unsigned long new_flags,
			      struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;
	VMA_ITERATOR(vmi, mm, start);

	if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
		*prev = vma;
		return 0;
	}

	vma = vma_modify_flags_name(&vmi, *prev, vma, start, end, new_flags,
				    anon_name);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	*prev = vma;

	/* vm_flags is protected by the mmap_lock held in write mode. */
	vma_start_write(vma);
	vm_flags_reset(vma, new_flags);
	if (!vma->vm_file || vma_is_anon_shmem(vma)) {
		error = replace_anon_vma_name(vma, anon_name);
		if (error)
			return error;
	}

	return 0;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
		unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	struct swap_iocb *splug = NULL;
	pte_t *ptep = NULL;
	spinlock_t *ptl;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct folio *folio;

		if (!ptep++) {
			ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
			if (!ptep)
				break;
		}

		pte = ptep_get(ptep);
		if (!is_swap_pte(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		pte_unmap_unlock(ptep, ptl);
		ptep = NULL;

		folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					      vma, addr, &splug);
		if (folio)
			folio_put(folio);
	}

	if (ptep)
		pte_unmap_unlock(ptep, ptl);
	swap_read_unplug(splug);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry		= swapin_walk_pmd_entry,
	.walk_lock		= PGWALK_RDLOCK,
};

static void shmem_swapin_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end) - 1;
	struct folio *folio;
	struct swap_iocb *splug = NULL;

	rcu_read_lock();
	xas_for_each(&xas, folio, end_index) {
		unsigned long addr;
		swp_entry_t entry;

		if (!xa_is_value(folio))
			continue;
		entry = radix_to_swp_entry(folio);
		/* There might be swapin error entries in shmem mapping. */
		if (non_swap_entry(entry))
			continue;

		addr = vma->vm_start +
			((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT);
		xas_pause(&xas);
		rcu_read_unlock();

		folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
					      vma, addr, &splug);
		if (folio)
			folio_put(folio);

		rcu_read_lock();
	}
	rcu_read_unlock();
	swap_read_unplug(splug);
}
#endif	/* CONFIG_SWAP */
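/*
 * Illustrative userspace trigger for madvise_willneed() below (not kernel
 * code). A hedged sketch: the path and length are placeholders, and the
 * kernel is free to ignore the hint entirely.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	void *map_and_prefetch(const char *path, size_t len)
 *	{
 *		int fd = open(path, O_RDONLY);
 *		void *p;
 *
 *		if (fd < 0)
 *			return NULL;
 *		p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *		close(fd);	// the mapping keeps its own file reference
 *		if (p == MAP_FAILED)
 *			return NULL;
 *		// Schedules readahead and returns without waiting for I/O.
 *		madvise(p, len, MADV_WILLNEED);
 *		return p;
 *	}
 */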
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		shmem_swapin_range(vma, start, end, file->f_mapping);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(mm);
	return 0;
}

static inline bool can_do_file_pageout(struct vm_area_struct *vma)
{
	if (!vma->vm_file)
		return false;
	/*
	 * Page out the page cache only for non-anonymous mappings that
	 * correspond to files the calling process could (if it tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
					  struct folio *folio, pte_t *ptep,
					  pte_t pte, bool *any_young,
					  bool *any_dirty)
{
	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
	int max_nr = (end - addr) / PAGE_SIZE;

	return folio_pte_batch(folio, addr, ptep, pte, max_nr, fpb_flags, NULL,
			       any_young, any_dirty);
}

static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *start_pte, *pte, ptent;
	spinlock_t *ptl;
	struct folio *folio = NULL;
	LIST_HEAD(folio_list);
	bool pageout_anon_only_filter;
	unsigned int batch_count = 0;
	int nr;

	if (fatal_signal_pending(current))
		return -EINTR;

	pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
					!can_do_file_pageout(vma);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		folio = pmd_folio(orig_pmd);
		/* Do not interfere with other mappings of this folio */
		if (folio_likely_mapped_shared(folio))
			goto huge_unlock;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			folio_get(folio);
			spin_unlock(ptl);
			folio_lock(folio);
			err = split_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
			if (!err)
				goto regular_folio;
			return 0;
		}

		if (!pageout && pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (folio_test_active(folio))
			folio_set_workingset(folio);
		if (pageout) {
			if (folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&folio_list);
		return 0;
	}

regular_folio:
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
restart:
	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!start_pte)
		return 0;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) {
		nr = 1;
		ptent = ptep_get(pte);

		if (++batch_count == SWAP_CLUSTER_MAX) {
			batch_count = 0;
			if (need_resched()) {
				arch_leave_lazy_mmu_mode();
				pte_unmap_unlock(start_pte, ptl);
				cond_resched();
				goto restart;
			}
		}

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * If we encounter a large folio, only split it if it is not
		 * fully mapped within the range we are operating on. Otherwise
		 * leave it as is so that it can be swapped out whole. If we
		 * fail to split a folio, leave it in place and advance to the
		 * next pte in the range.
		 */
		if (folio_test_large(folio)) {
			bool any_young;

			nr = madvise_folio_pte_batch(addr, end, folio, pte,
						     ptent, &any_young, NULL);
			if (any_young)
				ptent = pte_mkyoung(ptent);

			if (nr < folio_nr_pages(folio)) {
				int err;

				if (folio_likely_mapped_shared(folio))
					continue;
				if (pageout_anon_only_filter && !folio_test_anon(folio))
					continue;
				if (!folio_trylock(folio))
					continue;
				folio_get(folio);
				arch_leave_lazy_mmu_mode();
				pte_unmap_unlock(start_pte, ptl);
				start_pte = NULL;
				err = split_folio(folio);
				folio_unlock(folio);
				folio_put(folio);
				start_pte = pte =
					pte_offset_map_lock(mm, pmd, addr, &ptl);
				if (!start_pte)
					break;
				arch_enter_lazy_mmu_mode();
				if (!err)
					nr = 0;
				continue;
			}
		}

		/*
		 * Do not interfere with other mappings of this folio, and
		 * skip non-LRU folios. If we have a large folio at this
		 * point, we know it is fully mapped, so if its mapcount is
		 * the same as its number of pages, it must be exclusive.
		 */
		if (!folio_test_lru(folio) ||
		    folio_mapcount(folio) != folio_nr_pages(folio))
			continue;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			continue;

		if (!pageout && pte_young(ptent)) {
			clear_young_dirty_ptes(vma, addr, pte, nr,
					       CYDP_CLEAR_YOUNG);
			tlb_remove_tlb_entries(tlb, pte, nr, addr);
		}

		/*
		 * We are deactivating a folio to accelerate its reclaim.
		 * The VM cannot reclaim the folio unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking, which
		 * will miss the recent reference history.
		 */
		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (folio_test_active(folio))
			folio_set_workingset(folio);
		if (pageout) {
			if (folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
	}

	if (start_pte) {
		arch_leave_lazy_mmu_mode();
		pte_unmap_unlock(start_pte, ptl);
	}
	if (pageout)
		reclaim_pages(&folio_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_or_pageout_pte_range,
	.walk_lock = PGWALK_RDLOCK,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
}

static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}
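/*
 * Illustrative userspace use of the two hints implemented here (not kernel
 * code): MADV_COLD deactivates the range so it is reclaimed first under
 * memory pressure, while MADV_PAGEOUT reclaims it immediately. The buffer
 * below is a stand-in for any rarely used region:
 *
 *	#include <sys/mman.h>
 *
 *	void shed_cache(void *buf, size_t len, int urgent)
 *	{
 *		// Contents are preserved; only memory residency is affected.
 *		madvise(buf, len, urgent ? MADV_PAGEOUT : MADV_COLD);
 *	}
 */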
static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static long madvise_pageout(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	/*
	 * If the VMA belongs to a private file mapping, there can be private
	 * dirty pages which can be paged out even if this process is neither
	 * the owner of nor write-capable on the file. Still allow private
	 * file mappings to page out their dirty anonymous pages.
	 */
	if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
				(vma->vm_flags & VM_MAYSHARE)))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)

{
	const cydp_t cydp_flags = CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY;
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte, ptent;
	struct folio *folio;
	int nr_swap = 0;
	unsigned long next;
	int nr, max_nr;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!start_pte)
		return 0;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) {
		nr = 1;
		ptent = ptep_get(pte);

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent a swap-in, which is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (!non_swap_entry(entry)) {
				max_nr = (end - addr) / PAGE_SIZE;
				nr = swap_pte_batch(pte, max_nr, ptent);
				nr_swap -= nr;
				free_swap_and_cache_nr(entry, nr);
				clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm);
			} else if (is_hwpoison_entry(entry) ||
				   is_poisoned_swp_entry(entry)) {
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			}
			continue;
		}

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * If we encounter a large folio, only split it if it is not
		 * fully mapped within the range we are operating on. Otherwise
		 * leave it as is so that it can be marked as lazyfree. If we
		 * fail to split a folio, leave it in place and advance to the
		 * next pte in the range.
		 */
		if (folio_test_large(folio)) {
			bool any_young, any_dirty;

			nr = madvise_folio_pte_batch(addr, end, folio, pte,
						     ptent, &any_young, &any_dirty);

			if (nr < folio_nr_pages(folio)) {
				int err;

				if (folio_likely_mapped_shared(folio))
					continue;
				if (!folio_trylock(folio))
					continue;
				folio_get(folio);
				arch_leave_lazy_mmu_mode();
				pte_unmap_unlock(start_pte, ptl);
				start_pte = NULL;
				err = split_folio(folio);
				folio_unlock(folio);
				folio_put(folio);
				pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				start_pte = pte;
				if (!start_pte)
					break;
				arch_enter_lazy_mmu_mode();
				if (!err)
					nr = 0;
				continue;
			}

			if (any_young)
				ptent = pte_mkyoung(ptent);
			if (any_dirty)
				ptent = pte_mkdirty(ptent);
		}

		if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
			if (!folio_trylock(folio))
				continue;
			/*
			 * If we have a large folio at this point, we know it is
			 * fully mapped so if its mapcount is the same as its
			 * number of pages, it must be exclusive.
			 */
			if (folio_mapcount(folio) != folio_nr_pages(folio)) {
				folio_unlock(folio);
				continue;
			}

			if (folio_test_swapcache(folio) &&
			    !folio_free_swap(folio)) {
				folio_unlock(folio);
				continue;
			}

			folio_clear_dirty(folio);
			folio_unlock(folio);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			clear_young_dirty_ptes(vma, addr, pte, nr, cydp_flags);
			tlb_remove_tlb_entries(tlb, pte, nr, addr);
		}
		folio_mark_lazyfree(folio);
	}

	if (nr_swap)
		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	if (start_pte) {
		arch_leave_lazy_mmu_mode();
		pte_unmap_unlock(start_pte, ptl);
	}
	cond_resched();

	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry		= madvise_free_pte_range,
	.walk_lock		= PGWALK_RDLOCK,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);

	return 0;
}
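/*
 * Illustrative userspace contrast between the two destructive hints handled
 * below (not kernel code). A common allocator pattern, sketched under the
 * assumption that the region is a private anonymous mapping:
 *
 *	#include <sys/mman.h>
 *
 *	void release_chunk(void *p, size_t len, int lazy)
 *	{
 *		if (lazy)
 *			// MADV_FREE: pages are reclaimed only under memory
 *			// pressure; until then reads may still see old data,
 *			// and a write cancels the lazy-free state.
 *			madvise(p, len, MADV_FREE);
 *		else
 *			// MADV_DONTNEED: discards immediately; the next
 *			// access observes zero-filled pages.
 *			madvise(p, len, MADV_DONTNEED);
 *	}
 */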
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range_single call sets things up for shrink_active_list to actually
 * free these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range_single(vma, start, end - start, NULL);
	return 0;
}

static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long *end,
					    int behavior)
{
	if (!is_vm_hugetlb_page(vma)) {
		unsigned int forbidden = VM_PFNMAP;

		if (behavior != MADV_DONTNEED_LOCKED)
			forbidden |= VM_LOCKED;

		return !(vma->vm_flags & forbidden);
	}

	if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
		return false;
	if (start & ~huge_page_mask(hstate_vma(vma)))
		return false;

	/*
	 * Madvise callers expect the length to be rounded up to PAGE_SIZE
	 * boundaries, and may be unaware that this VMA uses huge pages.
	 * Avoid unexpected data loss by rounding down the number of
	 * huge pages freed.
	 */
	*end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));

	return true;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	struct mm_struct *mm = vma->vm_mm;

	*prev = vma;
	if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
		return -EINVAL;

	if (start == end)
		return 0;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(mm);
		vma = vma_lookup(mm, start);
		if (!vma)
			return -ENOMEM;
		/*
		 * Potential end adjustment for hugetlb vma is OK as
		 * the check below keeps end within vma.
		 */
		if (!madvise_dontneed_free_valid_vma(vma, start, &end,
						     behavior))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_lock was
			 * released the effect of the concurrent
			 * operation may not cause madvise() to
			 * have an undefined result. There may be an
			 * adjacent next vma that we'll walk
			 * next. userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}
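/*
 * Illustrative userspace trigger for madvise_populate() below (not kernel
 * code): prefault a freshly created mapping so the first real accesses do
 * not take page faults. The size is arbitrary:
 *
 *	#include <sys/mman.h>
 *
 *	void *alloc_prefaulted(size_t len)
 *	{
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return NULL;
 *		// MADV_POPULATE_WRITE write-faults every page in the range;
 *		// MADV_POPULATE_READ is the read-only counterpart.
 *		madvise(p, len, MADV_POPULATE_WRITE);
 *		return p;
 *	}
 */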
static long madvise_populate(struct mm_struct *mm, unsigned long start,
		unsigned long end, int behavior)
{
	const bool write = behavior == MADV_POPULATE_WRITE;
	int locked = 1;
	long pages;

	while (start < end) {
		/* Populate (prefault) page tables readable/writable. */
		pages = faultin_page_range(mm, start, end, write, &locked);
		if (!locked) {
			mmap_read_lock(mm);
			locked = 1;
		}
		if (pages < 0) {
			switch (pages) {
			case -EINTR:
				return -EINTR;
			case -EINVAL: /* Incompatible mappings / permissions. */
				return -EINVAL;
			case -EHWPOISON:
				return -EHWPOISON;
			case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
				return -EFAULT;
			default:
				pr_warn_once("%s: unhandled return value: %ld\n",
					     __func__, pages);
				fallthrough;
			case -ENOMEM: /* No VMA or out of memory. */
				return -ENOMEM;
			}
		}
		start += pages * PAGE_SIZE;
	}
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;
	struct mm_struct *mm = vma->vm_mm;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if (!vma_is_shared_maywrite(vma))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_rwsem.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(mm);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	mmap_read_lock(mm);
	return error;
}

/*
 * Apply an madvise behavior to a region of a vma.  madvise_update_vma
 * will handle splitting a vm area into separate areas, each area with its own
 * behavior.
 */
static int madvise_vma_behavior(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end,
				unsigned long behavior)
{
	int error;
	struct anon_vma_name *anon_name;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO)
			return -EINVAL;
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED)
			return -EINVAL;
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
			return -EINVAL;
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	case MADV_COLLAPSE:
		return madvise_collapse(vma, prev, start, end);
	}

	anon_name = anon_vma_name(vma);
	anon_vma_name_get(anon_name);
	error = madvise_update_vma(vma, prev, start, end, new_flags,
				   anon_name);
	anon_vma_name_put(anon_name);

out:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	unsigned long size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += size) {
		unsigned long pfn;
		struct page *page;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it; therefore, in the second loop "page" will
		 * no longer be a compound page.
		 */
		size = page_size(compound_head(page));

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
		} else {
			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED);
			if (ret == -EOPNOTSUPP)
				ret = 0;
		}

		if (ret)
			return ret;
	}

	return 0;
}
#endif

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_FREE:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
	case MADV_COLLAPSE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

static bool process_madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_WILLNEED:
	case MADV_COLLAPSE:
		return true;
	default:
		return false;
	}
}

/*
 * Walk the vmas in range [start,end), and call the visit function on each one.
 * The visit function will get start and end parameters that cover the overlap
 * between the current vma and the original range.  Any unmapped regions in the
 * original range will result in this function returning -ENOMEM while still
 * calling the visit function on all of the existing vmas in the range.
 * Must be called with the mmap_lock held for reading or writing.
 */
static
int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
		      unsigned long end, unsigned long arg,
		      int (*visit)(struct vm_area_struct *vma,
				   struct vm_area_struct **prev, unsigned long start,
				   unsigned long end, unsigned long arg))
{
	struct vm_area_struct *vma;
	struct vm_area_struct *prev;
	unsigned long tmp;
	int unmapped_error = 0;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * This differs from the handling in mlock(), etc.
	 */
	vma = find_vma_prev(mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		int error;

		/* Still start < end. */
		if (!vma)
			return -ENOMEM;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				break;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = visit(vma, &prev, start, tmp, arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		if (start >= end)
			break;
		if (prev)
			vma = find_vma(mm, prev->vm_end);
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(mm, start);
	}

	return unmapped_error;
}

#ifdef CONFIG_ANON_VMA_NAME
static int madvise_vma_anon_name(struct vm_area_struct *vma,
				 struct vm_area_struct **prev,
				 unsigned long start, unsigned long end,
				 unsigned long anon_name)
{
	int error;

	/* Only anonymous mappings can be named */
	if (vma->vm_file && !vma_is_anon_shmem(vma))
		return -EBADF;

	error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
				   (struct anon_vma_name *)anon_name);

	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in, struct anon_vma_name *anon_name)
{
	unsigned long end;
	unsigned long len;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
				 madvise_vma_anon_name);
}
#endif /* CONFIG_ANON_VMA_NAME */

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_COLLAPSE - synchronously coalesce pages into new THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *  MADV_POPULATE_READ - populate (prefault) page tables readable by
 *		triggering read faults if required.
 *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
 *		triggering write faults if required.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 *  -EPERM  - memory is sealed.
 */
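/*
 * Illustrative userspace call matching the contract documented above (not
 * kernel code): advice must start on a page boundary, and madvise() may
 * return -ENOMEM when the range crosses unmapped holes even though the
 * mapped parts were processed.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int advise_sequential(void *p, size_t len)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *
 *		if ((unsigned long)p % page)
 *			return -1;	// would fail with EINVAL below
 *		return madvise(p, len, MADV_SEQUENTIAL);
 *	}
 */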
int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
{
	unsigned long end;
	int error;
	int write;
	size_t len;
	struct blk_plug plug;

	if (!madvise_behavior_valid(behavior))
		return -EINVAL;

	if (!PAGE_ALIGNED(start))
		return -EINVAL;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
	} else {
		mmap_read_lock(mm);
	}

	start = untagged_addr_remote(mm, start);
	end = start + len;

	/*
	 * Check if the address range is sealed for do_madvise().
	 * can_modify_mm_madv assumes we have acquired the lock on MM.
	 */
	if (unlikely(!can_modify_mm_madv(mm, start, end, behavior))) {
		error = -EPERM;
		goto out;
	}

	blk_start_plug(&plug);
	switch (behavior) {
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
		error = madvise_populate(mm, start, end, behavior);
		break;
	default:
		error = madvise_walk_vmas(mm, start, end, behavior,
					  madvise_vma_behavior);
		break;
	}
	blk_finish_plug(&plug);

out:
	if (write)
		mmap_write_unlock(mm);
	else
		mmap_read_unlock(mm);

	return error;
}

SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	return do_madvise(current->mm, start, len_in, behavior);
}

SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
		size_t, vlen, int, behavior, unsigned int, flags)
{
	ssize_t ret;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	struct task_struct *task;
	struct mm_struct *mm;
	size_t total_len;
	unsigned int f_flags;

	if (flags != 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		goto out;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto free_iov;
	}

	if (!process_madvise_behavior_valid(behavior)) {
		ret = -EINVAL;
		goto release_task;
	}

	/* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR_OR_NULL(mm)) {
		ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		goto release_task;
	}

	/*
	 * Require CAP_SYS_NICE for influencing process performance. Note that
	 * only non-destructive hints are currently supported.
	 */
	if (!capable(CAP_SYS_NICE)) {
		ret = -EPERM;
		goto release_mm;
	}

	total_len = iov_iter_count(&iter);

	while (iov_iter_count(&iter)) {
		ret = do_madvise(mm, (unsigned long)iter_iov_addr(&iter),
				 iter_iov_len(&iter), behavior);
		if (ret < 0)
			break;
		iov_iter_advance(&iter, iter_iov_len(&iter));
	}

	ret = (total_len - iov_iter_count(&iter)) ? : ret;

release_mm:
	mmput(mm);
release_task:
	put_task_struct(task);
free_iov:
	kfree(iov);
out:
	return ret;
}
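/*
 * Illustrative userspace use of process_madvise() defined above (not kernel
 * code). A sketch, assuming a libc exposing the raw syscall numbers (newer
 * glibc may also provide a process_madvise() wrapper); the caller needs
 * CAP_SYS_NICE plus PTRACE_MODE_READ on the target, and the target pid and
 * address range here are placeholders:
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	int pageout_other(pid_t pid, void *remote_addr, size_t len)
 *	{
 *		int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *		struct iovec iov = { .iov_base = remote_addr, .iov_len = len };
 *		long ret;
 *
 *		if (pidfd < 0)
 *			return -1;
 *		ret = syscall(SYS_process_madvise, pidfd, &iov, 1,
 *			      MADV_PAGEOUT, 0);
 *		close(pidfd);
 *		return ret < 0 ? -1 : 0;
 *	}
 */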