// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm_inline.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
#include "swap.h"

struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
	case MADV_COLLAPSE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

#ifdef CONFIG_ANON_VMA_NAME
struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	struct anon_vma_name *anon_name;
	size_t count;

	/* Add 1 for NUL terminator at the end of the anon_name->name */
	count = strlen(name) + 1;
	anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
	if (anon_name) {
		kref_init(&anon_name->kref);
		memcpy(anon_name->name, name, count);
	}

	return anon_name;
}

void anon_vma_name_free(struct kref *kref)
{
	struct anon_vma_name *anon_name =
			container_of(kref, struct anon_vma_name, kref);
	kfree(anon_name);
}

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);

	return vma->anon_name;
}

/* mmap_lock should be write-locked */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	struct anon_vma_name *orig_name = anon_vma_name(vma);

	if (!anon_name) {
		vma->anon_name = NULL;
		anon_vma_name_put(orig_name);
		return 0;
	}

	if (anon_vma_name_eq(orig_name, anon_name))
		return 0;

	vma->anon_name = anon_vma_name_reuse(anon_name);
	anon_vma_name_put(orig_name);

	return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	if (anon_name)
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_ANON_VMA_NAME */
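/*
 * Illustrative note (not kernel code): with CONFIG_ANON_VMA_NAME, userspace
 * normally attaches these names via prctl(2) rather than madvise(2). A
 * minimal userspace sketch, assuming an anonymous mapping at "addr" of
 * "len" bytes:
 *
 *	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, len,
 *	      (unsigned long)"my_heap");
 *
 * The name then shows up as "[anon:my_heap]" in /proc/<pid>/maps; the prctl
 * path reaches madvise_set_anon_name() and madvise_vma_anon_name() below.
 */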
/*
 * Update the vm_flags on a region of a vma, splitting it or merging it as
 * necessary. Must be called with mmap_lock held for writing. The caller
 * should ensure anon_name stability by raising its refcount, even when
 * anon_name belongs to a valid vma, because this function might free that vma.
 */
static int madvise_update_vma(struct vm_area_struct *vma,
			      struct vm_area_struct **prev, unsigned long start,
			      unsigned long end, unsigned long new_flags,
			      struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;
	VMA_ITERATOR(vmi, mm, start);

	if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
		*prev = vma;
		return 0;
	}

	vma = vma_modify_flags_name(&vmi, *prev, vma, start, end, new_flags,
				    anon_name);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	*prev = vma;

	/* vm_flags is protected by the mmap_lock held in write mode. */
	vma_start_write(vma);
	vm_flags_reset(vma, new_flags);
	if (!vma->vm_file || vma_is_anon_shmem(vma)) {
		error = replace_anon_vma_name(vma, anon_name);
		if (error)
			return error;
	}

	return 0;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
				 unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	struct swap_iocb *splug = NULL;
	pte_t *ptep = NULL;
	spinlock_t *ptl;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct folio *folio;

		if (!ptep++) {
			ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
			if (!ptep)
				break;
		}

		pte = ptep_get(ptep);
		if (!is_swap_pte(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		pte_unmap_unlock(ptep, ptl);
		ptep = NULL;

		folio = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					      vma, addr, &splug);
		if (folio)
			folio_put(folio);
	}

	if (ptep)
		pte_unmap_unlock(ptep, ptl);
	swap_read_unplug(splug);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry		= swapin_walk_pmd_entry,
	.walk_lock		= PGWALK_RDLOCK,
};

static void shmem_swapin_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end) - 1;
	struct folio *folio;
	struct swap_iocb *splug = NULL;

	rcu_read_lock();
	xas_for_each(&xas, folio, end_index) {
		unsigned long addr;
		swp_entry_t entry;

		if (!xa_is_value(folio))
			continue;
		entry = radix_to_swp_entry(folio);
		/* There might be swapin error entries in shmem mapping. */
		if (non_swap_entry(entry))
			continue;

		addr = vma->vm_start +
			((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT);
		xas_pause(&xas);
		rcu_read_unlock();

		folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
					      vma, addr, &splug);
		if (folio)
			folio_put(folio);

		rcu_read_lock();
	}
	rcu_read_unlock();
	swap_read_unplug(splug);
}
#endif		/* CONFIG_SWAP */

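/*
 * Illustrative note (not kernel code): MADV_WILLNEED is the asynchronous
 * prefetch hint handled by madvise_willneed() below. A minimal userspace
 * sketch, assuming "buf" is a mapping of "len" bytes that will be touched
 * shortly:
 *
 *	(void)madvise(buf, len, MADV_WILLNEED);
 *
 * The return value is advisory only; the call schedules read-ahead (or
 * swap-in) and returns without waiting for the I/O to complete.
 */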
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		shmem_swapin_range(vma, start, end, file->f_mapping);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(mm);
	return 0;
}

static inline bool can_do_file_pageout(struct vm_area_struct *vma)
{
	if (!vma->vm_file)
		return false;
	/*
	 * paging out pagecache only for non-anonymous mappings that correspond
	 * to the files the calling process could (if tried) open for writing;
	 * otherwise we'd be including shared non-exclusive mappings, which
	 * opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *start_pte, *pte, ptent;
	spinlock_t *ptl;
	struct folio *folio = NULL;
	LIST_HEAD(folio_list);
	bool pageout_anon_only_filter;
	unsigned int batch_count = 0;

	if (fatal_signal_pending(current))
		return -EINTR;

	pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
					!can_do_file_pageout(vma);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		folio = pfn_folio(pmd_pfn(orig_pmd));

		/* Do not interfere with other mappings of this folio */
		if (folio_estimated_sharers(folio) != 1)
			goto huge_unlock;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			folio_get(folio);
			spin_unlock(ptl);
			folio_lock(folio);
			err = split_folio(folio);
			folio_unlock(folio);
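			/*
			 * Drop the reference taken above for the split
			 * attempt; if the split succeeded, fall through to
			 * the regular per-pte path below for the now
			 * pte-mapped folio.
			 */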
			folio_put(folio);
			if (!err)
				goto regular_folio;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (folio_test_active(folio))
			folio_set_workingset(folio);
		if (pageout) {
			if (folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&folio_list);
		return 0;
	}

regular_folio:
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
restart:
	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!start_pte)
		return 0;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);

		if (++batch_count == SWAP_CLUSTER_MAX) {
			batch_count = 0;
			if (need_resched()) {
				pte_unmap_unlock(start_pte, ptl);
				cond_resched();
				goto restart;
			}
		}

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * Creating a THP page is expensive, so split it only if we
		 * are sure it's worth it: split it if we are the only owner.
		 */
		if (folio_test_large(folio)) {
			int err;

			if (folio_estimated_sharers(folio) != 1)
				break;
			if (pageout_anon_only_filter && !folio_test_anon(folio))
				break;
			if (!folio_trylock(folio))
				break;
			folio_get(folio);
			arch_leave_lazy_mmu_mode();
			pte_unmap_unlock(start_pte, ptl);
			start_pte = NULL;
			err = split_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
			if (err)
				break;
			start_pte = pte =
				pte_offset_map_lock(mm, pmd, addr, &ptl);
			if (!start_pte)
				break;
			arch_enter_lazy_mmu_mode();
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		/*
		 * Do not interfere with other mappings of this folio, and
		 * skip non-LRU folios.
		 */
		if (!folio_test_lru(folio) || folio_mapcount(folio) != 1)
			continue;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			continue;

		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating a folio to accelerate its reclaim.
		 * The VM cannot reclaim the folio unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking, which
		 * will miss the recent reference history.
		 */
		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (folio_test_active(folio))
			folio_set_workingset(folio);
		if (pageout) {
			if (folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
	}

	if (start_pte) {
		arch_leave_lazy_mmu_mode();
		pte_unmap_unlock(start_pte, ptl);
	}
	if (pageout)
		reclaim_pages(&folio_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_or_pageout_pte_range,
	.walk_lock = PGWALK_RDLOCK,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
}

static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

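/*
 * Illustrative note (not kernel code): MADV_COLD (handled above) only
 * deactivates pages, while MADV_PAGEOUT below tries to reclaim them right
 * away. A minimal userspace sketch of proactive reclaim, assuming "cache"
 * is a mapping the process does not expect to touch soon:
 *
 *	(void)madvise(cache, cache_len, MADV_COLD);	// age now, reclaim later
 *	(void)madvise(cache, cache_len, MADV_PAGEOUT);	// reclaim immediately
 *
 * Both are hints; locked, pfn-mapped and hugetlb VMAs are rejected by
 * can_madv_lru_vma().
 */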
static long madvise_pageout(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	/*
	 * If the VMA belongs to a private file mapping, there can be private
	 * dirty pages which can be paged out even if this process is neither
	 * the owner of the file nor capable of writing to it. So let private
	 * file mappings proceed and page out their dirty anon pages.
	 */
	if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
				(vma->vm_flags & VM_MAYSHARE)))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)

{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte, ptent;
	struct folio *folio;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!start_pte)
		return 0;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte has a swp_entry, just clear the page table
		 * entry to prevent a swap-in, which is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (!non_swap_entry(entry)) {
				nr_swap--;
				free_swap_and_cache(entry);
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			} else if (is_hwpoison_entry(entry) ||
				   is_poisoned_swp_entry(entry)) {
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			}
			continue;
		}

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * If pmd isn't transhuge but the folio is large and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (folio_test_large(folio)) {
			int err;

			if (folio_estimated_sharers(folio) != 1)
				break;
			if (!folio_trylock(folio))
				break;
			folio_get(folio);
			arch_leave_lazy_mmu_mode();
			pte_unmap_unlock(start_pte, ptl);
			start_pte = NULL;
			err = split_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
			if (err)
				break;
			start_pte = pte =
				pte_offset_map_lock(mm, pmd, addr, &ptl);
			if (!start_pte)
				break;
			arch_enter_lazy_mmu_mode();
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
			if (!folio_trylock(folio))
				continue;
			/*
			 * If folio is shared with others, we mustn't clear
			 * the folio's dirty flag.
			 */
			if (folio_mapcount(folio) != 1) {
				folio_unlock(folio);
				continue;
			}

			if (folio_test_swapcache(folio) &&
			    !folio_free_swap(folio)) {
				folio_unlock(folio);
				continue;
			}

			folio_clear_dirty(folio);
			folio_unlock(folio);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) do not update the TLB
			 * on set_pte_at() and tlb_remove_tlb_entry(), so for
			 * portability, re-map the pte as old and clean after
			 * clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		folio_mark_lazyfree(folio);
	}

	if (nr_swap)
		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	if (start_pte) {
		arch_leave_lazy_mmu_mode();
		pte_unmap_unlock(start_pte, ptl);
	}
	cond_resched();

	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry		= madvise_free_pte_range,
	.walk_lock		= PGWALK_RDLOCK,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);

	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range_single call sets things up for shrink_active_list to actually
 * free these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
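/*
 * Illustrative note (not kernel code): from userspace, the difference
 * between the two destructive hints on anonymous memory is roughly:
 *
 *	madvise(p, len, MADV_DONTNEED);	// next access sees zero-filled pages
 *	madvise(p, len, MADV_FREE);	// contents may vanish at any time
 *					// until the range is written again
 *
 * MADV_FREE lets the kernel reclaim the pages lazily under memory pressure,
 * which is why allocators tend to prefer it for freed heap regions. This is
 * only a usage sketch; the authoritative behaviour is described above and
 * in madvise(2).
 */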
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range_single(vma, start, end - start, NULL);
	return 0;
}

static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long *end,
					    int behavior)
{
	if (!is_vm_hugetlb_page(vma)) {
		unsigned int forbidden = VM_PFNMAP;

		if (behavior != MADV_DONTNEED_LOCKED)
			forbidden |= VM_LOCKED;

		return !(vma->vm_flags & forbidden);
	}

	if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
		return false;
	if (start & ~huge_page_mask(hstate_vma(vma)))
		return false;

	/*
	 * Madvise callers expect the length to be rounded up to PAGE_SIZE
	 * boundaries, and may be unaware that this VMA uses huge pages.
	 * Avoid unexpected data loss by rounding down the number of
	 * huge pages freed.
	 */
	*end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));

	return true;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	struct mm_struct *mm = vma->vm_mm;

	*prev = vma;
	if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
		return -EINVAL;

	if (start == end)
		return 0;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(mm);
		vma = vma_lookup(mm, start);
		if (!vma)
			return -ENOMEM;
		/*
		 * Potential end adjustment for hugetlb vma is OK as
		 * the check below keeps end within vma.
		 */
		if (!madvise_dontneed_free_valid_vma(vma, start, &end,
						     behavior))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_lock was
			 * released the effect of the concurrent
			 * operation may not cause madvise() to
			 * have an undefined result. There may be an
			 * adjacent next vma that we'll walk
			 * next. userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

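/*
 * Illustrative note (not kernel code): MADV_POPULATE_READ/WRITE (handled
 * below) prefault page tables so that later accesses do not take page
 * faults. A minimal userspace sketch, assuming "buf" was just mmap()ed and
 * will be written by a latency-sensitive thread:
 *
 *	if (madvise(buf, len, MADV_POPULATE_WRITE))
 *		perror("madvise");
 *
 * Unlike MAP_POPULATE, this can be applied to an existing mapping at any
 * time.
 */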
static long madvise_populate(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end,
			     int behavior)
{
	const bool write = behavior == MADV_POPULATE_WRITE;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long tmp_end;
	int locked = 1;
	long pages;

	*prev = vma;

	while (start < end) {
		/*
		 * We might have temporarily dropped the lock. For example,
		 * our VMA might have been split.
		 */
		if (!vma || start >= vma->vm_end) {
			vma = vma_lookup(mm, start);
			if (!vma)
				return -ENOMEM;
		}

		tmp_end = min_t(unsigned long, end, vma->vm_end);
		/* Populate (prefault) page tables readable/writable. */
		pages = faultin_vma_page_range(vma, start, tmp_end, write,
					       &locked);
		if (!locked) {
			mmap_read_lock(mm);
			locked = 1;
			*prev = NULL;
			vma = NULL;
		}
		if (pages < 0) {
			switch (pages) {
			case -EINTR:
				return -EINTR;
			case -EINVAL: /* Incompatible mappings / permissions. */
				return -EINVAL;
			case -EHWPOISON:
				return -EHWPOISON;
			case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
				return -EFAULT;
			default:
				pr_warn_once("%s: unhandled return value: %ld\n",
					     __func__, pages);
				fallthrough;
			case -ENOMEM:
				return -ENOMEM;
			}
		}
		start += pages * PAGE_SIZE;
	}
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;
	struct mm_struct *mm = vma->vm_mm;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if (!vma_is_shared_maywrite(vma))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_rwsem.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(mm);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	mmap_read_lock(mm);
	return error;
}

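/*
 * Illustrative note (not kernel code): since madvise_remove() above ends up
 * in vfs_fallocate(), MADV_REMOVE on a shared file mapping is roughly the
 * mmap-level counterpart of punching a hole in the file directly. A sketch,
 * assuming "fd" backs the shared mapping at "addr" and "file_off" is the
 * file offset corresponding to "addr + off":
 *
 *	madvise(addr + off, len, MADV_REMOVE);
 *	// comparable effect via the file descriptor:
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  file_off, len);
 */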
/*
 * Apply an madvise behavior to a region of a vma.  madvise_update_vma
 * will handle splitting a vm area into separate areas, each area with its own
 * behavior.
 */
static int madvise_vma_behavior(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end,
				unsigned long behavior)
{
	int error;
	struct anon_vma_name *anon_name;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
		return madvise_populate(vma, prev, start, end, behavior);
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO)
			return -EINVAL;
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED)
			return -EINVAL;
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
			return -EINVAL;
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	case MADV_COLLAPSE:
		return madvise_collapse(vma, prev, start, end);
	}

	anon_name = anon_vma_name(vma);
	anon_vma_name_get(anon_name);
	error = madvise_update_vma(vma, prev, start, end, new_flags,
				   anon_name);
	anon_vma_name_put(anon_name);

out:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	unsigned long size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;


	for (; start < end; start += size) {
		unsigned long pfn;
		struct page *page;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page.
		 */
		size = page_size(compound_head(page));

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
				 pfn, start);
			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
		} else {
			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				 pfn, start);
			ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED);
			if (ret == -EOPNOTSUPP)
				ret = 0;
		}

		if (ret)
			return ret;
	}

	return 0;
}
#endif

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_FREE:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
	case MADV_COLLAPSE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

static bool process_madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_WILLNEED:
	case MADV_COLLAPSE:
		return true;
	default:
		return false;
	}
}

/*
 * Walk the vmas in range [start,end), and call the visit function on each one.
 * The visit function will get start and end parameters that cover the overlap
 * between the current vma and the original range.  Any unmapped regions in the
 * original range will result in this function returning -ENOMEM while still
 * calling the visit function on all of the existing vmas in the range.
 * Must be called with the mmap_lock held for reading or writing.
 */
static
int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
		      unsigned long end, unsigned long arg,
		      int (*visit)(struct vm_area_struct *vma,
				   struct vm_area_struct **prev, unsigned long start,
				   unsigned long end, unsigned long arg))
{
	struct vm_area_struct *vma;
	struct vm_area_struct *prev;
	unsigned long tmp;
	int unmapped_error = 0;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		int error;

		/* Still start < end. */
		if (!vma)
			return -ENOMEM;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				break;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
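		/*
		 * The visitor may drop and re-take mmap_lock (e.g.
		 * madvise_remove() or madvise_willneed()); it signals this by
		 * setting *prev to NULL, in which case the next vma is looked
		 * up from "start" below.
		 */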
		error = visit(vma, &prev, start, tmp, arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		if (start >= end)
			break;
		if (prev)
			vma = find_vma(mm, prev->vm_end);
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(mm, start);
	}

	return unmapped_error;
}

#ifdef CONFIG_ANON_VMA_NAME
static int madvise_vma_anon_name(struct vm_area_struct *vma,
				 struct vm_area_struct **prev,
				 unsigned long start, unsigned long end,
				 unsigned long anon_name)
{
	int error;

	/* Only anonymous mappings can be named */
	if (vma->vm_file && !vma_is_anon_shmem(vma))
		return -EBADF;

	error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
				   (struct anon_vma_name *)anon_name);

	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in, struct anon_vma_name *anon_name)
{
	unsigned long end;
	unsigned long len;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
				 madvise_vma_anon_name);
}
#endif /* CONFIG_ANON_VMA_NAME */
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_COLLAPSE - synchronously coalesce pages into new THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *  MADV_POPULATE_READ - populate (prefault) page tables readable by
 *		triggering read faults if required
 *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
 *		triggering write faults if required
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
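/*
 * Illustrative note (not kernel code): a typical userspace call sequence,
 * assuming a read-only file mapping of "len" bytes that is streamed through
 * once:
 *
 *	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(p, len, MADV_SEQUENTIAL);	// aggressive readahead
 *	// ... stream through the mapping ...
 *	madvise(p, len, MADV_DONTNEED);		// drop the pages again
 *
 * All of these land in do_madvise() below via the madvise() syscall entry
 * point.
 */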
int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
{
	unsigned long end;
	int error;
	int write;
	size_t len;
	struct blk_plug plug;

	if (!madvise_behavior_valid(behavior))
		return -EINVAL;

	if (!PAGE_ALIGNED(start))
		return -EINVAL;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
	} else {
		mmap_read_lock(mm);
	}

	start = untagged_addr_remote(mm, start);
	end = start + len;

	blk_start_plug(&plug);
	error = madvise_walk_vmas(mm, start, end, behavior,
			madvise_vma_behavior);
	blk_finish_plug(&plug);
	if (write)
		mmap_write_unlock(mm);
	else
		mmap_read_unlock(mm);

	return error;
}

SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	return do_madvise(current->mm, start, len_in, behavior);
}

SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
		size_t, vlen, int, behavior, unsigned int, flags)
{
	ssize_t ret;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	struct task_struct *task;
	struct mm_struct *mm;
	size_t total_len;
	unsigned int f_flags;

	if (flags != 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		goto out;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto free_iov;
	}

	if (!process_madvise_behavior_valid(behavior)) {
		ret = -EINVAL;
		goto release_task;
	}

	/* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR_OR_NULL(mm)) {
		ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		goto release_task;
	}

	/*
	 * Require CAP_SYS_NICE for influencing process performance. Note that
	 * only non-destructive hints are currently supported.
	 */
	if (!capable(CAP_SYS_NICE)) {
		ret = -EPERM;
		goto release_mm;
	}

	total_len = iov_iter_count(&iter);

	while (iov_iter_count(&iter)) {
		ret = do_madvise(mm, (unsigned long)iter_iov_addr(&iter),
				 iter_iov_len(&iter), behavior);
		if (ret < 0)
			break;
		iov_iter_advance(&iter, iter_iov_len(&iter));
	}

	ret = (total_len - iov_iter_count(&iter)) ? : ret;

release_mm:
	mmput(mm);
release_task:
	put_task_struct(task);
free_iov:
	kfree(iov);
out:
	return ret;
}
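/*
 * Illustrative note (not kernel code): process_madvise(2) applies the same
 * kind of hints to another process, addressed by pidfd. A minimal userspace
 * sketch, assuming the caller holds a pidfd for the target and has
 * CAP_SYS_NICE plus PTRACE_MODE_READ access:
 *
 *	struct iovec iov = {
 *		.iov_base = (void *)remote_addr,
 *		.iov_len  = len,
 *	};
 *	syscall(__NR_process_madvise, pidfd, &iov, 1, MADV_PAGEOUT, 0);
 *
 * Only the non-destructive hints accepted by
 * process_madvise_behavior_valid() above are allowed.
 */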