// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or
 * cache failure.
 *
 * In addition there is a "soft offline" entry point that allows taking
 * suspect, not-yet-corrupted pages out of use without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added if:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/vm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity in the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */

#define pr_fmt(fmt) "Memory failure: " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/dax.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include "swap.h"
#include "internal.h"
#include "ras/ras_event.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

static bool hw_memory_failure __read_mostly = false;

inline void num_poisoned_pages_inc(unsigned long pfn)
{
	atomic_long_inc(&num_poisoned_pages);
	memblk_nr_poison_inc(pfn);
}

inline void num_poisoned_pages_sub(unsigned long pfn, long i)
{
	atomic_long_sub(i, &num_poisoned_pages);
	if (pfn != -1UL)
		memblk_nr_poison_sub(pfn, i);
}

/*
 * Return values:
 *   1: the page is dissolved (if needed) and taken off from buddy,
 *   0: the page is dissolved (if needed) and not taken off from buddy,
 *   < 0: failed to dissolve.
95 */ 96 static int __page_handle_poison(struct page *page) 97 { 98 int ret; 99 100 zone_pcp_disable(page_zone(page)); 101 ret = dissolve_free_huge_page(page); 102 if (!ret) 103 ret = take_page_off_buddy(page); 104 zone_pcp_enable(page_zone(page)); 105 106 return ret; 107 } 108 109 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release) 110 { 111 if (hugepage_or_freepage) { 112 /* 113 * Doing this check for free pages is also fine since dissolve_free_huge_page 114 * returns 0 for non-hugetlb pages as well. 115 */ 116 if (__page_handle_poison(page) <= 0) 117 /* 118 * We could fail to take off the target page from buddy 119 * for example due to racy page allocation, but that's 120 * acceptable because soft-offlined page is not broken 121 * and if someone really want to use it, they should 122 * take it. 123 */ 124 return false; 125 } 126 127 SetPageHWPoison(page); 128 if (release) 129 put_page(page); 130 page_ref_inc(page); 131 num_poisoned_pages_inc(page_to_pfn(page)); 132 133 return true; 134 } 135 136 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE) 137 138 u32 hwpoison_filter_enable = 0; 139 u32 hwpoison_filter_dev_major = ~0U; 140 u32 hwpoison_filter_dev_minor = ~0U; 141 u64 hwpoison_filter_flags_mask; 142 u64 hwpoison_filter_flags_value; 143 EXPORT_SYMBOL_GPL(hwpoison_filter_enable); 144 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major); 145 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor); 146 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask); 147 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value); 148 149 static int hwpoison_filter_dev(struct page *p) 150 { 151 struct address_space *mapping; 152 dev_t dev; 153 154 if (hwpoison_filter_dev_major == ~0U && 155 hwpoison_filter_dev_minor == ~0U) 156 return 0; 157 158 mapping = page_mapping(p); 159 if (mapping == NULL || mapping->host == NULL) 160 return -EINVAL; 161 162 dev = mapping->host->i_sb->s_dev; 163 if (hwpoison_filter_dev_major != ~0U && 164 hwpoison_filter_dev_major != MAJOR(dev)) 165 return -EINVAL; 166 if (hwpoison_filter_dev_minor != ~0U && 167 hwpoison_filter_dev_minor != MINOR(dev)) 168 return -EINVAL; 169 170 return 0; 171 } 172 173 static int hwpoison_filter_flags(struct page *p) 174 { 175 if (!hwpoison_filter_flags_mask) 176 return 0; 177 178 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == 179 hwpoison_filter_flags_value) 180 return 0; 181 else 182 return -EINVAL; 183 } 184 185 /* 186 * This allows stress tests to limit test scope to a collection of tasks 187 * by putting them under some memcg. This prevents killing unrelated/important 188 * processes such as /sbin/init. Note that the target task may share clean 189 * pages with init (eg. libc text), which is harmless. If the target task 190 * share _dirty_ pages with another task B, the test scheme must make sure B 191 * is also included in the memcg. At last, due to race conditions this filter 192 * can only guarantee that the page either belongs to the memcg tasks, or is 193 * a freed page. 
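 *
 * As a usage sketch (knob names and paths as typically exposed by the
 * hwpoison-inject debugfs interface; they may differ by kernel version and
 * config): put the test tasks into a memcg, look up the inode number of that
 * memcg's cgroup directory, and program the filter before injecting, e.g.
 *
 *	memcg_ino=$(stat -c %i /sys/fs/cgroup/hwpoison)
 *	echo $memcg_ino > /sys/kernel/debug/hwpoison/corrupt-filter-memcg
 *	echo 1          > /sys/kernel/debug/hwpoison/corrupt-filter-enable
 *	echo $pfn       > /sys/kernel/debug/hwpoison/corrupt-pfn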
194 */ 195 #ifdef CONFIG_MEMCG 196 u64 hwpoison_filter_memcg; 197 EXPORT_SYMBOL_GPL(hwpoison_filter_memcg); 198 static int hwpoison_filter_task(struct page *p) 199 { 200 if (!hwpoison_filter_memcg) 201 return 0; 202 203 if (page_cgroup_ino(p) != hwpoison_filter_memcg) 204 return -EINVAL; 205 206 return 0; 207 } 208 #else 209 static int hwpoison_filter_task(struct page *p) { return 0; } 210 #endif 211 212 int hwpoison_filter(struct page *p) 213 { 214 if (!hwpoison_filter_enable) 215 return 0; 216 217 if (hwpoison_filter_dev(p)) 218 return -EINVAL; 219 220 if (hwpoison_filter_flags(p)) 221 return -EINVAL; 222 223 if (hwpoison_filter_task(p)) 224 return -EINVAL; 225 226 return 0; 227 } 228 #else 229 int hwpoison_filter(struct page *p) 230 { 231 return 0; 232 } 233 #endif 234 235 EXPORT_SYMBOL_GPL(hwpoison_filter); 236 237 /* 238 * Kill all processes that have a poisoned page mapped and then isolate 239 * the page. 240 * 241 * General strategy: 242 * Find all processes having the page mapped and kill them. 243 * But we keep a page reference around so that the page is not 244 * actually freed yet. 245 * Then stash the page away 246 * 247 * There's no convenient way to get back to mapped processes 248 * from the VMAs. So do a brute-force search over all 249 * running processes. 250 * 251 * Remember that machine checks are not common (or rather 252 * if they are common you have other problems), so this shouldn't 253 * be a performance issue. 254 * 255 * Also there are some races possible while we get from the 256 * error detection to actually handle it. 257 */ 258 259 struct to_kill { 260 struct list_head nd; 261 struct task_struct *tsk; 262 unsigned long addr; 263 short size_shift; 264 }; 265 266 /* 267 * Send all the processes who have the page mapped a signal. 268 * ``action optional'' if they are not immediately affected by the error 269 * ``action required'' if error happened in current execution context 270 */ 271 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) 272 { 273 struct task_struct *t = tk->tsk; 274 short addr_lsb = tk->size_shift; 275 int ret = 0; 276 277 pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n", 278 pfn, t->comm, t->pid); 279 280 if ((flags & MF_ACTION_REQUIRED) && (t == current)) 281 ret = force_sig_mceerr(BUS_MCEERR_AR, 282 (void __user *)tk->addr, addr_lsb); 283 else 284 /* 285 * Signal other processes sharing the page if they have 286 * PF_MCE_EARLY set. 287 * Don't use force here, it's convenient if the signal 288 * can be temporarily blocked. 289 * This could cause a loop when the user sets SIGBUS 290 * to SIG_IGN, but hopefully no one will do that? 291 */ 292 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr, 293 addr_lsb, t); 294 if (ret < 0) 295 pr_info("Error sending signal to %s:%d: %d\n", 296 t->comm, t->pid, ret); 297 return ret; 298 } 299 300 /* 301 * Unknown page type encountered. Try to check whether it can turn PageLRU by 302 * lru_add_drain_all. 303 */ 304 void shake_page(struct page *p) 305 { 306 if (PageHuge(p)) 307 return; 308 309 if (!PageSlab(p)) { 310 lru_add_drain_all(); 311 if (PageLRU(p) || is_free_buddy_page(p)) 312 return; 313 } 314 315 /* 316 * TODO: Could shrink slab caches here if a lightweight range-based 317 * shrinker will be available. 
318 */ 319 } 320 EXPORT_SYMBOL_GPL(shake_page); 321 322 static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, 323 unsigned long address) 324 { 325 unsigned long ret = 0; 326 pgd_t *pgd; 327 p4d_t *p4d; 328 pud_t *pud; 329 pmd_t *pmd; 330 pte_t *pte; 331 332 VM_BUG_ON_VMA(address == -EFAULT, vma); 333 pgd = pgd_offset(vma->vm_mm, address); 334 if (!pgd_present(*pgd)) 335 return 0; 336 p4d = p4d_offset(pgd, address); 337 if (!p4d_present(*p4d)) 338 return 0; 339 pud = pud_offset(p4d, address); 340 if (!pud_present(*pud)) 341 return 0; 342 if (pud_devmap(*pud)) 343 return PUD_SHIFT; 344 pmd = pmd_offset(pud, address); 345 if (!pmd_present(*pmd)) 346 return 0; 347 if (pmd_devmap(*pmd)) 348 return PMD_SHIFT; 349 pte = pte_offset_map(pmd, address); 350 if (pte_present(*pte) && pte_devmap(*pte)) 351 ret = PAGE_SHIFT; 352 pte_unmap(pte); 353 return ret; 354 } 355 356 /* 357 * Failure handling: if we can't find or can't kill a process there's 358 * not much we can do. We just print a message and ignore otherwise. 359 */ 360 361 #define FSDAX_INVALID_PGOFF ULONG_MAX 362 363 /* 364 * Schedule a process for later kill. 365 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM. 366 * 367 * Note: @fsdax_pgoff is used only when @p is a fsdax page and a 368 * filesystem with a memory failure handler has claimed the 369 * memory_failure event. In all other cases, page->index and 370 * page->mapping are sufficient for mapping the page back to its 371 * corresponding user virtual address. 372 */ 373 static void add_to_kill(struct task_struct *tsk, struct page *p, 374 pgoff_t fsdax_pgoff, struct vm_area_struct *vma, 375 struct list_head *to_kill) 376 { 377 struct to_kill *tk; 378 379 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); 380 if (!tk) { 381 pr_err("Out of memory while machine check handling\n"); 382 return; 383 } 384 385 tk->addr = page_address_in_vma(p, vma); 386 if (is_zone_device_page(p)) { 387 if (fsdax_pgoff != FSDAX_INVALID_PGOFF) 388 tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma); 389 tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr); 390 } else 391 tk->size_shift = page_shift(compound_head(p)); 392 393 /* 394 * Send SIGKILL if "tk->addr == -EFAULT". Also, as 395 * "tk->size_shift" is always non-zero for !is_zone_device_page(), 396 * so "tk->size_shift == 0" effectively checks no mapping on 397 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times 398 * to a process' address space, it's possible not all N VMAs 399 * contain mappings for the page, but at least one VMA does. 400 * Only deliver SIGBUS with payload derived from the VMA that 401 * has a mapping for the page. 402 */ 403 if (tk->addr == -EFAULT) { 404 pr_info("Unable to find user space address %lx in %s\n", 405 page_to_pfn(p), tsk->comm); 406 } else if (tk->size_shift == 0) { 407 kfree(tk); 408 return; 409 } 410 411 get_task_struct(tsk); 412 tk->tsk = tsk; 413 list_add_tail(&tk->nd, to_kill); 414 } 415 416 /* 417 * Kill the processes that have been collected earlier. 418 * 419 * Only do anything when FORCEKILL is set, otherwise just free the 420 * list (this is used for clean pages which do not need killing) 421 * Also when FAIL is set do a force kill because something went 422 * wrong earlier. 
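 *
 * For reference, a minimal sketch of the userspace side (not part of this
 * file; the handler and helper names are illustrative): a process that wants
 * the BUS_MCEERR_AO "early kill" notification opts in with
 * prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0) and installs
 * a SA_SIGINFO SIGBUS handler; siginfo then carries the poisoned address
 * and the mapping granularity:
 *
 *	static void mce_handler(int sig, siginfo_t *si, void *uc)
 *	{
 *		if (si->si_code == BUS_MCEERR_AO || si->si_code == BUS_MCEERR_AR)
 *			mark_range_bad(si->si_addr, 1UL << si->si_addr_lsb);
 *	}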
 */
static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
		unsigned long pfn, int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe(tk, next, to_kill, nd) {
		if (forcekill) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr == -EFAULT) {
				pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
						 tk->tsk, PIDTYPE_PID);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc(tk, pfn, flags) < 0)
				pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
		}
		list_del(&tk->nd);
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
 * have to call rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t) {
		if (t->flags & PF_MCE_PROCESS) {
			if (t->flags & PF_MCE_EARLY)
				return t;
		} else {
			if (sysctl_memory_failure_early_kill)
				return t;
		}
	}
	return NULL;
}

/*
 * Determine whether a given process is an "early kill" process which expects
 * to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill", and otherwise return NULL.
 *
 * Note that the above is true for the Action Optional case. In the Action
 * Required case, only the current thread needs to be signaled with SIGBUS;
 * the error is Action Optional for the other, non-current processes sharing
 * the same error page. For those, if the process is "early kill", the
 * task_struct of the dedicated thread is returned as well.
 */
static struct task_struct *task_early_kill(struct task_struct *tsk,
					   int force_early)
{
	if (!tsk->mm)
		return NULL;
	/*
	 * Comparing ->mm here because current task might represent
	 * a subthread, while tsk always points to the main thread.
	 */
	if (force_early && tsk->mm == current->mm)
		return current;

	return find_early_kill_thread(tsk);
}

/*
 * Collect processes when the error hit an anonymous page.
512 */ 513 static void collect_procs_anon(struct page *page, struct list_head *to_kill, 514 int force_early) 515 { 516 struct folio *folio = page_folio(page); 517 struct vm_area_struct *vma; 518 struct task_struct *tsk; 519 struct anon_vma *av; 520 pgoff_t pgoff; 521 522 av = folio_lock_anon_vma_read(folio, NULL); 523 if (av == NULL) /* Not actually mapped anymore */ 524 return; 525 526 pgoff = page_to_pgoff(page); 527 read_lock(&tasklist_lock); 528 for_each_process (tsk) { 529 struct anon_vma_chain *vmac; 530 struct task_struct *t = task_early_kill(tsk, force_early); 531 532 if (!t) 533 continue; 534 anon_vma_interval_tree_foreach(vmac, &av->rb_root, 535 pgoff, pgoff) { 536 vma = vmac->vma; 537 if (vma->vm_mm != t->mm) 538 continue; 539 if (!page_mapped_in_vma(page, vma)) 540 continue; 541 add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma, to_kill); 542 } 543 } 544 read_unlock(&tasklist_lock); 545 anon_vma_unlock_read(av); 546 } 547 548 /* 549 * Collect processes when the error hit a file mapped page. 550 */ 551 static void collect_procs_file(struct page *page, struct list_head *to_kill, 552 int force_early) 553 { 554 struct vm_area_struct *vma; 555 struct task_struct *tsk; 556 struct address_space *mapping = page->mapping; 557 pgoff_t pgoff; 558 559 i_mmap_lock_read(mapping); 560 read_lock(&tasklist_lock); 561 pgoff = page_to_pgoff(page); 562 for_each_process(tsk) { 563 struct task_struct *t = task_early_kill(tsk, force_early); 564 565 if (!t) 566 continue; 567 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, 568 pgoff) { 569 /* 570 * Send early kill signal to tasks where a vma covers 571 * the page but the corrupted page is not necessarily 572 * mapped it in its pte. 573 * Assume applications who requested early kill want 574 * to be informed of all such data corruptions. 575 */ 576 if (vma->vm_mm == t->mm) 577 add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma, 578 to_kill); 579 } 580 } 581 read_unlock(&tasklist_lock); 582 i_mmap_unlock_read(mapping); 583 } 584 585 #ifdef CONFIG_FS_DAX 586 /* 587 * Collect processes when the error hit a fsdax page. 588 */ 589 static void collect_procs_fsdax(struct page *page, 590 struct address_space *mapping, pgoff_t pgoff, 591 struct list_head *to_kill) 592 { 593 struct vm_area_struct *vma; 594 struct task_struct *tsk; 595 596 i_mmap_lock_read(mapping); 597 read_lock(&tasklist_lock); 598 for_each_process(tsk) { 599 struct task_struct *t = task_early_kill(tsk, true); 600 601 if (!t) 602 continue; 603 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 604 if (vma->vm_mm == t->mm) 605 add_to_kill(t, page, pgoff, vma, to_kill); 606 } 607 } 608 read_unlock(&tasklist_lock); 609 i_mmap_unlock_read(mapping); 610 } 611 #endif /* CONFIG_FS_DAX */ 612 613 /* 614 * Collect the processes who have the corrupted page mapped to kill. 
615 */ 616 static void collect_procs(struct page *page, struct list_head *tokill, 617 int force_early) 618 { 619 if (!page->mapping) 620 return; 621 622 if (PageAnon(page)) 623 collect_procs_anon(page, tokill, force_early); 624 else 625 collect_procs_file(page, tokill, force_early); 626 } 627 628 struct hwp_walk { 629 struct to_kill tk; 630 unsigned long pfn; 631 int flags; 632 }; 633 634 static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift) 635 { 636 tk->addr = addr; 637 tk->size_shift = shift; 638 } 639 640 static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift, 641 unsigned long poisoned_pfn, struct to_kill *tk) 642 { 643 unsigned long pfn = 0; 644 645 if (pte_present(pte)) { 646 pfn = pte_pfn(pte); 647 } else { 648 swp_entry_t swp = pte_to_swp_entry(pte); 649 650 if (is_hwpoison_entry(swp)) 651 pfn = swp_offset_pfn(swp); 652 } 653 654 if (!pfn || pfn != poisoned_pfn) 655 return 0; 656 657 set_to_kill(tk, addr, shift); 658 return 1; 659 } 660 661 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 662 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, 663 struct hwp_walk *hwp) 664 { 665 pmd_t pmd = *pmdp; 666 unsigned long pfn; 667 unsigned long hwpoison_vaddr; 668 669 if (!pmd_present(pmd)) 670 return 0; 671 pfn = pmd_pfn(pmd); 672 if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) { 673 hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT); 674 set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT); 675 return 1; 676 } 677 return 0; 678 } 679 #else 680 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, 681 struct hwp_walk *hwp) 682 { 683 return 0; 684 } 685 #endif 686 687 static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr, 688 unsigned long end, struct mm_walk *walk) 689 { 690 struct hwp_walk *hwp = walk->private; 691 int ret = 0; 692 pte_t *ptep, *mapped_pte; 693 spinlock_t *ptl; 694 695 ptl = pmd_trans_huge_lock(pmdp, walk->vma); 696 if (ptl) { 697 ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp); 698 spin_unlock(ptl); 699 goto out; 700 } 701 702 if (pmd_trans_unstable(pmdp)) 703 goto out; 704 705 mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp, 706 addr, &ptl); 707 for (; addr != end; ptep++, addr += PAGE_SIZE) { 708 ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT, 709 hwp->pfn, &hwp->tk); 710 if (ret == 1) 711 break; 712 } 713 pte_unmap_unlock(mapped_pte, ptl); 714 out: 715 cond_resched(); 716 return ret; 717 } 718 719 #ifdef CONFIG_HUGETLB_PAGE 720 static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask, 721 unsigned long addr, unsigned long end, 722 struct mm_walk *walk) 723 { 724 struct hwp_walk *hwp = walk->private; 725 pte_t pte = huge_ptep_get(ptep); 726 struct hstate *h = hstate_vma(walk->vma); 727 728 return check_hwpoisoned_entry(pte, addr, huge_page_shift(h), 729 hwp->pfn, &hwp->tk); 730 } 731 #else 732 #define hwpoison_hugetlb_range NULL 733 #endif 734 735 static const struct mm_walk_ops hwp_walk_ops = { 736 .pmd_entry = hwpoison_pte_range, 737 .hugetlb_entry = hwpoison_hugetlb_range, 738 }; 739 740 /* 741 * Sends SIGBUS to the current process with error info. 742 * 743 * This function is intended to handle "Action Required" MCEs on already 744 * hardware poisoned pages. They could happen, for example, when 745 * memory_failure() failed to unmap the error page at the first call, or 746 * when multiple local machine checks happened on different CPUs. 
747 * 748 * MCE handler currently has no easy access to the error virtual address, 749 * so this function walks page table to find it. The returned virtual address 750 * is proper in most cases, but it could be wrong when the application 751 * process has multiple entries mapping the error page. 752 */ 753 static int kill_accessing_process(struct task_struct *p, unsigned long pfn, 754 int flags) 755 { 756 int ret; 757 struct hwp_walk priv = { 758 .pfn = pfn, 759 }; 760 priv.tk.tsk = p; 761 762 if (!p->mm) 763 return -EFAULT; 764 765 mmap_read_lock(p->mm); 766 ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops, 767 (void *)&priv); 768 if (ret == 1 && priv.tk.addr) 769 kill_proc(&priv.tk, pfn, flags); 770 else 771 ret = 0; 772 mmap_read_unlock(p->mm); 773 return ret > 0 ? -EHWPOISON : -EFAULT; 774 } 775 776 static const char *action_name[] = { 777 [MF_IGNORED] = "Ignored", 778 [MF_FAILED] = "Failed", 779 [MF_DELAYED] = "Delayed", 780 [MF_RECOVERED] = "Recovered", 781 }; 782 783 static const char * const action_page_types[] = { 784 [MF_MSG_KERNEL] = "reserved kernel page", 785 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page", 786 [MF_MSG_SLAB] = "kernel slab page", 787 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking", 788 [MF_MSG_HUGE] = "huge page", 789 [MF_MSG_FREE_HUGE] = "free huge page", 790 [MF_MSG_UNMAP_FAILED] = "unmapping failed page", 791 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page", 792 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page", 793 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page", 794 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page", 795 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page", 796 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page", 797 [MF_MSG_DIRTY_LRU] = "dirty LRU page", 798 [MF_MSG_CLEAN_LRU] = "clean LRU page", 799 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page", 800 [MF_MSG_BUDDY] = "free buddy page", 801 [MF_MSG_DAX] = "dax page", 802 [MF_MSG_UNSPLIT_THP] = "unsplit thp", 803 [MF_MSG_UNKNOWN] = "unknown page", 804 }; 805 806 /* 807 * XXX: It is possible that a page is isolated from LRU cache, 808 * and then kept in swap cache or failed to remove from page cache. 809 * The page count will stop it from being freed by unpoison. 810 * Stress tests should be aware of this memory leak problem. 811 */ 812 static int delete_from_lru_cache(struct page *p) 813 { 814 if (!isolate_lru_page(p)) { 815 /* 816 * Clear sensible page flags, so that the buddy system won't 817 * complain when the page is unpoison-and-freed. 818 */ 819 ClearPageActive(p); 820 ClearPageUnevictable(p); 821 822 /* 823 * Poisoned page might never drop its ref count to 0 so we have 824 * to uncharge it manually from its memcg. 
825 */ 826 mem_cgroup_uncharge(page_folio(p)); 827 828 /* 829 * drop the page count elevated by isolate_lru_page() 830 */ 831 put_page(p); 832 return 0; 833 } 834 return -EIO; 835 } 836 837 static int truncate_error_page(struct page *p, unsigned long pfn, 838 struct address_space *mapping) 839 { 840 int ret = MF_FAILED; 841 842 if (mapping->a_ops->error_remove_page) { 843 int err = mapping->a_ops->error_remove_page(mapping, p); 844 845 if (err != 0) { 846 pr_info("%#lx: Failed to punch page: %d\n", pfn, err); 847 } else if (page_has_private(p) && 848 !try_to_release_page(p, GFP_NOIO)) { 849 pr_info("%#lx: failed to release buffers\n", pfn); 850 } else { 851 ret = MF_RECOVERED; 852 } 853 } else { 854 /* 855 * If the file system doesn't support it just invalidate 856 * This fails on dirty or anything with private pages 857 */ 858 if (invalidate_inode_page(p)) 859 ret = MF_RECOVERED; 860 else 861 pr_info("%#lx: Failed to invalidate\n", pfn); 862 } 863 864 return ret; 865 } 866 867 struct page_state { 868 unsigned long mask; 869 unsigned long res; 870 enum mf_action_page_type type; 871 872 /* Callback ->action() has to unlock the relevant page inside it. */ 873 int (*action)(struct page_state *ps, struct page *p); 874 }; 875 876 /* 877 * Return true if page is still referenced by others, otherwise return 878 * false. 879 * 880 * The extra_pins is true when one extra refcount is expected. 881 */ 882 static bool has_extra_refcount(struct page_state *ps, struct page *p, 883 bool extra_pins) 884 { 885 int count = page_count(p) - 1; 886 887 if (extra_pins) 888 count -= 1; 889 890 if (count > 0) { 891 pr_err("%#lx: %s still referenced by %d users\n", 892 page_to_pfn(p), action_page_types[ps->type], count); 893 return true; 894 } 895 896 return false; 897 } 898 899 /* 900 * Error hit kernel page. 901 * Do nothing, try to be lucky and not touch this instead. For a few cases we 902 * could be more sophisticated. 903 */ 904 static int me_kernel(struct page_state *ps, struct page *p) 905 { 906 unlock_page(p); 907 return MF_IGNORED; 908 } 909 910 /* 911 * Page in unknown state. Do nothing. 912 */ 913 static int me_unknown(struct page_state *ps, struct page *p) 914 { 915 pr_err("%#lx: Unknown page state\n", page_to_pfn(p)); 916 unlock_page(p); 917 return MF_FAILED; 918 } 919 920 /* 921 * Clean (or cleaned) page cache page. 922 */ 923 static int me_pagecache_clean(struct page_state *ps, struct page *p) 924 { 925 int ret; 926 struct address_space *mapping; 927 bool extra_pins; 928 929 delete_from_lru_cache(p); 930 931 /* 932 * For anonymous pages we're done the only reference left 933 * should be the one m_f() holds. 934 */ 935 if (PageAnon(p)) { 936 ret = MF_RECOVERED; 937 goto out; 938 } 939 940 /* 941 * Now truncate the page in the page cache. This is really 942 * more like a "temporary hole punch" 943 * Don't do this for block devices when someone else 944 * has a reference, because it could be file system metadata 945 * and that's not safe to truncate. 946 */ 947 mapping = page_mapping(p); 948 if (!mapping) { 949 /* 950 * Page has been teared down in the meanwhile 951 */ 952 ret = MF_FAILED; 953 goto out; 954 } 955 956 /* 957 * The shmem page is kept in page cache instead of truncating 958 * so is expected to have an extra refcount after error-handling. 959 */ 960 extra_pins = shmem_mapping(mapping); 961 962 /* 963 * Truncation is a bit tricky. Enable it per file system for now. 964 * 965 * Open: to take i_rwsem or not for this? Right now we don't. 
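 *
 * Filesystems opt in by providing ->error_remove_page in their
 * address_space_operations (typically generic_error_remove_page());
 * truncate_error_page() falls back to invalidate_inode_page() when it
 * is absent.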
 */
	ret = truncate_error_page(p, page_to_pfn(p), mapping);
	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

out:
	unlock_page(p);

	return ret;
}

/*
 * Dirty pagecache page.
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page_state *ps, struct page *p)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO errors will be reported by write(), fsync(), etc.,
		 * which check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO errors,
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd before,
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, -EIO);
	}

	return me_pagecache_clean(ps, p);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (ie. page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *	- clear dirty bit to prevent IO
 *	- remove from LRU
 *	- but keep in the swap cache, so that when we return to it on
 *	  a later page fault, we know the application is accessing
 *	  corrupted data and shall be killed (we installed simple
 *	  interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page_state *ps, struct page *p)
{
	int ret;
	bool extra_pins = false;

	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	ret = delete_from_lru_cache(p) ?
MF_FAILED : MF_DELAYED; 1058 unlock_page(p); 1059 1060 if (ret == MF_DELAYED) 1061 extra_pins = true; 1062 1063 if (has_extra_refcount(ps, p, extra_pins)) 1064 ret = MF_FAILED; 1065 1066 return ret; 1067 } 1068 1069 static int me_swapcache_clean(struct page_state *ps, struct page *p) 1070 { 1071 struct folio *folio = page_folio(p); 1072 int ret; 1073 1074 delete_from_swap_cache(folio); 1075 1076 ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED; 1077 folio_unlock(folio); 1078 1079 if (has_extra_refcount(ps, p, false)) 1080 ret = MF_FAILED; 1081 1082 return ret; 1083 } 1084 1085 /* 1086 * Huge pages. Needs work. 1087 * Issues: 1088 * - Error on hugepage is contained in hugepage unit (not in raw page unit.) 1089 * To narrow down kill region to one page, we need to break up pmd. 1090 */ 1091 static int me_huge_page(struct page_state *ps, struct page *p) 1092 { 1093 int res; 1094 struct page *hpage = compound_head(p); 1095 struct address_space *mapping; 1096 bool extra_pins = false; 1097 1098 if (!PageHuge(hpage)) 1099 return MF_DELAYED; 1100 1101 mapping = page_mapping(hpage); 1102 if (mapping) { 1103 res = truncate_error_page(hpage, page_to_pfn(p), mapping); 1104 /* The page is kept in page cache. */ 1105 extra_pins = true; 1106 unlock_page(hpage); 1107 } else { 1108 unlock_page(hpage); 1109 /* 1110 * migration entry prevents later access on error hugepage, 1111 * so we can free and dissolve it into buddy to save healthy 1112 * subpages. 1113 */ 1114 put_page(hpage); 1115 if (__page_handle_poison(p) >= 0) { 1116 page_ref_inc(p); 1117 res = MF_RECOVERED; 1118 } else { 1119 res = MF_FAILED; 1120 } 1121 } 1122 1123 if (has_extra_refcount(ps, p, extra_pins)) 1124 res = MF_FAILED; 1125 1126 return res; 1127 } 1128 1129 /* 1130 * Various page states we can handle. 1131 * 1132 * A page state is defined by its current page->flags bits. 1133 * The table matches them in order and calls the right handler. 1134 * 1135 * This is quite tricky because we can access page at any time 1136 * in its live cycle, so all accesses have to be extremely careful. 1137 * 1138 * This is not complete. More states could be added. 1139 * For any missing state don't attempt recovery. 1140 */ 1141 1142 #define dirty (1UL << PG_dirty) 1143 #define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked)) 1144 #define unevict (1UL << PG_unevictable) 1145 #define mlock (1UL << PG_mlocked) 1146 #define lru (1UL << PG_lru) 1147 #define head (1UL << PG_head) 1148 #define slab (1UL << PG_slab) 1149 #define reserved (1UL << PG_reserved) 1150 1151 static struct page_state error_states[] = { 1152 { reserved, reserved, MF_MSG_KERNEL, me_kernel }, 1153 /* 1154 * free pages are specially detected outside this table: 1155 * PG_buddy pages only make a small fraction of all free pages. 1156 */ 1157 1158 /* 1159 * Could in theory check if slab page is free or if we can drop 1160 * currently unused objects without touching them. But just 1161 * treat it as standard kernel for now. 
1162 */ 1163 { slab, slab, MF_MSG_SLAB, me_kernel }, 1164 1165 { head, head, MF_MSG_HUGE, me_huge_page }, 1166 1167 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty }, 1168 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean }, 1169 1170 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty }, 1171 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean }, 1172 1173 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty }, 1174 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean }, 1175 1176 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty }, 1177 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean }, 1178 1179 /* 1180 * Catchall entry: must be at end. 1181 */ 1182 { 0, 0, MF_MSG_UNKNOWN, me_unknown }, 1183 }; 1184 1185 #undef dirty 1186 #undef sc 1187 #undef unevict 1188 #undef mlock 1189 #undef lru 1190 #undef head 1191 #undef slab 1192 #undef reserved 1193 1194 /* 1195 * "Dirty/Clean" indication is not 100% accurate due to the possibility of 1196 * setting PG_dirty outside page lock. See also comment above set_page_dirty(). 1197 */ 1198 static int action_result(unsigned long pfn, enum mf_action_page_type type, 1199 enum mf_result result) 1200 { 1201 trace_memory_failure_event(pfn, type, result); 1202 1203 num_poisoned_pages_inc(pfn); 1204 pr_err("%#lx: recovery action for %s: %s\n", 1205 pfn, action_page_types[type], action_name[result]); 1206 1207 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY; 1208 } 1209 1210 static int page_action(struct page_state *ps, struct page *p, 1211 unsigned long pfn) 1212 { 1213 int result; 1214 1215 /* page p should be unlocked after returning from ps->action(). */ 1216 result = ps->action(ps, p); 1217 1218 /* Could do more checks here if page looks ok */ 1219 /* 1220 * Could adjust zone counters here to correct for the missing page. 1221 */ 1222 1223 return action_result(pfn, ps->type, result); 1224 } 1225 1226 static inline bool PageHWPoisonTakenOff(struct page *page) 1227 { 1228 return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON; 1229 } 1230 1231 void SetPageHWPoisonTakenOff(struct page *page) 1232 { 1233 set_page_private(page, MAGIC_HWPOISON); 1234 } 1235 1236 void ClearPageHWPoisonTakenOff(struct page *page) 1237 { 1238 if (PageHWPoison(page)) 1239 set_page_private(page, 0); 1240 } 1241 1242 /* 1243 * Return true if a page type of a given page is supported by hwpoison 1244 * mechanism (while handling could fail), otherwise false. This function 1245 * does not return true for hugetlb or device memory pages, so it's assumed 1246 * to be called only in the context where we never have such pages. 1247 */ 1248 static inline bool HWPoisonHandlable(struct page *page, unsigned long flags) 1249 { 1250 /* Soft offline could migrate non-LRU movable pages */ 1251 if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page)) 1252 return true; 1253 1254 return PageLRU(page) || is_free_buddy_page(page); 1255 } 1256 1257 static int __get_hwpoison_page(struct page *page, unsigned long flags) 1258 { 1259 struct page *head = compound_head(page); 1260 int ret = 0; 1261 bool hugetlb = false; 1262 1263 ret = get_hwpoison_huge_page(head, &hugetlb, false); 1264 if (hugetlb) 1265 return ret; 1266 1267 /* 1268 * This check prevents from calling get_page_unless_zero() for any 1269 * unsupported type of page in order to reduce the risk of unexpected 1270 * races caused by taking a page refcount. 
1271 */ 1272 if (!HWPoisonHandlable(head, flags)) 1273 return -EBUSY; 1274 1275 if (get_page_unless_zero(head)) { 1276 if (head == compound_head(page)) 1277 return 1; 1278 1279 pr_info("%#lx cannot catch tail\n", page_to_pfn(page)); 1280 put_page(head); 1281 } 1282 1283 return 0; 1284 } 1285 1286 static int get_any_page(struct page *p, unsigned long flags) 1287 { 1288 int ret = 0, pass = 0; 1289 bool count_increased = false; 1290 1291 if (flags & MF_COUNT_INCREASED) 1292 count_increased = true; 1293 1294 try_again: 1295 if (!count_increased) { 1296 ret = __get_hwpoison_page(p, flags); 1297 if (!ret) { 1298 if (page_count(p)) { 1299 /* We raced with an allocation, retry. */ 1300 if (pass++ < 3) 1301 goto try_again; 1302 ret = -EBUSY; 1303 } else if (!PageHuge(p) && !is_free_buddy_page(p)) { 1304 /* We raced with put_page, retry. */ 1305 if (pass++ < 3) 1306 goto try_again; 1307 ret = -EIO; 1308 } 1309 goto out; 1310 } else if (ret == -EBUSY) { 1311 /* 1312 * We raced with (possibly temporary) unhandlable 1313 * page, retry. 1314 */ 1315 if (pass++ < 3) { 1316 shake_page(p); 1317 goto try_again; 1318 } 1319 ret = -EIO; 1320 goto out; 1321 } 1322 } 1323 1324 if (PageHuge(p) || HWPoisonHandlable(p, flags)) { 1325 ret = 1; 1326 } else { 1327 /* 1328 * A page we cannot handle. Check whether we can turn 1329 * it into something we can handle. 1330 */ 1331 if (pass++ < 3) { 1332 put_page(p); 1333 shake_page(p); 1334 count_increased = false; 1335 goto try_again; 1336 } 1337 put_page(p); 1338 ret = -EIO; 1339 } 1340 out: 1341 if (ret == -EIO) 1342 pr_err("%#lx: unhandlable page.\n", page_to_pfn(p)); 1343 1344 return ret; 1345 } 1346 1347 static int __get_unpoison_page(struct page *page) 1348 { 1349 struct page *head = compound_head(page); 1350 int ret = 0; 1351 bool hugetlb = false; 1352 1353 ret = get_hwpoison_huge_page(head, &hugetlb, true); 1354 if (hugetlb) 1355 return ret; 1356 1357 /* 1358 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison, 1359 * but also isolated from buddy freelist, so need to identify the 1360 * state and have to cancel both operations to unpoison. 1361 */ 1362 if (PageHWPoisonTakenOff(page)) 1363 return -EHWPOISON; 1364 1365 return get_page_unless_zero(page) ? 1 : 0; 1366 } 1367 1368 /** 1369 * get_hwpoison_page() - Get refcount for memory error handling 1370 * @p: Raw error page (hit by memory error) 1371 * @flags: Flags controlling behavior of error handling 1372 * 1373 * get_hwpoison_page() takes a page refcount of an error page to handle memory 1374 * error on it, after checking that the error page is in a well-defined state 1375 * (defined as a page-type we can successfully handle the memory error on it, 1376 * such as LRU page and hugetlb page). 1377 * 1378 * Memory error handling could be triggered at any time on any type of page, 1379 * so it's prone to race with typical memory management lifecycle (like 1380 * allocation and free). So to avoid such races, get_hwpoison_page() takes 1381 * extra care for the error page's state (as done in __get_hwpoison_page()), 1382 * and has some retry logic in get_any_page(). 1383 * 1384 * When called from unpoison_memory(), the caller should already ensure that 1385 * the given page has PG_hwpoison. So it's never reused for other page 1386 * allocations, and __get_unpoison_page() never races with them. 
1387 * 1388 * Return: 0 on failure, 1389 * 1 on success for in-use pages in a well-defined state, 1390 * -EIO for pages on which we can not handle memory errors, 1391 * -EBUSY when get_hwpoison_page() has raced with page lifecycle 1392 * operations like allocation and free, 1393 * -EHWPOISON when the page is hwpoisoned and taken off from buddy. 1394 */ 1395 static int get_hwpoison_page(struct page *p, unsigned long flags) 1396 { 1397 int ret; 1398 1399 zone_pcp_disable(page_zone(p)); 1400 if (flags & MF_UNPOISON) 1401 ret = __get_unpoison_page(p); 1402 else 1403 ret = get_any_page(p, flags); 1404 zone_pcp_enable(page_zone(p)); 1405 1406 return ret; 1407 } 1408 1409 /* 1410 * Do all that is necessary to remove user space mappings. Unmap 1411 * the pages and send SIGBUS to the processes if the data was dirty. 1412 */ 1413 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, 1414 int flags, struct page *hpage) 1415 { 1416 struct folio *folio = page_folio(hpage); 1417 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC; 1418 struct address_space *mapping; 1419 LIST_HEAD(tokill); 1420 bool unmap_success; 1421 int forcekill; 1422 bool mlocked = PageMlocked(hpage); 1423 1424 /* 1425 * Here we are interested only in user-mapped pages, so skip any 1426 * other types of pages. 1427 */ 1428 if (PageReserved(p) || PageSlab(p) || PageTable(p)) 1429 return true; 1430 if (!(PageLRU(hpage) || PageHuge(p))) 1431 return true; 1432 1433 /* 1434 * This check implies we don't kill processes if their pages 1435 * are in the swap cache early. Those are always late kills. 1436 */ 1437 if (!page_mapped(hpage)) 1438 return true; 1439 1440 if (PageKsm(p)) { 1441 pr_err("%#lx: can't handle KSM pages.\n", pfn); 1442 return false; 1443 } 1444 1445 if (PageSwapCache(p)) { 1446 pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); 1447 ttu |= TTU_IGNORE_HWPOISON; 1448 } 1449 1450 /* 1451 * Propagate the dirty bit from PTEs to struct page first, because we 1452 * need this to decide if we should kill or just drop the page. 1453 * XXX: the dirty test could be racy: set_page_dirty() may not always 1454 * be called inside page lock (it's recommended but not enforced). 1455 */ 1456 mapping = page_mapping(hpage); 1457 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping && 1458 mapping_can_writeback(mapping)) { 1459 if (page_mkclean(hpage)) { 1460 SetPageDirty(hpage); 1461 } else { 1462 ttu |= TTU_IGNORE_HWPOISON; 1463 pr_info("%#lx: corrupted page was clean: dropped without side effects\n", 1464 pfn); 1465 } 1466 } 1467 1468 /* 1469 * First collect all the processes that have the page 1470 * mapped in dirty form. This has to be done before try_to_unmap, 1471 * because ttu takes the rmap data structures down. 1472 */ 1473 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); 1474 1475 if (PageHuge(hpage) && !PageAnon(hpage)) { 1476 /* 1477 * For hugetlb pages in shared mappings, try_to_unmap 1478 * could potentially call huge_pmd_unshare. Because of 1479 * this, take semaphore in write mode here and set 1480 * TTU_RMAP_LOCKED to indicate we have taken the lock 1481 * at this higher level. 
1482 */ 1483 mapping = hugetlb_page_mapping_lock_write(hpage); 1484 if (mapping) { 1485 try_to_unmap(folio, ttu|TTU_RMAP_LOCKED); 1486 i_mmap_unlock_write(mapping); 1487 } else 1488 pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn); 1489 } else { 1490 try_to_unmap(folio, ttu); 1491 } 1492 1493 unmap_success = !page_mapped(hpage); 1494 if (!unmap_success) 1495 pr_err("%#lx: failed to unmap page (mapcount=%d)\n", 1496 pfn, page_mapcount(hpage)); 1497 1498 /* 1499 * try_to_unmap() might put mlocked page in lru cache, so call 1500 * shake_page() again to ensure that it's flushed. 1501 */ 1502 if (mlocked) 1503 shake_page(hpage); 1504 1505 /* 1506 * Now that the dirty bit has been propagated to the 1507 * struct page and all unmaps done we can decide if 1508 * killing is needed or not. Only kill when the page 1509 * was dirty or the process is not restartable, 1510 * otherwise the tokill list is merely 1511 * freed. When there was a problem unmapping earlier 1512 * use a more force-full uncatchable kill to prevent 1513 * any accesses to the poisoned memory. 1514 */ 1515 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) || 1516 !unmap_success; 1517 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags); 1518 1519 return unmap_success; 1520 } 1521 1522 static int identify_page_state(unsigned long pfn, struct page *p, 1523 unsigned long page_flags) 1524 { 1525 struct page_state *ps; 1526 1527 /* 1528 * The first check uses the current page flags which may not have any 1529 * relevant information. The second check with the saved page flags is 1530 * carried out only if the first check can't determine the page status. 1531 */ 1532 for (ps = error_states;; ps++) 1533 if ((p->flags & ps->mask) == ps->res) 1534 break; 1535 1536 page_flags |= (p->flags & (1UL << PG_dirty)); 1537 1538 if (!ps->mask) 1539 for (ps = error_states;; ps++) 1540 if ((page_flags & ps->mask) == ps->res) 1541 break; 1542 return page_action(ps, p, pfn); 1543 } 1544 1545 static int try_to_split_thp_page(struct page *page) 1546 { 1547 int ret; 1548 1549 lock_page(page); 1550 ret = split_huge_page(page); 1551 unlock_page(page); 1552 1553 if (unlikely(ret)) 1554 put_page(page); 1555 1556 return ret; 1557 } 1558 1559 static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn, 1560 struct address_space *mapping, pgoff_t index, int flags) 1561 { 1562 struct to_kill *tk; 1563 unsigned long size = 0; 1564 1565 list_for_each_entry(tk, to_kill, nd) 1566 if (tk->size_shift) 1567 size = max(size, 1UL << tk->size_shift); 1568 1569 if (size) { 1570 /* 1571 * Unmap the largest mapping to avoid breaking up device-dax 1572 * mappings which are constant size. The actual size of the 1573 * mapping being torn down is communicated in siginfo, see 1574 * kill_proc() 1575 */ 1576 loff_t start = (index << PAGE_SHIFT) & ~(size - 1); 1577 1578 unmap_mapping_range(mapping, start, size, 0); 1579 } 1580 1581 kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags); 1582 } 1583 1584 static int mf_generic_kill_procs(unsigned long long pfn, int flags, 1585 struct dev_pagemap *pgmap) 1586 { 1587 struct page *page = pfn_to_page(pfn); 1588 LIST_HEAD(to_kill); 1589 dax_entry_t cookie; 1590 int rc = 0; 1591 1592 /* 1593 * Pages instantiated by device-dax (not filesystem-dax) 1594 * may be compound pages. 
1595 */ 1596 page = compound_head(page); 1597 1598 /* 1599 * Prevent the inode from being freed while we are interrogating 1600 * the address_space, typically this would be handled by 1601 * lock_page(), but dax pages do not use the page lock. This 1602 * also prevents changes to the mapping of this pfn until 1603 * poison signaling is complete. 1604 */ 1605 cookie = dax_lock_page(page); 1606 if (!cookie) 1607 return -EBUSY; 1608 1609 if (hwpoison_filter(page)) { 1610 rc = -EOPNOTSUPP; 1611 goto unlock; 1612 } 1613 1614 switch (pgmap->type) { 1615 case MEMORY_DEVICE_PRIVATE: 1616 case MEMORY_DEVICE_COHERENT: 1617 /* 1618 * TODO: Handle device pages which may need coordination 1619 * with device-side memory. 1620 */ 1621 rc = -ENXIO; 1622 goto unlock; 1623 default: 1624 break; 1625 } 1626 1627 /* 1628 * Use this flag as an indication that the dax page has been 1629 * remapped UC to prevent speculative consumption of poison. 1630 */ 1631 SetPageHWPoison(page); 1632 1633 /* 1634 * Unlike System-RAM there is no possibility to swap in a 1635 * different physical page at a given virtual address, so all 1636 * userspace consumption of ZONE_DEVICE memory necessitates 1637 * SIGBUS (i.e. MF_MUST_KILL) 1638 */ 1639 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; 1640 collect_procs(page, &to_kill, true); 1641 1642 unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags); 1643 unlock: 1644 dax_unlock_page(page, cookie); 1645 return rc; 1646 } 1647 1648 #ifdef CONFIG_FS_DAX 1649 /** 1650 * mf_dax_kill_procs - Collect and kill processes who are using this file range 1651 * @mapping: address_space of the file in use 1652 * @index: start pgoff of the range within the file 1653 * @count: length of the range, in unit of PAGE_SIZE 1654 * @mf_flags: memory failure flags 1655 */ 1656 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, 1657 unsigned long count, int mf_flags) 1658 { 1659 LIST_HEAD(to_kill); 1660 dax_entry_t cookie; 1661 struct page *page; 1662 size_t end = index + count; 1663 1664 mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; 1665 1666 for (; index < end; index++) { 1667 page = NULL; 1668 cookie = dax_lock_mapping_entry(mapping, index, &page); 1669 if (!cookie) 1670 return -EBUSY; 1671 if (!page) 1672 goto unlock; 1673 1674 SetPageHWPoison(page); 1675 1676 collect_procs_fsdax(page, mapping, index, &to_kill); 1677 unmap_and_kill(&to_kill, page_to_pfn(page), mapping, 1678 index, mf_flags); 1679 unlock: 1680 dax_unlock_mapping_entry(mapping, index, cookie); 1681 } 1682 return 0; 1683 } 1684 EXPORT_SYMBOL_GPL(mf_dax_kill_procs); 1685 #endif /* CONFIG_FS_DAX */ 1686 1687 #ifdef CONFIG_HUGETLB_PAGE 1688 /* 1689 * Struct raw_hwp_page represents information about "raw error page", 1690 * constructing singly linked list from ->_hugetlb_hwpoison field of folio. 
1691 */ 1692 struct raw_hwp_page { 1693 struct llist_node node; 1694 struct page *page; 1695 }; 1696 1697 static inline struct llist_head *raw_hwp_list_head(struct page *hpage) 1698 { 1699 return (struct llist_head *)&page_folio(hpage)->_hugetlb_hwpoison; 1700 } 1701 1702 static unsigned long __free_raw_hwp_pages(struct page *hpage, bool move_flag) 1703 { 1704 struct llist_head *head; 1705 struct llist_node *t, *tnode; 1706 unsigned long count = 0; 1707 1708 head = raw_hwp_list_head(hpage); 1709 llist_for_each_safe(tnode, t, head->first) { 1710 struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node); 1711 1712 if (move_flag) 1713 SetPageHWPoison(p->page); 1714 else 1715 num_poisoned_pages_sub(page_to_pfn(p->page), 1); 1716 kfree(p); 1717 count++; 1718 } 1719 llist_del_all(head); 1720 return count; 1721 } 1722 1723 static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page) 1724 { 1725 struct llist_head *head; 1726 struct raw_hwp_page *raw_hwp; 1727 struct llist_node *t, *tnode; 1728 int ret = TestSetPageHWPoison(hpage) ? -EHWPOISON : 0; 1729 1730 /* 1731 * Once the hwpoison hugepage has lost reliable raw error info, 1732 * there is little meaning to keep additional error info precisely, 1733 * so skip to add additional raw error info. 1734 */ 1735 if (HPageRawHwpUnreliable(hpage)) 1736 return -EHWPOISON; 1737 head = raw_hwp_list_head(hpage); 1738 llist_for_each_safe(tnode, t, head->first) { 1739 struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node); 1740 1741 if (p->page == page) 1742 return -EHWPOISON; 1743 } 1744 1745 raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC); 1746 if (raw_hwp) { 1747 raw_hwp->page = page; 1748 llist_add(&raw_hwp->node, head); 1749 /* the first error event will be counted in action_result(). */ 1750 if (ret) 1751 num_poisoned_pages_inc(page_to_pfn(page)); 1752 } else { 1753 /* 1754 * Failed to save raw error info. We no longer trace all 1755 * hwpoisoned subpages, and we need refuse to free/dissolve 1756 * this hwpoisoned hugepage. 1757 */ 1758 SetHPageRawHwpUnreliable(hpage); 1759 /* 1760 * Once HPageRawHwpUnreliable is set, raw_hwp_page is not 1761 * used any more, so free it. 1762 */ 1763 __free_raw_hwp_pages(hpage, false); 1764 } 1765 return ret; 1766 } 1767 1768 static unsigned long free_raw_hwp_pages(struct page *hpage, bool move_flag) 1769 { 1770 /* 1771 * HPageVmemmapOptimized hugepages can't be freed because struct 1772 * pages for tail pages are required but they don't exist. 1773 */ 1774 if (move_flag && HPageVmemmapOptimized(hpage)) 1775 return 0; 1776 1777 /* 1778 * HPageRawHwpUnreliable hugepages shouldn't be unpoisoned by 1779 * definition. 1780 */ 1781 if (HPageRawHwpUnreliable(hpage)) 1782 return 0; 1783 1784 return __free_raw_hwp_pages(hpage, move_flag); 1785 } 1786 1787 void hugetlb_clear_page_hwpoison(struct page *hpage) 1788 { 1789 if (HPageRawHwpUnreliable(hpage)) 1790 return; 1791 ClearPageHWPoison(hpage); 1792 free_raw_hwp_pages(hpage, true); 1793 } 1794 1795 /* 1796 * Called from hugetlb code with hugetlb_lock held. 
1797 * 1798 * Return values: 1799 * 0 - free hugepage 1800 * 1 - in-use hugepage 1801 * 2 - not a hugepage 1802 * -EBUSY - the hugepage is busy (try to retry) 1803 * -EHWPOISON - the hugepage is already hwpoisoned 1804 */ 1805 int __get_huge_page_for_hwpoison(unsigned long pfn, int flags, 1806 bool *migratable_cleared) 1807 { 1808 struct page *page = pfn_to_page(pfn); 1809 struct page *head = compound_head(page); 1810 int ret = 2; /* fallback to normal page handling */ 1811 bool count_increased = false; 1812 1813 if (!PageHeadHuge(head)) 1814 goto out; 1815 1816 if (flags & MF_COUNT_INCREASED) { 1817 ret = 1; 1818 count_increased = true; 1819 } else if (HPageFreed(head)) { 1820 ret = 0; 1821 } else if (HPageMigratable(head)) { 1822 ret = get_page_unless_zero(head); 1823 if (ret) 1824 count_increased = true; 1825 } else { 1826 ret = -EBUSY; 1827 if (!(flags & MF_NO_RETRY)) 1828 goto out; 1829 } 1830 1831 if (hugetlb_set_page_hwpoison(head, page)) { 1832 ret = -EHWPOISON; 1833 goto out; 1834 } 1835 1836 /* 1837 * Clearing HPageMigratable for hwpoisoned hugepages to prevent them 1838 * from being migrated by memory hotremove. 1839 */ 1840 if (count_increased && HPageMigratable(head)) { 1841 ClearHPageMigratable(head); 1842 *migratable_cleared = true; 1843 } 1844 1845 return ret; 1846 out: 1847 if (count_increased) 1848 put_page(head); 1849 return ret; 1850 } 1851 1852 /* 1853 * Taking refcount of hugetlb pages needs extra care about race conditions 1854 * with basic operations like hugepage allocation/free/demotion. 1855 * So some of prechecks for hwpoison (pinning, and testing/setting 1856 * PageHWPoison) should be done in single hugetlb_lock range. 1857 */ 1858 static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) 1859 { 1860 int res; 1861 struct page *p = pfn_to_page(pfn); 1862 struct page *head; 1863 unsigned long page_flags; 1864 bool migratable_cleared = false; 1865 1866 *hugetlb = 1; 1867 retry: 1868 res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared); 1869 if (res == 2) { /* fallback to normal page handling */ 1870 *hugetlb = 0; 1871 return 0; 1872 } else if (res == -EHWPOISON) { 1873 pr_err("%#lx: already hardware poisoned\n", pfn); 1874 if (flags & MF_ACTION_REQUIRED) { 1875 head = compound_head(p); 1876 res = kill_accessing_process(current, page_to_pfn(head), flags); 1877 } 1878 return res; 1879 } else if (res == -EBUSY) { 1880 if (!(flags & MF_NO_RETRY)) { 1881 flags |= MF_NO_RETRY; 1882 goto retry; 1883 } 1884 return action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED); 1885 } 1886 1887 head = compound_head(p); 1888 lock_page(head); 1889 1890 if (hwpoison_filter(p)) { 1891 hugetlb_clear_page_hwpoison(head); 1892 if (migratable_cleared) 1893 SetHPageMigratable(head); 1894 unlock_page(head); 1895 if (res == 1) 1896 put_page(head); 1897 return -EOPNOTSUPP; 1898 } 1899 1900 /* 1901 * Handling free hugepage. The possible race with hugepage allocation 1902 * or demotion can be prevented by PageHWPoison flag. 
	 */
	if (res == 0) {
		unlock_page(head);
		if (__page_handle_poison(p) >= 0) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		} else {
			res = MF_FAILED;
		}
		return action_result(pfn, MF_MSG_FREE_HUGE, res);
	}

	page_flags = head->flags;

	if (!hwpoison_user_mappings(p, pfn, flags, head)) {
		unlock_page(head);
		return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
	}

	return identify_page_state(pfn, p, page_flags);
}

#else
static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
	return 0;
}

static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
{
	return 0;
}
#endif	/* CONFIG_HUGETLB_PAGE */

/* Drop the extra refcount in case we come from madvise() */
static void put_ref_page(unsigned long pfn, int flags)
{
	struct page *page;

	if (!(flags & MF_COUNT_INCREASED))
		return;

	page = pfn_to_page(pfn);
	if (page)
		put_page(page);
}

static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	int rc = -ENXIO;

	put_ref_page(pfn, flags);

	/* device metadata space is not recoverable */
	if (!pgmap_pfn_valid(pgmap, pfn))
		goto out;

	/*
	 * Call the driver's implementation to handle the memory failure,
	 * otherwise fall back to the generic handler.
	 */
	if (pgmap_has_memory_failure(pgmap)) {
		rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
		/*
		 * Fall back to the generic handler too if the operation is
		 * not supported inside the driver/device/filesystem.
		 */
		if (rc != -EOPNOTSUPP)
			goto out;
	}

	rc = mf_generic_kill_procs(pfn, flags, pgmap);
out:
	/* drop the pgmap ref acquired in the caller */
	put_dev_pagemap(pgmap);
	action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
	return rc;
}

static DEFINE_MUTEX(mf_mutex);
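
/*
 * Illustrative sketch of a driver-side ->memory_failure() callback that
 * memory_failure_dev_pagemap() above would invoke. The function and the
 * example_* helpers are hypothetical; only the dev_pagemap_ops hook and the
 * -EOPNOTSUPP fallback convention come from the code above. Returning
 * -EOPNOTSUPP hands the event back to mf_generic_kill_procs().
 */
#if 0
static int example_pgmap_memory_failure(struct dev_pagemap *pgmap,
		unsigned long pfn, unsigned long nr_pages, int mf_flags)
{
	/* Driver-specific reverse map from pfn to the consumers of the page. */
	if (!example_can_reverse_map(pgmap, pfn))
		return -EOPNOTSUPP;	/* fall back to the generic handler */

	/* Notify/kill the consumers, e.g. via the filesystem's rmap. */
	return example_notify_consumers(pgmap, pfn, nr_pages, mf_flags);
}
#endif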

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 *
 * Return: 0 if the memory error was successfully handled,
 *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
 *         < 0 (other than -EOPNOTSUPP) on failure.
 */
int memory_failure(unsigned long pfn, int flags)
{
	struct page *p;
	struct page *hpage;
	struct dev_pagemap *pgmap;
	int res = 0;
	unsigned long page_flags;
	bool retry = true;
	int hugetlb = 0;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure on page %lx", pfn);

	mutex_lock(&mf_mutex);

	if (!(flags & MF_SW_SIMULATED))
		hw_memory_failure = true;

	p = pfn_to_online_page(pfn);
	if (!p) {
		res = arch_memory_failure(pfn, flags);
		if (res == 0)
			goto unlock_mutex;

		if (pfn_valid(pfn)) {
			pgmap = get_dev_pagemap(pfn, NULL);
			if (pgmap) {
				res = memory_failure_dev_pagemap(pfn, flags,
								 pgmap);
				goto unlock_mutex;
			}
		}
		pr_err("%#lx: memory outside kernel control\n", pfn);
		res = -ENXIO;
		goto unlock_mutex;
	}

try_again:
	res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
	if (hugetlb)
		goto unlock_mutex;

	if (TestSetPageHWPoison(p)) {
		pr_err("%#lx: already hardware poisoned\n", pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, pfn, flags);
		if (flags & MF_COUNT_INCREASED)
			put_page(p);
		goto unlock_mutex;
	}

	hpage = compound_head(p);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hands:
	 *    check_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up the page count from 0,
	 * as that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			if (is_free_buddy_page(p)) {
				if (take_page_off_buddy(p)) {
					page_ref_inc(p);
					res = MF_RECOVERED;
				} else {
					/* We lost the race, try again */
					if (retry) {
						ClearPageHWPoison(p);
						retry = false;
						goto try_again;
					}
					res = MF_FAILED;
				}
				res = action_result(pfn, MF_MSG_BUDDY, res);
			} else {
				res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
			}
			goto unlock_mutex;
		} else if (res < 0) {
			res = action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			goto unlock_mutex;
		}
	}

	if (PageTransHuge(hpage)) {
		/*
		 * The flag must be set after the refcount is bumped,
		 * otherwise it may race with THP split.
		 * And the flag can't be set in get_hwpoison_page() since
		 * it is called by soft offline too and it is just called
		 * for !MF_COUNT_INCREASED. So here seems to be the best
		 * place.
		 *
		 * We don't need to care about the above error handling paths
		 * for get_hwpoison_page() since they handle either a free
		 * page or an unhandlable page. The refcount is bumped iff the
		 * page is a valid handlable page.
		 */
		SetPageHasHWPoisoned(hpage);
		if (try_to_split_thp_page(p) < 0) {
			res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
			goto unlock_mutex;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	shake_page(p);

	lock_page(p);

	/*
	 * We only intend to deal with non-compound pages here. However, the
	 * page could have become a compound page due to a race window. If
	 * this happens, retry once in the hope of handling the page in the
	 * next round.
	 */
	if (PageCompound(p)) {
		if (retry) {
			ClearPageHWPoison(p);
			unlock_page(p);
			put_page(p);
			flags &= ~MF_COUNT_INCREASED;
			retry = false;
			goto try_again;
		}
		res = action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		goto unlock_page;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action. One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * page_remove_rmap() in try_to_unmap_one(). So to determine the page
	 * status correctly, we save a copy of the page flags at this time.
	 */
	page_flags = p->flags;

	if (hwpoison_filter(p)) {
		ClearPageHWPoison(p);
		unlock_page(p);
		put_page(p);
		res = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	/*
	 * __munlock_pagevec may clear a writeback page's LRU flag without
	 * the page lock. We need to wait for writeback completion for this
	 * page, or it may trigger a vfs BUG while evicting the inode.
	 */
	if (!PageLRU(p) && !PageWriteback(p))
		goto identify_page_state;

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __filemap_remove_folio() assumes an unmapped page.
	 */
	if (!hwpoison_user_mappings(p, pfn, flags, p)) {
		res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		goto unlock_page;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
		goto unlock_page;
	}

identify_page_state:
	res = identify_page_state(pfn, p, page_flags);
	mutex_unlock(&mf_mutex);
	return res;
unlock_page:
	unlock_page(p);
unlock_mutex:
	mutex_unlock(&mf_mutex);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);
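
/*
 * Illustrative sketch of a synchronous caller such as an architecture's
 * machine check handler reporting poison consumed by the current task. The
 * wrapper function is hypothetical; memory_failure() and the MF_* flags are
 * the real interface. MF_ACTION_REQUIRED tells the handler that the error
 * was consumed in the current execution context, so the accessing task is
 * signalled right away rather than lazily on the next access.
 */
#if 0
static void example_handle_consumed_poison(unsigned long pfn)
{
	int ret = memory_failure(pfn, MF_ACTION_REQUIRED | MF_MUST_KILL);

	if (ret < 0 && ret != -EOPNOTSUPP)
		pr_err("recovery failed for pfn %#lx: %d\n", pfn, ret);
}
#endif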

#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)

struct memory_failure_entry {
	unsigned long pfn;
	int flags;
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);

/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * recovery of the error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn = pfn,
		.flags = flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);

static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = container_of(work, struct memory_failure_cpu, work);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(entry.pfn, entry.flags);
		else
			memory_failure(entry.pfn, entry.flags);
	}
}

/*
 * Process memory_failure work queued on the specified CPU.
 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
 */
void memory_failure_queue_kick(int cpu)
{
	struct memory_failure_cpu *mf_cpu;

	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
	cancel_work_sync(&mf_cpu->work);
	memory_failure_work_func(&mf_cpu->work);
}

static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	return 0;
}
core_initcall(memory_failure_init);
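
/*
 * Illustrative sketch of deferring the heavy work from interrupt context,
 * loosely modelled on firmware-first error reporting. The handler below is
 * hypothetical; memory_failure_queue() is the real entry point, and
 * memory_failure_work_func() above later routes the queued entry to either
 * soft_offline_page() or memory_failure() in process context.
 */
#if 0
static void example_platform_error_irq(unsigned long pfn, bool soft)
{
	/* Corrected-but-degrading pages may be soft-offlined instead. */
	memory_failure_queue(pfn, soft ? MF_SOFT_OFFLINE : 0);
}
#endif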

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt
#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done at the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int ret = -EBUSY;
	int freeit = 0;
	unsigned long count = 1;
	bool huge = false;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	mutex_lock(&mf_mutex);

	if (hw_memory_failure) {
		unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
				 pfn, &unpoison_rs);
		ret = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (!PageHWPoison(p)) {
		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (page_count(page) > 1) {
		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (page_mapped(page)) {
		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (page_mapping(page)) {
		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (PageSlab(page) || PageTable(page) || PageReserved(page))
		goto unlock_mutex;

	ret = get_hwpoison_page(p, MF_UNPOISON);
	if (!ret) {
		if (PageHuge(p)) {
			huge = true;
			count = free_raw_hwp_pages(page, false);
			if (count == 0) {
				ret = -EBUSY;
				goto unlock_mutex;
			}
		}
		ret = TestClearPageHWPoison(page) ? 0 : -EBUSY;
	} else if (ret < 0) {
		if (ret == -EHWPOISON) {
			ret = put_page_back_buddy(p) ? 0 : -EBUSY;
		} else
			unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
					 pfn, &unpoison_rs);
	} else {
		if (PageHuge(p)) {
			huge = true;
			count = free_raw_hwp_pages(page, false);
			if (count == 0) {
				ret = -EBUSY;
				put_page(page);
				goto unlock_mutex;
			}
		}
		freeit = !!TestClearPageHWPoison(p);

		put_page(page);
		if (freeit) {
			put_page(page);
			ret = 0;
		}
	}

unlock_mutex:
	mutex_unlock(&mf_mutex);
	if (!ret || freeit) {
		if (!huge)
			num_poisoned_pages_sub(pfn, 1);
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 page_to_pfn(p), &unpoison_rs);
	}
	return ret;
}
EXPORT_SYMBOL(unpoison_memory);
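
/*
 * Illustrative sketch of a test-only caller, similar in spirit to the
 * hwpoison-inject debugfs interface; the wrapper below is hypothetical and
 * only unpoison_memory() is exported by this file. Note that after a real
 * (non-simulated) hardware error, unpoisoning is refused with -EOPNOTSUPP.
 */
#if 0
static int example_debugfs_unpoison(u64 pfn)
{
	return unpoison_memory((unsigned long)pfn);
}
#endif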

static bool isolate_page(struct page *page, struct list_head *pagelist)
{
	bool isolated = false;

	if (PageHuge(page)) {
		isolated = !isolate_hugetlb(page, pagelist);
	} else {
		bool lru = !__PageMovable(page);

		if (lru)
			isolated = !isolate_lru_page(page);
		else
			isolated = !isolate_movable_page(page,
							 ISOLATE_UNEVICTABLE);

		if (isolated) {
			list_add(&page->lru, pagelist);
			if (lru)
				inc_node_page_state(page, NR_ISOLATED_ANON +
						    page_is_file_lru(page));
		}
	}

	/*
	 * If we succeed in isolating the page, we grabbed another refcount on
	 * the page, so we can safely drop the one we got from get_any_pages().
	 * If we fail to isolate the page, it means that we cannot go further
	 * and we will return an error, so drop the reference we got from
	 * get_any_pages() as well.
	 */
	put_page(page);
	return isolated;
}

/*
 * soft_offline_in_use_page handles hugetlb pages and non-hugetlb pages.
 * If the page is a non-dirty unmapped page-cache page, it is simply
 * invalidated. If the page is mapped, its contents are migrated over.
 */
static int soft_offline_in_use_page(struct page *page)
{
	long ret = 0;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	char const *msg_page[] = {"page", "hugepage"};
	bool huge = PageHuge(page);
	LIST_HEAD(pagelist);
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	if (!huge && PageTransHuge(hpage)) {
		if (try_to_split_thp_page(page)) {
			pr_info("soft offline: %#lx: thp split failed\n", pfn);
			return -EBUSY;
		}
		hpage = page;
	}

	lock_page(page);
	if (!PageHuge(page))
		wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return 0;
	}

	if (!PageHuge(page) && PageLRU(page) && !PageSwapCache(page))
		/*
		 * Try to invalidate first. This should work for
		 * non-dirty unmapped page cache pages.
		 */
		ret = invalidate_inode_page(page);
	unlock_page(page);

	if (ret) {
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		page_handle_poison(page, false, true);
		return 0;
	}

	if (isolate_page(hpage, &pagelist)) {
		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
		if (!ret) {
			bool release = !huge;

			if (!page_handle_poison(page, huge, release))
				ret = -EBUSY;
		} else {
			if (!list_empty(&pagelist))
				putback_movable_pages(&pagelist);

			pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
				pfn, msg_page[huge], ret, &page->flags);
			if (ret > 0)
				ret = -EBUSY;
		}
	} else {
		pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
			pfn, msg_page[huge], page_count(page), &page->flags);
		ret = -EBUSY;
	}
	return ret;
}

/**
 * soft_offline_page - Soft offline a page.
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success,
 *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
 *         < 0 otherwise (negated errno).
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(unsigned long pfn, int flags)
{
	int ret;
	bool try_again = true;
	struct page *page;

	if (!pfn_valid(pfn)) {
		WARN_ON_ONCE(flags & MF_COUNT_INCREASED);
		return -ENXIO;
	}

	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	page = pfn_to_online_page(pfn);
	if (!page) {
		put_ref_page(pfn, flags);
		return -EIO;
	}

	mutex_lock(&mf_mutex);

	if (PageHWPoison(page)) {
		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
		put_ref_page(pfn, flags);
		mutex_unlock(&mf_mutex);
		return 0;
	}

retry:
	get_online_mems();
	ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
	put_online_mems();

	if (hwpoison_filter(page)) {
		if (ret > 0)
			put_page(page);

		mutex_unlock(&mf_mutex);
		return -EOPNOTSUPP;
	}

	if (ret > 0) {
		ret = soft_offline_in_use_page(page);
	} else if (ret == 0) {
		if (!page_handle_poison(page, true, false) && try_again) {
			try_again = false;
			flags &= ~MF_COUNT_INCREASED;
			goto retry;
		}
	}

	mutex_unlock(&mf_mutex);

	return ret;
}
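
/*
 * Illustrative sketch of how a sysfs/debugfs hook might hand a PFN to
 * soft_offline_page(); the function below is hypothetical and only shows the
 * calling convention (no extra refcount was taken, so flags is 0). The
 * actual policy for when to offline a page is left to user space, as noted
 * in the kernel-doc above.
 */
#if 0
static int example_soft_offline_store(u64 pfn)
{
	return soft_offline_page((unsigned long)pfn, 0);
}
#endif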