// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or cache
 * failure.
 *
 * In addition there is a "soft offline" entry point that allows us to stop
 * using not-yet-corrupted-but-suspicious pages without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/vm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity in the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/dax.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "ras/ras_event.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

static bool __page_handle_poison(struct page *page)
{
	int ret;

	zone_pcp_disable(page_zone(page));
	ret = dissolve_free_huge_page(page);
	if (!ret)
		ret = take_page_off_buddy(page);
	zone_pcp_enable(page_zone(page));

	return ret > 0;
}

static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage) {
		/*
		 * Doing this check for free pages is also fine since
		 * dissolve_free_huge_page returns 0 for non-hugetlb pages as well.
		 */
		if (!__page_handle_poison(page))
			/*
			 * We could fail to take off the target page from buddy
			 * for example due to racy page allocation, but that's
			 * acceptable because soft-offlined page is not broken
			 * and if someone really want to use it, they should
			 * take it.
			 */
			return false;
	}

	SetPageHWPoison(page);
	if (release)
		put_page(page);
	page_ref_inc(page);
	num_poisoned_pages_inc();

	return true;
}

#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Finally, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_MEMCG
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	if (!hwpoison_filter_memcg)
		return 0;

	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
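 * (The processes are gathered into a to_kill list by collect_procs() and
 * signalled by kill_procs(), both below.)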
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handling it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	short size_shift;
};

/*
 * Send all the processes who have the page mapped a signal.
 * ``action optional'' if they are not immediately affected by the error
 * ``action required'' if error happened in current execution context
 */
static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
{
	struct task_struct *t = tk->tsk;
	short addr_lsb = tk->size_shift;
	int ret = 0;

	pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
		pfn, t->comm, t->pid);

	if ((flags & MF_ACTION_REQUIRED) && (t == current))
		ret = force_sig_mceerr(BUS_MCEERR_AR,
				       (void __user *)tk->addr, addr_lsb);
	else
		/*
		 * Signal other processes sharing the page if they have
		 * PF_MCE_EARLY set.
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 * This could cause a loop when the user sets SIGBUS
		 * to SIG_IGN, but hopefully no one will do that?
		 */
		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
				      addr_lsb, t);	/* synchronous? */
	if (ret < 0)
		pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
			t->comm, t->pid, ret);
	return ret;
}

/*
 * Unknown page type encountered. Try to check whether it can become an LRU
 * page by draining the per-CPU LRU caches with lru_add_drain_all().
 */
void shake_page(struct page *p)
{
	if (PageHuge(p))
		return;

	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * TODO: Could shrink slab caches here if a lightweight range-based
	 * shrinker will be available.
	 */
}
EXPORT_SYMBOL_GPL(shake_page);

static unsigned long dev_pagemap_mapping_shift(struct page *page,
		struct vm_area_struct *vma)
{
	unsigned long address = vma_address(page, vma);
	unsigned long ret = 0;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	VM_BUG_ON_VMA(address == -EFAULT, vma);
	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return 0;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;
	if (pud_devmap(*pud))
		return PUD_SHIFT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;
	if (pmd_devmap(*pmd))
		return PMD_SHIFT;
	pte = pte_offset_map(pmd, address);
	if (pte_present(*pte) && pte_devmap(*pte))
		ret = PAGE_SHIFT;
	pte_unmap(pte);
	return ret;
}

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do.	We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
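 * The new entry is appended to the to_kill list supplied by the caller.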
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
			struct vm_area_struct *vma,
			struct list_head *to_kill)
{
	struct to_kill *tk;

	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
	if (!tk) {
		pr_err("Memory failure: Out of memory while machine check handling\n");
		return;
	}

	tk->addr = page_address_in_vma(p, vma);
	if (is_zone_device_page(p))
		tk->size_shift = dev_pagemap_mapping_shift(p, vma);
	else
		tk->size_shift = page_shift(compound_head(p));

	/*
	 * Send SIGKILL if "tk->addr == -EFAULT". Also, as
	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
	 * so "tk->size_shift == 0" effectively checks no mapping on
	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
	 * to a process' address space, it's possible not all N VMAs
	 * contain mappings for the page, but at least one VMA does.
	 * Only deliver SIGBUS with payload derived from the VMA that
	 * has a mapping for the page.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("Memory failure: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
	} else if (tk->size_shift == 0) {
		kfree(tk);
		return;
	}

	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the
 * list (this is used for clean pages which do not need killing)
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
		unsigned long pfn, int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (forcekill) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr == -EFAULT) {
				pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
						 tk->tsk, PIDTYPE_PID);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc(tk, pfn, flags) < 0)
				pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
 * have to call rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t) {
		if (t->flags & PF_MCE_PROCESS) {
			if (t->flags & PF_MCE_EARLY)
				return t;
		} else {
			if (sysctl_memory_failure_early_kill)
				return t;
		}
	}
	return NULL;
}

/*
 * Determine whether a given process is an "early kill" process which expects
 * to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill" and otherwise return NULL.
 *
 * Note that the above is true for the Action Optional case. For the Action
 * Required case, it's only meaningful to the current thread, which needs to
 * be signaled with SIGBUS; the error is Action Optional for the other,
 * non-current processes sharing the same error page. If such a process is
 * "early kill", the task_struct of its dedicated thread will also be returned.
 */
static struct task_struct *task_early_kill(struct task_struct *tsk,
					   int force_early)
{
	if (!tsk->mm)
		return NULL;
	/*
	 * Comparing ->mm here because current task might represent
	 * a subthread, while tsk always points to the main thread.
	 */
	if (force_early && tsk->mm == current->mm)
		return current;

	return find_early_kill_thread(tsk);
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
				int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = page_lock_anon_vma_read(page);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_to_pgoff(page);
	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma_read(av);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
				int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff;

	i_mmap_lock_read(mapping);
	read_lock(&tasklist_lock);
	pgoff = page_to_pgoff(page);
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	i_mmap_unlock_read(mapping);
}

/*
 * Collect the processes that have the corrupted page mapped, to be killed.
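 * Anonymous pages are walked via the anon_vma interval tree, file-backed
 * pages via the mapping's i_mmap tree; see the helpers above.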
 */
static void collect_procs(struct page *page, struct list_head *tokill,
				int force_early)
{
	if (!page->mapping)
		return;

	if (PageAnon(page))
		collect_procs_anon(page, tokill, force_early);
	else
		collect_procs_file(page, tokill, force_early);
}

struct hwp_walk {
	struct to_kill tk;
	unsigned long pfn;
	int flags;
};

static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
{
	tk->addr = addr;
	tk->size_shift = shift;
}

static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
				unsigned long poisoned_pfn, struct to_kill *tk)
{
	unsigned long pfn = 0;

	if (pte_present(pte)) {
		pfn = pte_pfn(pte);
	} else {
		swp_entry_t swp = pte_to_swp_entry(pte);

		if (is_hwpoison_entry(swp))
			pfn = hwpoison_entry_to_pfn(swp);
	}

	if (!pfn || pfn != poisoned_pfn)
		return 0;

	set_to_kill(tk, addr, shift);
	return 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwp_walk *hwp)
{
	pmd_t pmd = *pmdp;
	unsigned long pfn;
	unsigned long hwpoison_vaddr;

	if (!pmd_present(pmd))
		return 0;
	pfn = pmd_pfn(pmd);
	if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
		hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
		set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
		return 1;
	}
	return 0;
}
#else
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwp_walk *hwp)
{
	return 0;
}
#endif

static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
			      unsigned long end, struct mm_walk *walk)
{
	struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
	int ret = 0;
	pte_t *ptep, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmdp, walk->vma);
	if (ptl) {
		ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmdp))
		goto out;

	mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
						addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
					     hwp->pfn, &hwp->tk);
		if (ret == 1)
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
out:
	cond_resched();
	return ret;
}

#ifdef CONFIG_HUGETLB_PAGE
static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
			    unsigned long addr, unsigned long end,
			    struct mm_walk *walk)
{
	struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
	pte_t pte = huge_ptep_get(ptep);
	struct hstate *h = hstate_vma(walk->vma);

	return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
				      hwp->pfn, &hwp->tk);
}
#else
#define hwpoison_hugetlb_range	NULL
#endif

static const struct mm_walk_ops hwp_walk_ops = {
	.pmd_entry = hwpoison_pte_range,
	.hugetlb_entry = hwpoison_hugetlb_range,
};

/*
 * Sends SIGBUS to the current process with error info.
 *
 * This function is intended to handle "Action Required" MCEs on already
 * hardware poisoned pages. They could happen, for example, when
 * memory_failure() failed to unmap the error page at the first call, or
 * when multiple local machine checks happened on different CPUs.
 *
 * MCE handler currently has no easy access to the error virtual address,
 * so this function walks page table to find it. The returned virtual address
 * is proper in most cases, but it could be wrong when the application
 * process has multiple entries mapping the error page.
 */
static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
				  int flags)
{
	int ret;
	struct hwp_walk priv = {
		.pfn = pfn,
	};
	priv.tk.tsk = p;

	mmap_read_lock(p->mm);
	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
			      (void *)&priv);
	if (ret == 1 && priv.tk.addr)
		kill_proc(&priv.tk, pfn, flags);
	else
		ret = 0;
	mmap_read_unlock(p->mm);
	return ret > 0 ? -EHWPOISON : -EFAULT;
}

static const char *action_name[] = {
	[MF_IGNORED] = "Ignored",
	[MF_FAILED] = "Failed",
	[MF_DELAYED] = "Delayed",
	[MF_RECOVERED] = "Recovered",
};

static const char * const action_page_types[] = {
	[MF_MSG_KERNEL]			= "reserved kernel page",
	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
	[MF_MSG_SLAB]			= "kernel slab page",
	[MF_MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
	[MF_MSG_HUGE]			= "huge page",
	[MF_MSG_FREE_HUGE]		= "free huge page",
	[MF_MSG_NON_PMD_HUGE]		= "non-pmd-sized huge page",
	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_DAX]			= "dax page",
	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
	[MF_MSG_DIFFERENT_PAGE_SIZE]	= "different page size",
	[MF_MSG_UNKNOWN]		= "unknown page",
};

/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);

		/*
		 * Poisoned page might never drop its ref count to 0 so we have
		 * to uncharge it manually from its memcg.
		 */
		mem_cgroup_uncharge(page_folio(p));

		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		put_page(p);
		return 0;
	}
	return -EIO;
}

static int truncate_error_page(struct page *p, unsigned long pfn,
				struct address_space *mapping)
{
	int ret = MF_FAILED;

	if (mapping->a_ops->error_remove_page) {
		int err = mapping->a_ops->error_remove_page(mapping, p);

		if (err != 0) {
			pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
				pfn, err);
		} else if (page_has_private(p) &&
			   !try_to_release_page(p, GFP_NOIO)) {
			pr_info("Memory failure: %#lx: failed to release buffers\n",
				pfn);
		} else {
			ret = MF_RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it just invalidate.
		 * This fails on dirty pages or anything with private data.
		 */
		if (invalidate_inode_page(p))
			ret = MF_RECOVERED;
		else
			pr_info("Memory failure: %#lx: Failed to invalidate\n",
				pfn);
	}

	return ret;
}

struct page_state {
	unsigned long mask;
	unsigned long res;
	enum mf_action_page_type type;

	/* Callback ->action() has to unlock the relevant page inside it. */
	int (*action)(struct page_state *ps, struct page *p);
};

/*
 * Return true if the page is still referenced by others, otherwise return
 * false.
 *
 * extra_pins is true when one extra refcount is expected.
 */
static bool has_extra_refcount(struct page_state *ps, struct page *p,
			       bool extra_pins)
{
	int count = page_count(p) - 1;

	if (extra_pins)
		count -= 1;

	if (count > 0) {
		pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
		       page_to_pfn(p), action_page_types[ps->type], count);
		return true;
	}

	return false;
}

/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page_state *ps, struct page *p)
{
	unlock_page(p);
	return MF_IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page_state *ps, struct page *p)
{
	pr_err("Memory failure: %#lx: Unknown page state\n", page_to_pfn(p));
	unlock_page(p);
	return MF_FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page_state *ps, struct page *p)
{
	int ret;
	struct address_space *mapping;
	bool extra_pins;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one m_f() holds.
	 */
	if (PageAnon(p)) {
		ret = MF_RECOVERED;
		goto out;
	}

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meantime
		 */
		ret = MF_FAILED;
		goto out;
	}

	/*
	 * The shmem page is kept in page cache instead of truncating
	 * so is expected to have an extra refcount after error-handling.
	 */
	extra_pins = shmem_mapping(mapping);

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_rwsem or not for this? Right now we don't.
	 */
	ret = truncate_error_page(p, page_to_pfn(p), mapping);
	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

out:
	unlock_page(p);

	return ret;
}

/*
 * Dirty pagecache page
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page_state *ps, struct page *p)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass an IO error:
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped.  If an
		 * application assumes it will always get error on
		 * fsync, but does other operations on the fd before,
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, -EIO);
	}

	return me_pagecache_clean(ps, p);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *      - clear dirty bit to prevent IO
 *      - remove from LRU
 *      - but keep in the swap cache, so that when we return to it on
 *        a later page fault, we know the application is accessing
 *        corrupted data and shall be killed (we installed simple
 *        interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page_state *ps, struct page *p)
{
	int ret;
	bool extra_pins = false;

	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
	unlock_page(p);

	if (ret == MF_DELAYED)
		extra_pins = true;

	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

	return ret;
}

static int me_swapcache_clean(struct page_state *ps, struct page *p)
{
	int ret;

	delete_from_swap_cache(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
	unlock_page(p);

	if (has_extra_refcount(ps, p, false))
		ret = MF_FAILED;

	return ret;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
 *   To narrow down kill region to one page, we need to break up pmd.
 */
static int me_huge_page(struct page_state *ps, struct page *p)
{
	int res;
	struct page *hpage = compound_head(p);
	struct address_space *mapping;

	if (!PageHuge(hpage))
		return MF_DELAYED;

	mapping = page_mapping(hpage);
	if (mapping) {
		res = truncate_error_page(hpage, page_to_pfn(p), mapping);
		unlock_page(hpage);
	} else {
		res = MF_FAILED;
		unlock_page(hpage);
		/*
		 * migration entry prevents later access on error anonymous
		 * hugepage, so we can free and dissolve it into buddy to
		 * save healthy subpages.
		 */
		if (PageAnon(hpage))
			put_page(hpage);
		if (__page_handle_poison(p)) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		}
	}

	if (has_extra_refcount(ps, p, false))
		res = MF_FAILED;

	return res;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access the page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		((1UL << PG_swapcache) | (1UL << PG_swapbacked))
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define lru		(1UL << PG_lru)
#define head		(1UL << PG_head)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state error_states[] = {
	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
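	 * (me_kernel() simply unlocks the page and reports MF_IGNORED.)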
	 */
	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },

	{ head,		head,		MF_MSG_HUGE,		me_huge_page },

	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef lru
#undef head
#undef slab
#undef reserved

/*
 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
 */
static void action_result(unsigned long pfn, enum mf_action_page_type type,
			  enum mf_result result)
{
	trace_memory_failure_event(pfn, type, result);

	pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
		pfn, action_page_types[type], action_name[result]);
}

static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;

	/* page p should be unlocked after returning from ps->action(). */
	result = ps->action(ps, p);

	action_result(pfn, ps->type, result);

	/* Could do more checks here if page looks ok */
	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}

static inline bool PageHWPoisonTakenOff(struct page *page)
{
	return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
}

void SetPageHWPoisonTakenOff(struct page *page)
{
	set_page_private(page, MAGIC_HWPOISON);
}

void ClearPageHWPoisonTakenOff(struct page *page)
{
	if (PageHWPoison(page))
		set_page_private(page, 0);
}

/*
 * Return true if the page type of a given page is supported by the hwpoison
 * mechanism (while handling could still fail), otherwise false. This function
 * does not return true for hugetlb or device memory pages, so it's assumed
 * to be called only in a context where we never have such pages.
 */
static inline bool HWPoisonHandlable(struct page *page)
{
	return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);
}

static int __get_hwpoison_page(struct page *page)
{
	struct page *head = compound_head(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_huge_page(head, &hugetlb);
	if (hugetlb)
		return ret;

	/*
	 * This check prevents calling get_page_unless_zero() for any
	 * unsupported type of page in order to reduce the risk of
	 * unexpected races caused by taking a page refcount.
	 */
	if (!HWPoisonHandlable(head))
		return -EBUSY;

	if (get_page_unless_zero(head)) {
		if (head == compound_head(page))
			return 1;

		pr_info("Memory failure: %#lx cannot catch tail\n",
			page_to_pfn(page));
		put_page(head);
	}

	return 0;
}

static int get_any_page(struct page *p, unsigned long flags)
{
	int ret = 0, pass = 0;
	bool count_increased = false;

	if (flags & MF_COUNT_INCREASED)
		count_increased = true;

try_again:
	if (!count_increased) {
		ret = __get_hwpoison_page(p);
		if (!ret) {
			if (page_count(p)) {
				/* We raced with an allocation, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EBUSY;
			} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
				/* We raced with put_page, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EIO;
			}
			goto out;
		} else if (ret == -EBUSY) {
			/*
			 * We raced with (possibly temporary) unhandlable
			 * page, retry.
			 */
			if (pass++ < 3) {
				shake_page(p);
				goto try_again;
			}
			ret = -EIO;
			goto out;
		}
	}

	if (PageHuge(p) || HWPoisonHandlable(p)) {
		ret = 1;
	} else {
		/*
		 * A page we cannot handle. Check whether we can turn
		 * it into something we can handle.
		 */
		if (pass++ < 3) {
			put_page(p);
			shake_page(p);
			count_increased = false;
			goto try_again;
		}
		put_page(p);
		ret = -EIO;
	}
out:
	if (ret == -EIO)
		dump_page(p, "hwpoison: unhandlable page");

	return ret;
}

static int __get_unpoison_page(struct page *page)
{
	struct page *head = compound_head(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_huge_page(head, &hugetlb);
	if (hugetlb)
		return ret;

	/*
	 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison,
	 * but also isolated from buddy freelist, so need to identify the
	 * state and have to cancel both operations to unpoison.
	 */
	if (PageHWPoisonTakenOff(page))
		return -EHWPOISON;

	return get_page_unless_zero(page) ? 1 : 0;
}

/**
 * get_hwpoison_page() - Get refcount for memory error handling
 * @p:		Raw error page (hit by memory error)
 * @flags:	Flags controlling behavior of error handling
 *
 * get_hwpoison_page() takes a page refcount of an error page to handle memory
 * error on it, after checking that the error page is in a well-defined state
 * (defined as a page type on which we can successfully handle the memory
 * error, such as LRU pages and hugetlb pages).
 *
 * Memory error handling could be triggered at any time on any type of page,
 * so it's prone to race with typical memory management lifecycle (like
 * allocation and free). So to avoid such races, get_hwpoison_page() takes
 * extra care for the error page's state (as done in __get_hwpoison_page()),
 * and has some retry logic in get_any_page().
 *
 * When called from unpoison_memory(), the caller should already ensure that
 * the given page has PG_hwpoison. So it's never reused for other page
 * allocations, and __get_unpoison_page() never races with them.
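 * (In that case the MF_UNPOISON flag routes the request to
 * __get_unpoison_page() below.)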
 *
 * Return: 0 on failure,
 *         1 on success for in-use pages in a well-defined state,
 *         -EIO for pages on which we can not handle memory errors,
 *         -EBUSY when get_hwpoison_page() has raced with page lifecycle
 *         operations like allocation and free,
 *         -EHWPOISON when the page is hwpoisoned and taken off from buddy.
 */
static int get_hwpoison_page(struct page *p, unsigned long flags)
{
	int ret;

	zone_pcp_disable(page_zone(p));
	if (flags & MF_UNPOISON)
		ret = __get_unpoison_page(p);
	else
		ret = get_any_page(p, flags);
	zone_pcp_enable(page_zone(p));

	return ret;
}

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int flags, struct page *hpage)
{
	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	bool unmap_success;
	int kill = 1, forcekill;
	bool mlocked = PageMlocked(hpage);

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (PageReserved(p) || PageSlab(p))
		return true;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return true;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return true;

	if (PageKsm(p)) {
		pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
		return false;
	}

	if (PageSwapCache(p)) {
		pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
			pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(hpage);
	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
	    mapping_can_writeback(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form.  This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

	if (PageHuge(hpage) && !PageAnon(hpage)) {
		/*
		 * For hugetlb pages in shared mappings, try_to_unmap
		 * could potentially call huge_pmd_unshare.  Because of
		 * this, take semaphore in write mode here and set
		 * TTU_RMAP_LOCKED to indicate we have taken the lock
		 * at this higher level.
		 */
		mapping = hugetlb_page_mapping_lock_write(hpage);
		if (mapping) {
			try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
			i_mmap_unlock_write(mapping);
		} else
			pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
	} else {
		try_to_unmap(hpage, ttu);
	}

	unmap_success = !page_mapped(hpage);
	if (!unmap_success)
		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(hpage));

	/*
	 * try_to_unmap() might put mlocked page in lru cache, so call
	 * shake_page() again to ensure that it's flushed.
	 */
	if (mlocked)
		shake_page(hpage);

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps done we can decide if
	 * killing is needed or not.  Only kill when the page
	 * was dirty or the process is not restartable,
	 * otherwise the tokill list is merely
	 * freed.  When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);

	return unmap_success;
}

static int identify_page_state(unsigned long pfn, struct page *p,
				unsigned long page_flags)
{
	struct page_state *ps;

	/*
	 * The first check uses the current page flags which may not have any
	 * relevant information. The second check with the saved page flags is
	 * carried out only if the first check can't determine the page status.
	 */
	for (ps = error_states;; ps++)
		if ((p->flags & ps->mask) == ps->res)
			break;

	page_flags |= (p->flags & (1UL << PG_dirty));

	if (!ps->mask)
		for (ps = error_states;; ps++)
			if ((page_flags & ps->mask) == ps->res)
				break;
	return page_action(ps, p, pfn);
}

static int try_to_split_thp_page(struct page *page, const char *msg)
{
	lock_page(page);
	if (unlikely(split_huge_page(page))) {
		unsigned long pfn = page_to_pfn(page);

		unlock_page(page);
		pr_info("%s: %#lx: thp split failed\n", msg, pfn);
		put_page(page);
		return -EBUSY;
	}
	unlock_page(page);

	return 0;
}

static int memory_failure_hugetlb(unsigned long pfn, int flags)
{
	struct page *p = pfn_to_page(pfn);
	struct page *head = compound_head(p);
	int res;
	unsigned long page_flags;

	if (TestSetPageHWPoison(head)) {
		pr_err("Memory failure: %#lx: already hardware poisoned\n",
		       pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, page_to_pfn(head), flags);
		return res;
	}

	num_poisoned_pages_inc();

	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			lock_page(head);
			if (hwpoison_filter(p)) {
				if (TestClearPageHWPoison(head))
					num_poisoned_pages_dec();
				unlock_page(head);
				return -EOPNOTSUPP;
			}
			unlock_page(head);
			res = MF_FAILED;
			if (__page_handle_poison(p)) {
				page_ref_inc(p);
				res = MF_RECOVERED;
			}
			action_result(pfn, MF_MSG_FREE_HUGE, res);
			return res == MF_RECOVERED ? 0 : -EBUSY;
		} else if (res < 0) {
			action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			return -EBUSY;
		}
	}

	lock_page(head);

	/*
	 * The page could have changed compound pages due to a race window.
	 * If this happens just bail out.
	 */
	if (!PageHuge(p) || compound_head(p) != head) {
		action_result(pfn, MF_MSG_DIFFERENT_PAGE_SIZE, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	page_flags = head->flags;

	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(head))
			num_poisoned_pages_dec();
		put_page(p);
		res = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
	 * simply disable it. In order to make it work properly, we need
	 * to make sure that:
	 *  - conversion of a pud that maps an error hugetlb into hwpoison
	 *    entry properly works, and
	 *  - other mm code walking over page table is aware of pud-aligned
	 *    hwpoison entries.
	 */
	if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
		action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	if (!hwpoison_user_mappings(p, pfn, flags, head)) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	return identify_page_state(pfn, p, page_flags);
out:
	unlock_page(head);
	return res;
}

static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	struct page *page = pfn_to_page(pfn);
	unsigned long size = 0;
	struct to_kill *tk;
	LIST_HEAD(tokill);
	int rc = -EBUSY;
	loff_t start;
	dax_entry_t cookie;

	if (flags & MF_COUNT_INCREASED)
		/*
		 * Drop the extra refcount in case we come from madvise().
		 */
		put_page(page);

	/* device metadata space is not recoverable */
	if (!pgmap_pfn_valid(pgmap, pfn)) {
		rc = -ENXIO;
		goto out;
	}

	/*
	 * Pages instantiated by device-dax (not filesystem-dax)
	 * may be compound pages.
	 */
	page = compound_head(page);

	/*
	 * Prevent the inode from being freed while we are interrogating
	 * the address_space, typically this would be handled by
	 * lock_page(), but dax pages do not use the page lock. This
	 * also prevents changes to the mapping of this pfn until
	 * poison signaling is complete.
	 */
	cookie = dax_lock_page(page);
	if (!cookie)
		goto out;

	if (hwpoison_filter(page)) {
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		/*
		 * TODO: Handle HMM pages which may need coordination
		 * with device-side memory.
		 */
		goto unlock;
	}

	/*
	 * Use this flag as an indication that the dax page has been
	 * remapped UC to prevent speculative consumption of poison.
	 */
	SetPageHWPoison(page);

	/*
	 * Unlike System-RAM there is no possibility to swap in a
	 * different physical page at a given virtual address, so all
	 * userspace consumption of ZONE_DEVICE memory necessitates
	 * SIGBUS (i.e. MF_MUST_KILL)
	 */
	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
	collect_procs(page, &tokill, true);

	list_for_each_entry(tk, &tokill, nd)
		if (tk->size_shift)
			size = max(size, 1UL << tk->size_shift);
	if (size) {
		/*
		 * Unmap the largest mapping to avoid breaking up
		 * device-dax mappings which are constant size. The
		 * actual size of the mapping being torn down is
		 * communicated in siginfo, see kill_proc()
		 */
		start = (page->index << PAGE_SHIFT) & ~(size - 1);
		unmap_mapping_range(page->mapping, start, size, 0);
	}
	kill_procs(&tokill, true, false, pfn, flags);
	rc = 0;
unlock:
	dax_unlock_page(page, cookie);
out:
	/* drop pgmap ref acquired in caller */
	put_dev_pagemap(pgmap);
	action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
	return rc;
}

static DEFINE_MUTEX(mf_mutex);

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 *
 * Return: 0 if the memory error was successfully handled,
 *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
 *         < 0 (except -EOPNOTSUPP) on failure.
 */
int memory_failure(unsigned long pfn, int flags)
{
	struct page *p;
	struct page *hpage;
	struct dev_pagemap *pgmap;
	int res = 0;
	unsigned long page_flags;
	bool retry = true;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure on page %lx", pfn);

	mutex_lock(&mf_mutex);

	p = pfn_to_online_page(pfn);
	if (!p) {
		res = arch_memory_failure(pfn, flags);
		if (res == 0)
			goto unlock_mutex;

		if (pfn_valid(pfn)) {
			pgmap = get_dev_pagemap(pfn, NULL);
			if (pgmap) {
				res = memory_failure_dev_pagemap(pfn, flags,
								 pgmap);
				goto unlock_mutex;
			}
		}
		pr_err("Memory failure: %#lx: memory outside kernel control\n",
			pfn);
		res = -ENXIO;
		goto unlock_mutex;
	}

try_again:
	if (PageHuge(p)) {
		res = memory_failure_hugetlb(pfn, flags);
		goto unlock_mutex;
	}

	if (TestSetPageHWPoison(p)) {
		pr_err("Memory failure: %#lx: already hardware poisoned\n",
		       pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, pfn, flags);
		goto unlock_mutex;
	}

	hpage = compound_head(p);
	num_poisoned_pages_inc();

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hand:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 *    In fact it's dangerous to directly bump up page count from 0,
	 *    that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			if (is_free_buddy_page(p)) {
				if (take_page_off_buddy(p)) {
					page_ref_inc(p);
					res = MF_RECOVERED;
				} else {
					/* We lost the race, try again */
					if (retry) {
						ClearPageHWPoison(p);
						num_poisoned_pages_dec();
						retry = false;
						goto try_again;
					}
					res = MF_FAILED;
				}
				action_result(pfn, MF_MSG_BUDDY, res);
				res = res == MF_RECOVERED ? 0 : -EBUSY;
			} else {
				action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
				res = -EBUSY;
			}
			goto unlock_mutex;
		} else if (res < 0) {
			action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			res = -EBUSY;
			goto unlock_mutex;
		}
	}

	if (PageTransHuge(hpage)) {
		/*
		 * The flag must be set after the refcount is bumped
		 * otherwise it may race with THP split.
		 * And the flag can't be set in get_hwpoison_page() since
		 * it is called by soft offline too and it is just called
		 * for !MF_COUNT_INCREASED. So here seems to be the best
		 * place.
		 *
		 * Don't need care about the above error handling paths for
		 * get_hwpoison_page() since they handle either free page
		 * or unhandlable page.  The refcount is bumped iff the
		 * page is a valid handlable page.
		 */
		SetPageHasHWPoisoned(hpage);
		if (try_to_split_thp_page(p, "Memory Failure") < 0) {
			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
			res = -EBUSY;
			goto unlock_mutex;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	shake_page(p);

	lock_page(p);

	/*
	 * We're only intended to deal with the non-Compound page here.
	 * However, the page could have become part of a compound page due to
	 * a race window. If this happens, we could try again to hopefully
	 * handle the page next round.
	 */
	if (PageCompound(p)) {
		if (retry) {
			if (TestClearPageHWPoison(p))
				num_poisoned_pages_dec();
			unlock_page(p);
			put_page(p);
			flags &= ~MF_COUNT_INCREASED;
			retry = false;
			goto try_again;
		}
		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action.  One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
	 * correctly, we save a copy of the page flags at this time.
	 */
	page_flags = p->flags;

	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(p))
			num_poisoned_pages_dec();
		unlock_page(p);
		put_page(p);
		res = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	/*
	 * __munlock_pagevec may clear a writeback page's LRU flag without
	 * page_lock. We need to wait for writeback completion for this page
	 * or it may trigger a vfs BUG while evicting the inode.
	 */
	if (!PageLRU(p) && !PageWriteback(p))
		goto identify_page_state;

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
	 */
	if (!hwpoison_user_mappings(p, pfn, flags, p)) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

identify_page_state:
	res = identify_page_state(pfn, p, page_flags);
	mutex_unlock(&mf_mutex);
	return res;
unlock_page:
	unlock_page(p);
unlock_mutex:
	mutex_unlock(&mf_mutex);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);

#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)

struct memory_failure_entry {
	unsigned long pfn;
	int flags;
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);

/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * the recovery of the error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Can run in IRQ context.
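 *
 * The entry is pushed onto a per-CPU kfifo and handled later in process
 * context by memory_failure_work_func().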
1944 */ 1945 void memory_failure_queue(unsigned long pfn, int flags) 1946 { 1947 struct memory_failure_cpu *mf_cpu; 1948 unsigned long proc_flags; 1949 struct memory_failure_entry entry = { 1950 .pfn = pfn, 1951 .flags = flags, 1952 }; 1953 1954 mf_cpu = &get_cpu_var(memory_failure_cpu); 1955 spin_lock_irqsave(&mf_cpu->lock, proc_flags); 1956 if (kfifo_put(&mf_cpu->fifo, entry)) 1957 schedule_work_on(smp_processor_id(), &mf_cpu->work); 1958 else 1959 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n", 1960 pfn); 1961 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); 1962 put_cpu_var(memory_failure_cpu); 1963 } 1964 EXPORT_SYMBOL_GPL(memory_failure_queue); 1965 1966 static void memory_failure_work_func(struct work_struct *work) 1967 { 1968 struct memory_failure_cpu *mf_cpu; 1969 struct memory_failure_entry entry = { 0, }; 1970 unsigned long proc_flags; 1971 int gotten; 1972 1973 mf_cpu = container_of(work, struct memory_failure_cpu, work); 1974 for (;;) { 1975 spin_lock_irqsave(&mf_cpu->lock, proc_flags); 1976 gotten = kfifo_get(&mf_cpu->fifo, &entry); 1977 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); 1978 if (!gotten) 1979 break; 1980 if (entry.flags & MF_SOFT_OFFLINE) 1981 soft_offline_page(entry.pfn, entry.flags); 1982 else 1983 memory_failure(entry.pfn, entry.flags); 1984 } 1985 } 1986 1987 /* 1988 * Process memory_failure work queued on the specified CPU. 1989 * Used to avoid return-to-userspace racing with the memory_failure workqueue. 1990 */ 1991 void memory_failure_queue_kick(int cpu) 1992 { 1993 struct memory_failure_cpu *mf_cpu; 1994 1995 mf_cpu = &per_cpu(memory_failure_cpu, cpu); 1996 cancel_work_sync(&mf_cpu->work); 1997 memory_failure_work_func(&mf_cpu->work); 1998 } 1999 2000 static int __init memory_failure_init(void) 2001 { 2002 struct memory_failure_cpu *mf_cpu; 2003 int cpu; 2004 2005 for_each_possible_cpu(cpu) { 2006 mf_cpu = &per_cpu(memory_failure_cpu, cpu); 2007 spin_lock_init(&mf_cpu->lock); 2008 INIT_KFIFO(mf_cpu->fifo); 2009 INIT_WORK(&mf_cpu->work, memory_failure_work_func); 2010 } 2011 2012 return 0; 2013 } 2014 core_initcall(memory_failure_init); 2015 2016 #define unpoison_pr_info(fmt, pfn, rs) \ 2017 ({ \ 2018 if (__ratelimit(rs)) \ 2019 pr_info(fmt, pfn); \ 2020 }) 2021 2022 static inline int clear_page_hwpoison(struct ratelimit_state *rs, struct page *p) 2023 { 2024 if (TestClearPageHWPoison(p)) { 2025 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n", 2026 page_to_pfn(p), rs); 2027 num_poisoned_pages_dec(); 2028 return 1; 2029 } 2030 return 0; 2031 } 2032 2033 static inline int unpoison_taken_off_page(struct ratelimit_state *rs, 2034 struct page *p) 2035 { 2036 if (put_page_back_buddy(p)) { 2037 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n", 2038 page_to_pfn(p), rs); 2039 return 0; 2040 } 2041 return -EBUSY; 2042 } 2043 2044 /** 2045 * unpoison_memory - Unpoison a previously poisoned page 2046 * @pfn: Page number of the to be unpoisoned page 2047 * 2048 * Software-unpoison a page that has been poisoned by 2049 * memory_failure() earlier. 2050 * 2051 * This is only done on the software-level, so it only works 2052 * for linux injected failures, not real hardware failures 2053 * 2054 * Returns 0 for success, otherwise -errno. 
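 *
 * A minimal sketch of a caller (illustrative; in-tree use is via the
 * hwpoison-inject debugfs interface when CONFIG_HWPOISON_INJECT is
 * enabled, and the error handling shown here is only an example):
 *
 *	ret = unpoison_memory(pfn);
 *	if (ret)
 *		pr_info("pfn %#lx could not be unpoisoned (%d)\n", pfn, ret);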
2055 */
2056 int unpoison_memory(unsigned long pfn)
2057 {
2058 struct page *page;
2059 struct page *p;
2060 int ret = -EBUSY;
2061 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
2062 DEFAULT_RATELIMIT_BURST);
2063 
2064 if (!pfn_valid(pfn))
2065 return -ENXIO;
2066 
2067 p = pfn_to_page(pfn);
2068 page = compound_head(p);
2069 
2070 mutex_lock(&mf_mutex);
2071 
2072 if (!PageHWPoison(p)) {
2073 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
2074 pfn, &unpoison_rs);
2075 goto unlock_mutex;
2076 }
2077 
2078 if (page_count(page) > 1) {
2079 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
2080 pfn, &unpoison_rs);
2081 goto unlock_mutex;
2082 }
2083 
2084 if (page_mapped(page)) {
2085 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
2086 pfn, &unpoison_rs);
2087 goto unlock_mutex;
2088 }
2089 
2090 if (page_mapping(page)) {
2091 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
2092 pfn, &unpoison_rs);
2093 goto unlock_mutex;
2094 }
2095 
2096 if (PageSlab(page) || PageTable(page))
2097 goto unlock_mutex;
2098 
2099 ret = get_hwpoison_page(p, MF_UNPOISON);
2100 if (!ret) {
2101 if (clear_page_hwpoison(&unpoison_rs, page))
2102 ret = 0;
2103 else
2104 ret = -EBUSY;
2105 } else if (ret < 0) {
2106 if (ret == -EHWPOISON) {
2107 ret = unpoison_taken_off_page(&unpoison_rs, p);
2108 } else
2109 unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
2110 pfn, &unpoison_rs);
2111 } else {
2112 int freeit = clear_page_hwpoison(&unpoison_rs, p);
2113 
2114 put_page(page);
2115 if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1)) {
2116 put_page(page);
2117 ret = 0;
2118 }
2119 }
2120 
2121 unlock_mutex:
2122 mutex_unlock(&mf_mutex);
2123 return ret;
2124 }
2125 EXPORT_SYMBOL(unpoison_memory);
2126 
2127 static bool isolate_page(struct page *page, struct list_head *pagelist)
2128 {
2129 bool isolated = false;
2130 bool lru = PageLRU(page);
2131 
2132 if (PageHuge(page)) {
2133 isolated = isolate_huge_page(page, pagelist);
2134 } else {
2135 if (lru)
2136 isolated = !isolate_lru_page(page);
2137 else
2138 isolated = !isolate_movable_page(page, ISOLATE_UNEVICTABLE);
2139 
2140 if (isolated)
2141 list_add(&page->lru, pagelist);
2142 }
2143 
2144 if (isolated && lru)
2145 inc_node_page_state(page, NR_ISOLATED_ANON +
2146 page_is_file_lru(page));
2147 
2148 /*
2149 * If we succeed in isolating the page, isolation took another
2150 * refcount on the page, so we can safely drop the one our caller
2151 * obtained from get_hwpoison_page(). If we failed to isolate the
2152 * page, we cannot go further and will return an error, so drop
2153 * that reference as well.
2154 */
2155 put_page(page);
2156 return isolated;
2157 }
2158 
2159 /*
2160 * __soft_offline_page handles hugetlb-pages and non-hugetlb pages.
2161 * If the page is a non-dirty unmapped page-cache page, it is simply invalidated.
2162 * If the page is mapped, it migrates the contents over.
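 *
 * In rough outline (the helpers named here are the ones used in the
 * function body below):
 *
 *	lock_page()
 *	  -> bail out if the page is already PageHWPoison()
 *	  -> invalidate_inode_page() for clean, unmapped page cache
 *	  -> otherwise isolate_page() + migrate_pages()
 *	  -> on success, page_handle_poison() marks the page hwpoisoned
 *	     (and, for hugetlb or free pages, takes it off the free lists)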
2163 */ 2164 static int __soft_offline_page(struct page *page) 2165 { 2166 int ret = 0; 2167 unsigned long pfn = page_to_pfn(page); 2168 struct page *hpage = compound_head(page); 2169 char const *msg_page[] = {"page", "hugepage"}; 2170 bool huge = PageHuge(page); 2171 LIST_HEAD(pagelist); 2172 struct migration_target_control mtc = { 2173 .nid = NUMA_NO_NODE, 2174 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, 2175 }; 2176 2177 lock_page(page); 2178 if (!PageHuge(page)) 2179 wait_on_page_writeback(page); 2180 if (PageHWPoison(page)) { 2181 unlock_page(page); 2182 put_page(page); 2183 pr_info("soft offline: %#lx page already poisoned\n", pfn); 2184 return 0; 2185 } 2186 2187 if (!PageHuge(page)) 2188 /* 2189 * Try to invalidate first. This should work for 2190 * non dirty unmapped page cache pages. 2191 */ 2192 ret = invalidate_inode_page(page); 2193 unlock_page(page); 2194 2195 if (ret) { 2196 pr_info("soft_offline: %#lx: invalidated\n", pfn); 2197 page_handle_poison(page, false, true); 2198 return 0; 2199 } 2200 2201 if (isolate_page(hpage, &pagelist)) { 2202 ret = migrate_pages(&pagelist, alloc_migration_target, NULL, 2203 (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL); 2204 if (!ret) { 2205 bool release = !huge; 2206 2207 if (!page_handle_poison(page, huge, release)) 2208 ret = -EBUSY; 2209 } else { 2210 if (!list_empty(&pagelist)) 2211 putback_movable_pages(&pagelist); 2212 2213 pr_info("soft offline: %#lx: %s migration failed %d, type %pGp\n", 2214 pfn, msg_page[huge], ret, &page->flags); 2215 if (ret > 0) 2216 ret = -EBUSY; 2217 } 2218 } else { 2219 pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n", 2220 pfn, msg_page[huge], page_count(page), &page->flags); 2221 ret = -EBUSY; 2222 } 2223 return ret; 2224 } 2225 2226 static int soft_offline_in_use_page(struct page *page) 2227 { 2228 struct page *hpage = compound_head(page); 2229 2230 if (!PageHuge(page) && PageTransHuge(hpage)) 2231 if (try_to_split_thp_page(page, "soft offline") < 0) 2232 return -EBUSY; 2233 return __soft_offline_page(page); 2234 } 2235 2236 static int soft_offline_free_page(struct page *page) 2237 { 2238 int rc = 0; 2239 2240 if (!page_handle_poison(page, true, false)) 2241 rc = -EBUSY; 2242 2243 return rc; 2244 } 2245 2246 static void put_ref_page(struct page *page) 2247 { 2248 if (page) 2249 put_page(page); 2250 } 2251 2252 /** 2253 * soft_offline_page - Soft offline a page. 2254 * @pfn: pfn to soft-offline 2255 * @flags: flags. Same as memory_failure(). 2256 * 2257 * Returns 0 on success, otherwise negated errno. 2258 * 2259 * Soft offline a page, by migration or invalidation, 2260 * without killing anything. This is for the case when 2261 * a page is not corrupted yet (so it's still valid to access), 2262 * but has had a number of corrected errors and is better taken 2263 * out. 2264 * 2265 * The actual policy on when to do that is maintained by 2266 * user space. 2267 * 2268 * This should never impact any application or cause data loss, 2269 * however it might take some time. 2270 * 2271 * This is not a 100% solution for all memory, but tries to be 2272 * ``good enough'' for the majority of memory. 
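 *
 * As an illustration of how this is typically reached (the exact
 * interface is configuration dependent and is an assumption here, not
 * something this function requires): user space can ask for the page
 * backing a physical address to be soft-offlined through the memory
 * sysfs interface, e.g.
 *
 *	echo 0x1234000 > /sys/devices/system/memory/soft_offline_page
 *
 * which eventually calls this function for the corresponding pfn.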
2273 */ 2274 int soft_offline_page(unsigned long pfn, int flags) 2275 { 2276 int ret; 2277 bool try_again = true; 2278 struct page *page, *ref_page = NULL; 2279 2280 WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED)); 2281 2282 if (!pfn_valid(pfn)) 2283 return -ENXIO; 2284 if (flags & MF_COUNT_INCREASED) 2285 ref_page = pfn_to_page(pfn); 2286 2287 /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */ 2288 page = pfn_to_online_page(pfn); 2289 if (!page) { 2290 put_ref_page(ref_page); 2291 return -EIO; 2292 } 2293 2294 mutex_lock(&mf_mutex); 2295 2296 if (PageHWPoison(page)) { 2297 pr_info("%s: %#lx page already poisoned\n", __func__, pfn); 2298 put_ref_page(ref_page); 2299 mutex_unlock(&mf_mutex); 2300 return 0; 2301 } 2302 2303 retry: 2304 get_online_mems(); 2305 ret = get_hwpoison_page(page, flags); 2306 put_online_mems(); 2307 2308 if (ret > 0) { 2309 ret = soft_offline_in_use_page(page); 2310 } else if (ret == 0) { 2311 if (soft_offline_free_page(page) && try_again) { 2312 try_again = false; 2313 flags &= ~MF_COUNT_INCREASED; 2314 goto retry; 2315 } 2316 } 2317 2318 mutex_unlock(&mf_mutex); 2319 2320 return ret; 2321 } 2322
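/*
 * Usage note: a caller that wants a soft offline handled asynchronously
 * (for example from a context where sleeping is not allowed) can queue
 * the request instead of calling soft_offline_page() directly; the flag
 * below is the one checked by memory_failure_work_func():
 *
 *	memory_failure_queue(pfn, MF_SOFT_OFFLINE);
 *
 * The per-CPU work item then calls soft_offline_page() for that pfn
 * from process context.
 */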