1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2008, 2009 Intel Corporation 4 * Authors: Andi Kleen, Fengguang Wu 5 * 6 * High level machine check handler. Handles pages reported by the 7 * hardware as being corrupted usually due to a multi-bit ECC memory or cache 8 * failure. 9 * 10 * In addition there is a "soft offline" entry point that allows stop using 11 * not-yet-corrupted-by-suspicious pages without killing anything. 12 * 13 * Handles page cache pages in various states. The tricky part 14 * here is that we can access any page asynchronously in respect to 15 * other VM users, because memory failures could happen anytime and 16 * anywhere. This could violate some of their assumptions. This is why 17 * this code has to be extremely careful. Generally it tries to use 18 * normal locking rules, as in get the standard locks, even if that means 19 * the error handling takes potentially a long time. 20 * 21 * It can be very tempting to add handling for obscure cases here. 22 * In general any code for handling new cases should only be added iff: 23 * - You know how to test it. 24 * - You have a test that can be added to mce-test 25 * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/ 26 * - The case actually shows up as a frequent (top 10) page state in 27 * tools/mm/page-types when running a real workload. 28 * 29 * There are several operations here with exponential complexity because 30 * of unsuitable VM data structures. For example the operation to map back 31 * from RMAP chains to processes has to walk the complete process list and 32 * has non linear complexity with the number. But since memory corruptions 33 * are rare we hope to get away with this. This avoids impacting the core 34 * VM. 35 */ 36 37 #define pr_fmt(fmt) "Memory failure: " fmt 38 39 #include <linux/kernel.h> 40 #include <linux/mm.h> 41 #include <linux/page-flags.h> 42 #include <linux/sched/signal.h> 43 #include <linux/sched/task.h> 44 #include <linux/dax.h> 45 #include <linux/ksm.h> 46 #include <linux/rmap.h> 47 #include <linux/export.h> 48 #include <linux/pagemap.h> 49 #include <linux/swap.h> 50 #include <linux/backing-dev.h> 51 #include <linux/migrate.h> 52 #include <linux/slab.h> 53 #include <linux/swapops.h> 54 #include <linux/hugetlb.h> 55 #include <linux/memory_hotplug.h> 56 #include <linux/mm_inline.h> 57 #include <linux/memremap.h> 58 #include <linux/kfifo.h> 59 #include <linux/ratelimit.h> 60 #include <linux/pagewalk.h> 61 #include <linux/shmem_fs.h> 62 #include <linux/sysctl.h> 63 #include "swap.h" 64 #include "internal.h" 65 #include "ras/ras_event.h" 66 67 static int sysctl_memory_failure_early_kill __read_mostly; 68 69 static int sysctl_memory_failure_recovery __read_mostly = 1; 70 71 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0); 72 73 static bool hw_memory_failure __read_mostly = false; 74 75 static DEFINE_MUTEX(mf_mutex); 76 77 void num_poisoned_pages_inc(unsigned long pfn) 78 { 79 atomic_long_inc(&num_poisoned_pages); 80 memblk_nr_poison_inc(pfn); 81 } 82 83 void num_poisoned_pages_sub(unsigned long pfn, long i) 84 { 85 atomic_long_sub(i, &num_poisoned_pages); 86 if (pfn != -1UL) 87 memblk_nr_poison_sub(pfn, i); 88 } 89 90 /** 91 * MF_ATTR_RO - Create sysfs entry for each memory failure statistics. 92 * @_name: name of the file in the per NUMA sysfs directory. 
93 */ 94 #define MF_ATTR_RO(_name) \ 95 static ssize_t _name##_show(struct device *dev, \ 96 struct device_attribute *attr, \ 97 char *buf) \ 98 { \ 99 struct memory_failure_stats *mf_stats = \ 100 &NODE_DATA(dev->id)->mf_stats; \ 101 return sprintf(buf, "%lu\n", mf_stats->_name); \ 102 } \ 103 static DEVICE_ATTR_RO(_name) 104 105 MF_ATTR_RO(total); 106 MF_ATTR_RO(ignored); 107 MF_ATTR_RO(failed); 108 MF_ATTR_RO(delayed); 109 MF_ATTR_RO(recovered); 110 111 static struct attribute *memory_failure_attr[] = { 112 &dev_attr_total.attr, 113 &dev_attr_ignored.attr, 114 &dev_attr_failed.attr, 115 &dev_attr_delayed.attr, 116 &dev_attr_recovered.attr, 117 NULL, 118 }; 119 120 const struct attribute_group memory_failure_attr_group = { 121 .name = "memory_failure", 122 .attrs = memory_failure_attr, 123 }; 124 125 static struct ctl_table memory_failure_table[] = { 126 { 127 .procname = "memory_failure_early_kill", 128 .data = &sysctl_memory_failure_early_kill, 129 .maxlen = sizeof(sysctl_memory_failure_early_kill), 130 .mode = 0644, 131 .proc_handler = proc_dointvec_minmax, 132 .extra1 = SYSCTL_ZERO, 133 .extra2 = SYSCTL_ONE, 134 }, 135 { 136 .procname = "memory_failure_recovery", 137 .data = &sysctl_memory_failure_recovery, 138 .maxlen = sizeof(sysctl_memory_failure_recovery), 139 .mode = 0644, 140 .proc_handler = proc_dointvec_minmax, 141 .extra1 = SYSCTL_ZERO, 142 .extra2 = SYSCTL_ONE, 143 }, 144 }; 145 146 /* 147 * Return values: 148 * 1: the page is dissolved (if needed) and taken off from buddy, 149 * 0: the page is dissolved (if needed) and not taken off from buddy, 150 * < 0: failed to dissolve. 151 */ 152 static int __page_handle_poison(struct page *page) 153 { 154 int ret; 155 156 /* 157 * zone_pcp_disable() can't be used here. It will 158 * hold pcp_batch_high_lock and dissolve_free_hugetlb_folio() might hold 159 * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap 160 * optimization is enabled. This will break current lock dependency 161 * chain and leads to deadlock. 162 * Disabling pcp before dissolving the page was a deterministic 163 * approach because we made sure that those pages cannot end up in any 164 * PCP list. Draining PCP lists expels those pages to the buddy system, 165 * but nothing guarantees that those pages do not get back to a PCP 166 * queue if we need to refill those. 167 */ 168 ret = dissolve_free_hugetlb_folio(page_folio(page)); 169 if (!ret) { 170 drain_all_pages(page_zone(page)); 171 ret = take_page_off_buddy(page); 172 } 173 174 return ret; 175 } 176 177 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release) 178 { 179 if (hugepage_or_freepage) { 180 /* 181 * Doing this check for free pages is also fine since 182 * dissolve_free_hugetlb_folio() returns 0 for non-hugetlb folios as well. 183 */ 184 if (__page_handle_poison(page) <= 0) 185 /* 186 * We could fail to take off the target page from buddy 187 * for example due to racy page allocation, but that's 188 * acceptable because soft-offlined page is not broken 189 * and if someone really want to use it, they should 190 * take it. 
191 */ 192 return false; 193 } 194 195 SetPageHWPoison(page); 196 if (release) 197 put_page(page); 198 page_ref_inc(page); 199 num_poisoned_pages_inc(page_to_pfn(page)); 200 201 return true; 202 } 203 204 #if IS_ENABLED(CONFIG_HWPOISON_INJECT) 205 206 u32 hwpoison_filter_enable = 0; 207 u32 hwpoison_filter_dev_major = ~0U; 208 u32 hwpoison_filter_dev_minor = ~0U; 209 u64 hwpoison_filter_flags_mask; 210 u64 hwpoison_filter_flags_value; 211 EXPORT_SYMBOL_GPL(hwpoison_filter_enable); 212 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major); 213 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor); 214 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask); 215 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value); 216 217 static int hwpoison_filter_dev(struct page *p) 218 { 219 struct folio *folio = page_folio(p); 220 struct address_space *mapping; 221 dev_t dev; 222 223 if (hwpoison_filter_dev_major == ~0U && 224 hwpoison_filter_dev_minor == ~0U) 225 return 0; 226 227 mapping = folio_mapping(folio); 228 if (mapping == NULL || mapping->host == NULL) 229 return -EINVAL; 230 231 dev = mapping->host->i_sb->s_dev; 232 if (hwpoison_filter_dev_major != ~0U && 233 hwpoison_filter_dev_major != MAJOR(dev)) 234 return -EINVAL; 235 if (hwpoison_filter_dev_minor != ~0U && 236 hwpoison_filter_dev_minor != MINOR(dev)) 237 return -EINVAL; 238 239 return 0; 240 } 241 242 static int hwpoison_filter_flags(struct page *p) 243 { 244 if (!hwpoison_filter_flags_mask) 245 return 0; 246 247 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == 248 hwpoison_filter_flags_value) 249 return 0; 250 else 251 return -EINVAL; 252 } 253 254 /* 255 * This allows stress tests to limit test scope to a collection of tasks 256 * by putting them under some memcg. This prevents killing unrelated/important 257 * processes such as /sbin/init. Note that the target task may share clean 258 * pages with init (eg. libc text), which is harmless. If the target task 259 * share _dirty_ pages with another task B, the test scheme must make sure B 260 * is also included in the memcg. At last, due to race conditions this filter 261 * can only guarantee that the page either belongs to the memcg tasks, or is 262 * a freed page. 263 */ 264 #ifdef CONFIG_MEMCG 265 u64 hwpoison_filter_memcg; 266 EXPORT_SYMBOL_GPL(hwpoison_filter_memcg); 267 static int hwpoison_filter_task(struct page *p) 268 { 269 if (!hwpoison_filter_memcg) 270 return 0; 271 272 if (page_cgroup_ino(p) != hwpoison_filter_memcg) 273 return -EINVAL; 274 275 return 0; 276 } 277 #else 278 static int hwpoison_filter_task(struct page *p) { return 0; } 279 #endif 280 281 int hwpoison_filter(struct page *p) 282 { 283 if (!hwpoison_filter_enable) 284 return 0; 285 286 if (hwpoison_filter_dev(p)) 287 return -EINVAL; 288 289 if (hwpoison_filter_flags(p)) 290 return -EINVAL; 291 292 if (hwpoison_filter_task(p)) 293 return -EINVAL; 294 295 return 0; 296 } 297 #else 298 int hwpoison_filter(struct page *p) 299 { 300 return 0; 301 } 302 #endif 303 304 EXPORT_SYMBOL_GPL(hwpoison_filter); 305 306 /* 307 * Kill all processes that have a poisoned page mapped and then isolate 308 * the page. 309 * 310 * General strategy: 311 * Find all processes having the page mapped and kill them. 312 * But we keep a page reference around so that the page is not 313 * actually freed yet. 314 * Then stash the page away 315 * 316 * There's no convenient way to get back to mapped processes 317 * from the VMAs. So do a brute-force search over all 318 * running processes. 
319 * 320 * Remember that machine checks are not common (or rather 321 * if they are common you have other problems), so this shouldn't 322 * be a performance issue. 323 * 324 * Also there are some races possible while we get from the 325 * error detection to actually handle it. 326 */ 327 328 struct to_kill { 329 struct list_head nd; 330 struct task_struct *tsk; 331 unsigned long addr; 332 short size_shift; 333 }; 334 335 /* 336 * Send all the processes who have the page mapped a signal. 337 * ``action optional'' if they are not immediately affected by the error 338 * ``action required'' if error happened in current execution context 339 */ 340 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) 341 { 342 struct task_struct *t = tk->tsk; 343 short addr_lsb = tk->size_shift; 344 int ret = 0; 345 346 pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n", 347 pfn, t->comm, t->pid); 348 349 if ((flags & MF_ACTION_REQUIRED) && (t == current)) 350 ret = force_sig_mceerr(BUS_MCEERR_AR, 351 (void __user *)tk->addr, addr_lsb); 352 else 353 /* 354 * Signal other processes sharing the page if they have 355 * PF_MCE_EARLY set. 356 * Don't use force here, it's convenient if the signal 357 * can be temporarily blocked. 358 * This could cause a loop when the user sets SIGBUS 359 * to SIG_IGN, but hopefully no one will do that? 360 */ 361 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr, 362 addr_lsb, t); 363 if (ret < 0) 364 pr_info("Error sending signal to %s:%d: %d\n", 365 t->comm, t->pid, ret); 366 return ret; 367 } 368 369 /* 370 * Unknown page type encountered. Try to check whether it can turn PageLRU by 371 * lru_add_drain_all. 372 */ 373 void shake_folio(struct folio *folio) 374 { 375 if (folio_test_hugetlb(folio)) 376 return; 377 /* 378 * TODO: Could shrink slab caches here if a lightweight range-based 379 * shrinker will be available. 380 */ 381 if (folio_test_slab(folio)) 382 return; 383 384 lru_add_drain_all(); 385 } 386 EXPORT_SYMBOL_GPL(shake_folio); 387 388 static void shake_page(struct page *page) 389 { 390 shake_folio(page_folio(page)); 391 } 392 393 static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, 394 unsigned long address) 395 { 396 unsigned long ret = 0; 397 pgd_t *pgd; 398 p4d_t *p4d; 399 pud_t *pud; 400 pmd_t *pmd; 401 pte_t *pte; 402 pte_t ptent; 403 404 VM_BUG_ON_VMA(address == -EFAULT, vma); 405 pgd = pgd_offset(vma->vm_mm, address); 406 if (!pgd_present(*pgd)) 407 return 0; 408 p4d = p4d_offset(pgd, address); 409 if (!p4d_present(*p4d)) 410 return 0; 411 pud = pud_offset(p4d, address); 412 if (!pud_present(*pud)) 413 return 0; 414 if (pud_devmap(*pud)) 415 return PUD_SHIFT; 416 pmd = pmd_offset(pud, address); 417 if (!pmd_present(*pmd)) 418 return 0; 419 if (pmd_devmap(*pmd)) 420 return PMD_SHIFT; 421 pte = pte_offset_map(pmd, address); 422 if (!pte) 423 return 0; 424 ptent = ptep_get(pte); 425 if (pte_present(ptent) && pte_devmap(ptent)) 426 ret = PAGE_SHIFT; 427 pte_unmap(pte); 428 return ret; 429 } 430 431 /* 432 * Failure handling: if we can't find or can't kill a process there's 433 * not much we can do. We just print a message and ignore otherwise. 434 */ 435 436 /* 437 * Schedule a process for later kill. 438 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM. 
439 */ 440 static void __add_to_kill(struct task_struct *tsk, struct page *p, 441 struct vm_area_struct *vma, struct list_head *to_kill, 442 unsigned long addr) 443 { 444 struct to_kill *tk; 445 446 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); 447 if (!tk) { 448 pr_err("Out of memory while machine check handling\n"); 449 return; 450 } 451 452 tk->addr = addr; 453 if (is_zone_device_page(p)) 454 tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr); 455 else 456 tk->size_shift = page_shift(compound_head(p)); 457 458 /* 459 * Send SIGKILL if "tk->addr == -EFAULT". Also, as 460 * "tk->size_shift" is always non-zero for !is_zone_device_page(), 461 * so "tk->size_shift == 0" effectively checks no mapping on 462 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times 463 * to a process' address space, it's possible not all N VMAs 464 * contain mappings for the page, but at least one VMA does. 465 * Only deliver SIGBUS with payload derived from the VMA that 466 * has a mapping for the page. 467 */ 468 if (tk->addr == -EFAULT) { 469 pr_info("Unable to find user space address %lx in %s\n", 470 page_to_pfn(p), tsk->comm); 471 } else if (tk->size_shift == 0) { 472 kfree(tk); 473 return; 474 } 475 476 get_task_struct(tsk); 477 tk->tsk = tsk; 478 list_add_tail(&tk->nd, to_kill); 479 } 480 481 static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p, 482 struct vm_area_struct *vma, struct list_head *to_kill, 483 unsigned long addr) 484 { 485 if (addr == -EFAULT) 486 return; 487 __add_to_kill(tsk, p, vma, to_kill, addr); 488 } 489 490 #ifdef CONFIG_KSM 491 static bool task_in_to_kill_list(struct list_head *to_kill, 492 struct task_struct *tsk) 493 { 494 struct to_kill *tk, *next; 495 496 list_for_each_entry_safe(tk, next, to_kill, nd) { 497 if (tk->tsk == tsk) 498 return true; 499 } 500 501 return false; 502 } 503 504 void add_to_kill_ksm(struct task_struct *tsk, struct page *p, 505 struct vm_area_struct *vma, struct list_head *to_kill, 506 unsigned long addr) 507 { 508 if (!task_in_to_kill_list(to_kill, tsk)) 509 __add_to_kill(tsk, p, vma, to_kill, addr); 510 } 511 #endif 512 /* 513 * Kill the processes that have been collected earlier. 514 * 515 * Only do anything when FORCEKILL is set, otherwise just free the 516 * list (this is used for clean pages which do not need killing) 517 */ 518 static void kill_procs(struct list_head *to_kill, int forcekill, 519 unsigned long pfn, int flags) 520 { 521 struct to_kill *tk, *next; 522 523 list_for_each_entry_safe(tk, next, to_kill, nd) { 524 if (forcekill) { 525 if (tk->addr == -EFAULT) { 526 pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", 527 pfn, tk->tsk->comm, tk->tsk->pid); 528 do_send_sig_info(SIGKILL, SEND_SIG_PRIV, 529 tk->tsk, PIDTYPE_PID); 530 } 531 532 /* 533 * In theory the process could have mapped 534 * something else on the address in-between. We could 535 * check for that, but we need to tell the 536 * process anyways. 537 */ 538 else if (kill_proc(tk, pfn, flags) < 0) 539 pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n", 540 pfn, tk->tsk->comm, tk->tsk->pid); 541 } 542 list_del(&tk->nd); 543 put_task_struct(tk->tsk); 544 kfree(tk); 545 } 546 } 547 548 /* 549 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO) 550 * on behalf of the thread group. Return task_struct of the (first found) 551 * dedicated thread if found, and return NULL otherwise. 
552 * 553 * We already hold rcu lock in the caller, so we don't have to call 554 * rcu_read_lock/unlock() in this function. 555 */ 556 static struct task_struct *find_early_kill_thread(struct task_struct *tsk) 557 { 558 struct task_struct *t; 559 560 for_each_thread(tsk, t) { 561 if (t->flags & PF_MCE_PROCESS) { 562 if (t->flags & PF_MCE_EARLY) 563 return t; 564 } else { 565 if (sysctl_memory_failure_early_kill) 566 return t; 567 } 568 } 569 return NULL; 570 } 571 572 /* 573 * Determine whether a given process is "early kill" process which expects 574 * to be signaled when some page under the process is hwpoisoned. 575 * Return task_struct of the dedicated thread (main thread unless explicitly 576 * specified) if the process is "early kill" and otherwise returns NULL. 577 * 578 * Note that the above is true for Action Optional case. For Action Required 579 * case, it's only meaningful to the current thread which need to be signaled 580 * with SIGBUS, this error is Action Optional for other non current 581 * processes sharing the same error page,if the process is "early kill", the 582 * task_struct of the dedicated thread will also be returned. 583 */ 584 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early) 585 { 586 if (!tsk->mm) 587 return NULL; 588 /* 589 * Comparing ->mm here because current task might represent 590 * a subthread, while tsk always points to the main thread. 591 */ 592 if (force_early && tsk->mm == current->mm) 593 return current; 594 595 return find_early_kill_thread(tsk); 596 } 597 598 /* 599 * Collect processes when the error hit an anonymous page. 600 */ 601 static void collect_procs_anon(struct folio *folio, struct page *page, 602 struct list_head *to_kill, int force_early) 603 { 604 struct task_struct *tsk; 605 struct anon_vma *av; 606 pgoff_t pgoff; 607 608 av = folio_lock_anon_vma_read(folio, NULL); 609 if (av == NULL) /* Not actually mapped anymore */ 610 return; 611 612 pgoff = page_to_pgoff(page); 613 rcu_read_lock(); 614 for_each_process(tsk) { 615 struct vm_area_struct *vma; 616 struct anon_vma_chain *vmac; 617 struct task_struct *t = task_early_kill(tsk, force_early); 618 unsigned long addr; 619 620 if (!t) 621 continue; 622 anon_vma_interval_tree_foreach(vmac, &av->rb_root, 623 pgoff, pgoff) { 624 vma = vmac->vma; 625 if (vma->vm_mm != t->mm) 626 continue; 627 addr = page_mapped_in_vma(page, vma); 628 add_to_kill_anon_file(t, page, vma, to_kill, addr); 629 } 630 } 631 rcu_read_unlock(); 632 anon_vma_unlock_read(av); 633 } 634 635 /* 636 * Collect processes when the error hit a file mapped page. 637 */ 638 static void collect_procs_file(struct folio *folio, struct page *page, 639 struct list_head *to_kill, int force_early) 640 { 641 struct vm_area_struct *vma; 642 struct task_struct *tsk; 643 struct address_space *mapping = folio->mapping; 644 pgoff_t pgoff; 645 646 i_mmap_lock_read(mapping); 647 rcu_read_lock(); 648 pgoff = page_to_pgoff(page); 649 for_each_process(tsk) { 650 struct task_struct *t = task_early_kill(tsk, force_early); 651 unsigned long addr; 652 653 if (!t) 654 continue; 655 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, 656 pgoff) { 657 /* 658 * Send early kill signal to tasks where a vma covers 659 * the page but the corrupted page is not necessarily 660 * mapped in its pte. 661 * Assume applications who requested early kill want 662 * to be informed of all such data corruptions. 
663 */ 664 if (vma->vm_mm != t->mm) 665 continue; 666 addr = page_address_in_vma(page, vma); 667 add_to_kill_anon_file(t, page, vma, to_kill, addr); 668 } 669 } 670 rcu_read_unlock(); 671 i_mmap_unlock_read(mapping); 672 } 673 674 #ifdef CONFIG_FS_DAX 675 static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p, 676 struct vm_area_struct *vma, 677 struct list_head *to_kill, pgoff_t pgoff) 678 { 679 unsigned long addr = vma_address(vma, pgoff, 1); 680 __add_to_kill(tsk, p, vma, to_kill, addr); 681 } 682 683 /* 684 * Collect processes when the error hit a fsdax page. 685 */ 686 static void collect_procs_fsdax(struct page *page, 687 struct address_space *mapping, pgoff_t pgoff, 688 struct list_head *to_kill, bool pre_remove) 689 { 690 struct vm_area_struct *vma; 691 struct task_struct *tsk; 692 693 i_mmap_lock_read(mapping); 694 rcu_read_lock(); 695 for_each_process(tsk) { 696 struct task_struct *t = tsk; 697 698 /* 699 * Search for all tasks while MF_MEM_PRE_REMOVE is set, because 700 * the current may not be the one accessing the fsdax page. 701 * Otherwise, search for the current task. 702 */ 703 if (!pre_remove) 704 t = task_early_kill(tsk, true); 705 if (!t) 706 continue; 707 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 708 if (vma->vm_mm == t->mm) 709 add_to_kill_fsdax(t, page, vma, to_kill, pgoff); 710 } 711 } 712 rcu_read_unlock(); 713 i_mmap_unlock_read(mapping); 714 } 715 #endif /* CONFIG_FS_DAX */ 716 717 /* 718 * Collect the processes who have the corrupted page mapped to kill. 719 */ 720 static void collect_procs(struct folio *folio, struct page *page, 721 struct list_head *tokill, int force_early) 722 { 723 if (!folio->mapping) 724 return; 725 if (unlikely(folio_test_ksm(folio))) 726 collect_procs_ksm(folio, page, tokill, force_early); 727 else if (folio_test_anon(folio)) 728 collect_procs_anon(folio, page, tokill, force_early); 729 else 730 collect_procs_file(folio, page, tokill, force_early); 731 } 732 733 struct hwpoison_walk { 734 struct to_kill tk; 735 unsigned long pfn; 736 int flags; 737 }; 738 739 static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift) 740 { 741 tk->addr = addr; 742 tk->size_shift = shift; 743 } 744 745 static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift, 746 unsigned long poisoned_pfn, struct to_kill *tk) 747 { 748 unsigned long pfn = 0; 749 750 if (pte_present(pte)) { 751 pfn = pte_pfn(pte); 752 } else { 753 swp_entry_t swp = pte_to_swp_entry(pte); 754 755 if (is_hwpoison_entry(swp)) 756 pfn = swp_offset_pfn(swp); 757 } 758 759 if (!pfn || pfn != poisoned_pfn) 760 return 0; 761 762 set_to_kill(tk, addr, shift); 763 return 1; 764 } 765 766 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 767 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, 768 struct hwpoison_walk *hwp) 769 { 770 pmd_t pmd = *pmdp; 771 unsigned long pfn; 772 unsigned long hwpoison_vaddr; 773 774 if (!pmd_present(pmd)) 775 return 0; 776 pfn = pmd_pfn(pmd); 777 if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) { 778 hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT); 779 set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT); 780 return 1; 781 } 782 return 0; 783 } 784 #else 785 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, 786 struct hwpoison_walk *hwp) 787 { 788 return 0; 789 } 790 #endif 791 792 static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr, 793 unsigned long end, struct mm_walk *walk) 794 { 795 struct hwpoison_walk *hwp = walk->private; 796 int ret = 0; 
797 pte_t *ptep, *mapped_pte; 798 spinlock_t *ptl; 799 800 ptl = pmd_trans_huge_lock(pmdp, walk->vma); 801 if (ptl) { 802 ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp); 803 spin_unlock(ptl); 804 goto out; 805 } 806 807 mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp, 808 addr, &ptl); 809 if (!ptep) 810 goto out; 811 812 for (; addr != end; ptep++, addr += PAGE_SIZE) { 813 ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT, 814 hwp->pfn, &hwp->tk); 815 if (ret == 1) 816 break; 817 } 818 pte_unmap_unlock(mapped_pte, ptl); 819 out: 820 cond_resched(); 821 return ret; 822 } 823 824 #ifdef CONFIG_HUGETLB_PAGE 825 static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask, 826 unsigned long addr, unsigned long end, 827 struct mm_walk *walk) 828 { 829 struct hwpoison_walk *hwp = walk->private; 830 pte_t pte = huge_ptep_get(ptep); 831 struct hstate *h = hstate_vma(walk->vma); 832 833 return check_hwpoisoned_entry(pte, addr, huge_page_shift(h), 834 hwp->pfn, &hwp->tk); 835 } 836 #else 837 #define hwpoison_hugetlb_range NULL 838 #endif 839 840 static const struct mm_walk_ops hwpoison_walk_ops = { 841 .pmd_entry = hwpoison_pte_range, 842 .hugetlb_entry = hwpoison_hugetlb_range, 843 .walk_lock = PGWALK_RDLOCK, 844 }; 845 846 /* 847 * Sends SIGBUS to the current process with error info. 848 * 849 * This function is intended to handle "Action Required" MCEs on already 850 * hardware poisoned pages. They could happen, for example, when 851 * memory_failure() failed to unmap the error page at the first call, or 852 * when multiple local machine checks happened on different CPUs. 853 * 854 * MCE handler currently has no easy access to the error virtual address, 855 * so this function walks page table to find it. The returned virtual address 856 * is proper in most cases, but it could be wrong when the application 857 * process has multiple entries mapping the error page. 858 */ 859 static int kill_accessing_process(struct task_struct *p, unsigned long pfn, 860 int flags) 861 { 862 int ret; 863 struct hwpoison_walk priv = { 864 .pfn = pfn, 865 }; 866 priv.tk.tsk = p; 867 868 if (!p->mm) 869 return -EFAULT; 870 871 mmap_read_lock(p->mm); 872 ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops, 873 (void *)&priv); 874 if (ret == 1 && priv.tk.addr) 875 kill_proc(&priv.tk, pfn, flags); 876 else 877 ret = 0; 878 mmap_read_unlock(p->mm); 879 return ret > 0 ? -EHWPOISON : -EFAULT; 880 } 881 882 /* 883 * MF_IGNORED - The m-f() handler marks the page as PG_hwpoisoned'ed. 884 * But it could not do more to isolate the page from being accessed again, 885 * nor does it kill the process. This is extremely rare and one of the 886 * potential causes is that the page state has been changed due to 887 * underlying race condition. This is the most severe outcomes. 888 * 889 * MF_FAILED - The m-f() handler marks the page as PG_hwpoisoned'ed. 890 * It should have killed the process, but it can't isolate the page, 891 * due to conditions such as extra pin, unmap failure, etc. Accessing 892 * the page again may trigger another MCE and the process will be killed 893 * by the m-f() handler immediately. 894 * 895 * MF_DELAYED - The m-f() handler marks the page as PG_hwpoisoned'ed. 896 * The page is unmapped, and is removed from the LRU or file mapping. 897 * An attempt to access the page again will trigger page fault and the 898 * PF handler will kill the process. 899 * 900 * MF_RECOVERED - The m-f() handler marks the page as PG_hwpoisoned'ed. 
901 * The page has been completely isolated, that is, unmapped, taken out of 902 * the buddy system, or hole-punnched out of the file mapping. 903 */ 904 static const char *action_name[] = { 905 [MF_IGNORED] = "Ignored", 906 [MF_FAILED] = "Failed", 907 [MF_DELAYED] = "Delayed", 908 [MF_RECOVERED] = "Recovered", 909 }; 910 911 static const char * const action_page_types[] = { 912 [MF_MSG_KERNEL] = "reserved kernel page", 913 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page", 914 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking", 915 [MF_MSG_HUGE] = "huge page", 916 [MF_MSG_FREE_HUGE] = "free huge page", 917 [MF_MSG_GET_HWPOISON] = "get hwpoison page", 918 [MF_MSG_UNMAP_FAILED] = "unmapping failed page", 919 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page", 920 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page", 921 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page", 922 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page", 923 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page", 924 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page", 925 [MF_MSG_DIRTY_LRU] = "dirty LRU page", 926 [MF_MSG_CLEAN_LRU] = "clean LRU page", 927 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page", 928 [MF_MSG_BUDDY] = "free buddy page", 929 [MF_MSG_DAX] = "dax page", 930 [MF_MSG_UNSPLIT_THP] = "unsplit thp", 931 [MF_MSG_ALREADY_POISONED] = "already poisoned", 932 [MF_MSG_UNKNOWN] = "unknown page", 933 }; 934 935 /* 936 * XXX: It is possible that a page is isolated from LRU cache, 937 * and then kept in swap cache or failed to remove from page cache. 938 * The page count will stop it from being freed by unpoison. 939 * Stress tests should be aware of this memory leak problem. 940 */ 941 static int delete_from_lru_cache(struct folio *folio) 942 { 943 if (folio_isolate_lru(folio)) { 944 /* 945 * Clear sensible page flags, so that the buddy system won't 946 * complain when the folio is unpoison-and-freed. 947 */ 948 folio_clear_active(folio); 949 folio_clear_unevictable(folio); 950 951 /* 952 * Poisoned page might never drop its ref count to 0 so we have 953 * to uncharge it manually from its memcg. 954 */ 955 mem_cgroup_uncharge(folio); 956 957 /* 958 * drop the refcount elevated by folio_isolate_lru() 959 */ 960 folio_put(folio); 961 return 0; 962 } 963 return -EIO; 964 } 965 966 static int truncate_error_folio(struct folio *folio, unsigned long pfn, 967 struct address_space *mapping) 968 { 969 int ret = MF_FAILED; 970 971 if (mapping->a_ops->error_remove_folio) { 972 int err = mapping->a_ops->error_remove_folio(mapping, folio); 973 974 if (err != 0) 975 pr_info("%#lx: Failed to punch page: %d\n", pfn, err); 976 else if (!filemap_release_folio(folio, GFP_NOIO)) 977 pr_info("%#lx: failed to release buffers\n", pfn); 978 else 979 ret = MF_RECOVERED; 980 } else { 981 /* 982 * If the file system doesn't support it just invalidate 983 * This fails on dirty or anything with private pages 984 */ 985 if (mapping_evict_folio(mapping, folio)) 986 ret = MF_RECOVERED; 987 else 988 pr_info("%#lx: Failed to invalidate\n", pfn); 989 } 990 991 return ret; 992 } 993 994 struct page_state { 995 unsigned long mask; 996 unsigned long res; 997 enum mf_action_page_type type; 998 999 /* Callback ->action() has to unlock the relevant page inside it. */ 1000 int (*action)(struct page_state *ps, struct page *p); 1001 }; 1002 1003 /* 1004 * Return true if page is still referenced by others, otherwise return 1005 * false. 
1006 * 1007 * The extra_pins is true when one extra refcount is expected. 1008 */ 1009 static bool has_extra_refcount(struct page_state *ps, struct page *p, 1010 bool extra_pins) 1011 { 1012 int count = page_count(p) - 1; 1013 1014 if (extra_pins) 1015 count -= folio_nr_pages(page_folio(p)); 1016 1017 if (count > 0) { 1018 pr_err("%#lx: %s still referenced by %d users\n", 1019 page_to_pfn(p), action_page_types[ps->type], count); 1020 return true; 1021 } 1022 1023 return false; 1024 } 1025 1026 /* 1027 * Error hit kernel page. 1028 * Do nothing, try to be lucky and not touch this instead. For a few cases we 1029 * could be more sophisticated. 1030 */ 1031 static int me_kernel(struct page_state *ps, struct page *p) 1032 { 1033 unlock_page(p); 1034 return MF_IGNORED; 1035 } 1036 1037 /* 1038 * Page in unknown state. Do nothing. 1039 * This is a catch-all in case we fail to make sense of the page state. 1040 */ 1041 static int me_unknown(struct page_state *ps, struct page *p) 1042 { 1043 pr_err("%#lx: Unknown page state\n", page_to_pfn(p)); 1044 unlock_page(p); 1045 return MF_IGNORED; 1046 } 1047 1048 /* 1049 * Clean (or cleaned) page cache page. 1050 */ 1051 static int me_pagecache_clean(struct page_state *ps, struct page *p) 1052 { 1053 struct folio *folio = page_folio(p); 1054 int ret; 1055 struct address_space *mapping; 1056 bool extra_pins; 1057 1058 delete_from_lru_cache(folio); 1059 1060 /* 1061 * For anonymous folios the only reference left 1062 * should be the one m_f() holds. 1063 */ 1064 if (folio_test_anon(folio)) { 1065 ret = MF_RECOVERED; 1066 goto out; 1067 } 1068 1069 /* 1070 * Now truncate the page in the page cache. This is really 1071 * more like a "temporary hole punch" 1072 * Don't do this for block devices when someone else 1073 * has a reference, because it could be file system metadata 1074 * and that's not safe to truncate. 1075 */ 1076 mapping = folio_mapping(folio); 1077 if (!mapping) { 1078 /* Folio has been torn down in the meantime */ 1079 ret = MF_FAILED; 1080 goto out; 1081 } 1082 1083 /* 1084 * The shmem page is kept in page cache instead of truncating 1085 * so is expected to have an extra refcount after error-handling. 1086 */ 1087 extra_pins = shmem_mapping(mapping); 1088 1089 /* 1090 * Truncation is a bit tricky. Enable it per file system for now. 1091 * 1092 * Open: to take i_rwsem or not for this? Right now we don't. 1093 */ 1094 ret = truncate_error_folio(folio, page_to_pfn(p), mapping); 1095 if (has_extra_refcount(ps, p, extra_pins)) 1096 ret = MF_FAILED; 1097 1098 out: 1099 folio_unlock(folio); 1100 1101 return ret; 1102 } 1103 1104 /* 1105 * Dirty pagecache page 1106 * Issues: when the error hit a hole page the error is not properly 1107 * propagated. 1108 */ 1109 static int me_pagecache_dirty(struct page_state *ps, struct page *p) 1110 { 1111 struct folio *folio = page_folio(p); 1112 struct address_space *mapping = folio_mapping(folio); 1113 1114 /* TBD: print more information about the file. */ 1115 if (mapping) { 1116 /* 1117 * IO error will be reported by write(), fsync(), etc. 1118 * who check the mapping. 1119 * This way the application knows that something went 1120 * wrong with its dirty file data. 1121 */ 1122 mapping_set_error(mapping, -EIO); 1123 } 1124 1125 return me_pagecache_clean(ps, p); 1126 } 1127 1128 /* 1129 * Clean and dirty swap cache. 1130 * 1131 * Dirty swap cache page is tricky to handle. The page could live both in page 1132 * cache and swap cache(ie. page is freshly swapped in). 
So it could be 1133 * referenced concurrently by 2 types of PTEs: 1134 * normal PTEs and swap PTEs. We try to handle them consistently by calling 1135 * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs, 1136 * and then 1137 * - clear dirty bit to prevent IO 1138 * - remove from LRU 1139 * - but keep in the swap cache, so that when we return to it on 1140 * a later page fault, we know the application is accessing 1141 * corrupted data and shall be killed (we installed simple 1142 * interception code in do_swap_page to catch it). 1143 * 1144 * Clean swap cache pages can be directly isolated. A later page fault will 1145 * bring in the known good data from disk. 1146 */ 1147 static int me_swapcache_dirty(struct page_state *ps, struct page *p) 1148 { 1149 struct folio *folio = page_folio(p); 1150 int ret; 1151 bool extra_pins = false; 1152 1153 folio_clear_dirty(folio); 1154 /* Trigger EIO in shmem: */ 1155 folio_clear_uptodate(folio); 1156 1157 ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED; 1158 folio_unlock(folio); 1159 1160 if (ret == MF_DELAYED) 1161 extra_pins = true; 1162 1163 if (has_extra_refcount(ps, p, extra_pins)) 1164 ret = MF_FAILED; 1165 1166 return ret; 1167 } 1168 1169 static int me_swapcache_clean(struct page_state *ps, struct page *p) 1170 { 1171 struct folio *folio = page_folio(p); 1172 int ret; 1173 1174 delete_from_swap_cache(folio); 1175 1176 ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED; 1177 folio_unlock(folio); 1178 1179 if (has_extra_refcount(ps, p, false)) 1180 ret = MF_FAILED; 1181 1182 return ret; 1183 } 1184 1185 /* 1186 * Huge pages. Needs work. 1187 * Issues: 1188 * - Error on hugepage is contained in hugepage unit (not in raw page unit.) 1189 * To narrow down kill region to one page, we need to break up pmd. 1190 */ 1191 static int me_huge_page(struct page_state *ps, struct page *p) 1192 { 1193 struct folio *folio = page_folio(p); 1194 int res; 1195 struct address_space *mapping; 1196 bool extra_pins = false; 1197 1198 mapping = folio_mapping(folio); 1199 if (mapping) { 1200 res = truncate_error_folio(folio, page_to_pfn(p), mapping); 1201 /* The page is kept in page cache. */ 1202 extra_pins = true; 1203 folio_unlock(folio); 1204 } else { 1205 folio_unlock(folio); 1206 /* 1207 * migration entry prevents later access on error hugepage, 1208 * so we can free and dissolve it into buddy to save healthy 1209 * subpages. 1210 */ 1211 folio_put(folio); 1212 if (__page_handle_poison(p) > 0) { 1213 page_ref_inc(p); 1214 res = MF_RECOVERED; 1215 } else { 1216 res = MF_FAILED; 1217 } 1218 } 1219 1220 if (has_extra_refcount(ps, p, extra_pins)) 1221 res = MF_FAILED; 1222 1223 return res; 1224 } 1225 1226 /* 1227 * Various page states we can handle. 1228 * 1229 * A page state is defined by its current page->flags bits. 1230 * The table matches them in order and calls the right handler. 1231 * 1232 * This is quite tricky because we can access page at any time 1233 * in its live cycle, so all accesses have to be extremely careful. 1234 * 1235 * This is not complete. More states could be added. 1236 * For any missing state don't attempt recovery. 
1237 */ 1238 1239 #define dirty (1UL << PG_dirty) 1240 #define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked)) 1241 #define unevict (1UL << PG_unevictable) 1242 #define mlock (1UL << PG_mlocked) 1243 #define lru (1UL << PG_lru) 1244 #define head (1UL << PG_head) 1245 #define reserved (1UL << PG_reserved) 1246 1247 static struct page_state error_states[] = { 1248 { reserved, reserved, MF_MSG_KERNEL, me_kernel }, 1249 /* 1250 * free pages are specially detected outside this table: 1251 * PG_buddy pages only make a small fraction of all free pages. 1252 */ 1253 1254 { head, head, MF_MSG_HUGE, me_huge_page }, 1255 1256 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty }, 1257 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean }, 1258 1259 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty }, 1260 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean }, 1261 1262 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty }, 1263 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean }, 1264 1265 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty }, 1266 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean }, 1267 1268 /* 1269 * Catchall entry: must be at end. 1270 */ 1271 { 0, 0, MF_MSG_UNKNOWN, me_unknown }, 1272 }; 1273 1274 #undef dirty 1275 #undef sc 1276 #undef unevict 1277 #undef mlock 1278 #undef lru 1279 #undef head 1280 #undef reserved 1281 1282 static void update_per_node_mf_stats(unsigned long pfn, 1283 enum mf_result result) 1284 { 1285 int nid = MAX_NUMNODES; 1286 struct memory_failure_stats *mf_stats = NULL; 1287 1288 nid = pfn_to_nid(pfn); 1289 if (unlikely(nid < 0 || nid >= MAX_NUMNODES)) { 1290 WARN_ONCE(1, "Memory failure: pfn=%#lx, invalid nid=%d", pfn, nid); 1291 return; 1292 } 1293 1294 mf_stats = &NODE_DATA(nid)->mf_stats; 1295 switch (result) { 1296 case MF_IGNORED: 1297 ++mf_stats->ignored; 1298 break; 1299 case MF_FAILED: 1300 ++mf_stats->failed; 1301 break; 1302 case MF_DELAYED: 1303 ++mf_stats->delayed; 1304 break; 1305 case MF_RECOVERED: 1306 ++mf_stats->recovered; 1307 break; 1308 default: 1309 WARN_ONCE(1, "Memory failure: mf_result=%d is not properly handled", result); 1310 break; 1311 } 1312 ++mf_stats->total; 1313 } 1314 1315 /* 1316 * "Dirty/Clean" indication is not 100% accurate due to the possibility of 1317 * setting PG_dirty outside page lock. See also comment above set_page_dirty(). 1318 */ 1319 static int action_result(unsigned long pfn, enum mf_action_page_type type, 1320 enum mf_result result) 1321 { 1322 trace_memory_failure_event(pfn, type, result); 1323 1324 num_poisoned_pages_inc(pfn); 1325 1326 update_per_node_mf_stats(pfn, result); 1327 1328 pr_err("%#lx: recovery action for %s: %s\n", 1329 pfn, action_page_types[type], action_name[result]); 1330 1331 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY; 1332 } 1333 1334 static int page_action(struct page_state *ps, struct page *p, 1335 unsigned long pfn) 1336 { 1337 int result; 1338 1339 /* page p should be unlocked after returning from ps->action(). */ 1340 result = ps->action(ps, p); 1341 1342 /* Could do more checks here if page looks ok */ 1343 /* 1344 * Could adjust zone counters here to correct for the missing page. 
1345 */ 1346 1347 return action_result(pfn, ps->type, result); 1348 } 1349 1350 static inline bool PageHWPoisonTakenOff(struct page *page) 1351 { 1352 return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON; 1353 } 1354 1355 void SetPageHWPoisonTakenOff(struct page *page) 1356 { 1357 set_page_private(page, MAGIC_HWPOISON); 1358 } 1359 1360 void ClearPageHWPoisonTakenOff(struct page *page) 1361 { 1362 if (PageHWPoison(page)) 1363 set_page_private(page, 0); 1364 } 1365 1366 /* 1367 * Return true if a page type of a given page is supported by hwpoison 1368 * mechanism (while handling could fail), otherwise false. This function 1369 * does not return true for hugetlb or device memory pages, so it's assumed 1370 * to be called only in the context where we never have such pages. 1371 */ 1372 static inline bool HWPoisonHandlable(struct page *page, unsigned long flags) 1373 { 1374 if (PageSlab(page)) 1375 return false; 1376 1377 /* Soft offline could migrate non-LRU movable pages */ 1378 if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page)) 1379 return true; 1380 1381 return PageLRU(page) || is_free_buddy_page(page); 1382 } 1383 1384 static int __get_hwpoison_page(struct page *page, unsigned long flags) 1385 { 1386 struct folio *folio = page_folio(page); 1387 int ret = 0; 1388 bool hugetlb = false; 1389 1390 ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, false); 1391 if (hugetlb) { 1392 /* Make sure hugetlb demotion did not happen from under us. */ 1393 if (folio == page_folio(page)) 1394 return ret; 1395 if (ret > 0) { 1396 folio_put(folio); 1397 folio = page_folio(page); 1398 } 1399 } 1400 1401 /* 1402 * This check prevents from calling folio_try_get() for any 1403 * unsupported type of folio in order to reduce the risk of unexpected 1404 * races caused by taking a folio refcount. 1405 */ 1406 if (!HWPoisonHandlable(&folio->page, flags)) 1407 return -EBUSY; 1408 1409 if (folio_try_get(folio)) { 1410 if (folio == page_folio(page)) 1411 return 1; 1412 1413 pr_info("%#lx cannot catch tail\n", page_to_pfn(page)); 1414 folio_put(folio); 1415 } 1416 1417 return 0; 1418 } 1419 1420 static int get_any_page(struct page *p, unsigned long flags) 1421 { 1422 int ret = 0, pass = 0; 1423 bool count_increased = false; 1424 1425 if (flags & MF_COUNT_INCREASED) 1426 count_increased = true; 1427 1428 try_again: 1429 if (!count_increased) { 1430 ret = __get_hwpoison_page(p, flags); 1431 if (!ret) { 1432 if (page_count(p)) { 1433 /* We raced with an allocation, retry. */ 1434 if (pass++ < 3) 1435 goto try_again; 1436 ret = -EBUSY; 1437 } else if (!PageHuge(p) && !is_free_buddy_page(p)) { 1438 /* We raced with put_page, retry. */ 1439 if (pass++ < 3) 1440 goto try_again; 1441 ret = -EIO; 1442 } 1443 goto out; 1444 } else if (ret == -EBUSY) { 1445 /* 1446 * We raced with (possibly temporary) unhandlable 1447 * page, retry. 1448 */ 1449 if (pass++ < 3) { 1450 shake_page(p); 1451 goto try_again; 1452 } 1453 ret = -EIO; 1454 goto out; 1455 } 1456 } 1457 1458 if (PageHuge(p) || HWPoisonHandlable(p, flags)) { 1459 ret = 1; 1460 } else { 1461 /* 1462 * A page we cannot handle. Check whether we can turn 1463 * it into something we can handle. 
1464 */ 1465 if (pass++ < 3) { 1466 put_page(p); 1467 shake_page(p); 1468 count_increased = false; 1469 goto try_again; 1470 } 1471 put_page(p); 1472 ret = -EIO; 1473 } 1474 out: 1475 if (ret == -EIO) 1476 pr_err("%#lx: unhandlable page.\n", page_to_pfn(p)); 1477 1478 return ret; 1479 } 1480 1481 static int __get_unpoison_page(struct page *page) 1482 { 1483 struct folio *folio = page_folio(page); 1484 int ret = 0; 1485 bool hugetlb = false; 1486 1487 ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, true); 1488 if (hugetlb) { 1489 /* Make sure hugetlb demotion did not happen from under us. */ 1490 if (folio == page_folio(page)) 1491 return ret; 1492 if (ret > 0) 1493 folio_put(folio); 1494 } 1495 1496 /* 1497 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison, 1498 * but also isolated from buddy freelist, so need to identify the 1499 * state and have to cancel both operations to unpoison. 1500 */ 1501 if (PageHWPoisonTakenOff(page)) 1502 return -EHWPOISON; 1503 1504 return get_page_unless_zero(page) ? 1 : 0; 1505 } 1506 1507 /** 1508 * get_hwpoison_page() - Get refcount for memory error handling 1509 * @p: Raw error page (hit by memory error) 1510 * @flags: Flags controlling behavior of error handling 1511 * 1512 * get_hwpoison_page() takes a page refcount of an error page to handle memory 1513 * error on it, after checking that the error page is in a well-defined state 1514 * (defined as a page-type we can successfully handle the memory error on it, 1515 * such as LRU page and hugetlb page). 1516 * 1517 * Memory error handling could be triggered at any time on any type of page, 1518 * so it's prone to race with typical memory management lifecycle (like 1519 * allocation and free). So to avoid such races, get_hwpoison_page() takes 1520 * extra care for the error page's state (as done in __get_hwpoison_page()), 1521 * and has some retry logic in get_any_page(). 1522 * 1523 * When called from unpoison_memory(), the caller should already ensure that 1524 * the given page has PG_hwpoison. So it's never reused for other page 1525 * allocations, and __get_unpoison_page() never races with them. 1526 * 1527 * Return: 0 on failure, 1528 * 1 on success for in-use pages in a well-defined state, 1529 * -EIO for pages on which we can not handle memory errors, 1530 * -EBUSY when get_hwpoison_page() has raced with page lifecycle 1531 * operations like allocation and free, 1532 * -EHWPOISON when the page is hwpoisoned and taken off from buddy. 1533 */ 1534 static int get_hwpoison_page(struct page *p, unsigned long flags) 1535 { 1536 int ret; 1537 1538 zone_pcp_disable(page_zone(p)); 1539 if (flags & MF_UNPOISON) 1540 ret = __get_unpoison_page(p); 1541 else 1542 ret = get_any_page(p, flags); 1543 zone_pcp_enable(page_zone(p)); 1544 1545 return ret; 1546 } 1547 1548 /* 1549 * Do all that is necessary to remove user space mappings. Unmap 1550 * the pages and send SIGBUS to the processes if the data was dirty. 1551 */ 1552 static bool hwpoison_user_mappings(struct folio *folio, struct page *p, 1553 unsigned long pfn, int flags) 1554 { 1555 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON; 1556 struct address_space *mapping; 1557 LIST_HEAD(tokill); 1558 bool unmap_success; 1559 int forcekill; 1560 bool mlocked = folio_test_mlocked(folio); 1561 1562 /* 1563 * Here we are interested only in user-mapped pages, so skip any 1564 * other types of pages. 
1565 */ 1566 if (folio_test_reserved(folio) || folio_test_slab(folio) || 1567 folio_test_pgtable(folio) || folio_test_offline(folio)) 1568 return true; 1569 if (!(folio_test_lru(folio) || folio_test_hugetlb(folio))) 1570 return true; 1571 1572 /* 1573 * This check implies we don't kill processes if their pages 1574 * are in the swap cache early. Those are always late kills. 1575 */ 1576 if (!page_mapped(p)) 1577 return true; 1578 1579 if (folio_test_swapcache(folio)) { 1580 pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); 1581 ttu &= ~TTU_HWPOISON; 1582 } 1583 1584 /* 1585 * Propagate the dirty bit from PTEs to struct page first, because we 1586 * need this to decide if we should kill or just drop the page. 1587 * XXX: the dirty test could be racy: set_page_dirty() may not always 1588 * be called inside page lock (it's recommended but not enforced). 1589 */ 1590 mapping = folio_mapping(folio); 1591 if (!(flags & MF_MUST_KILL) && !folio_test_dirty(folio) && mapping && 1592 mapping_can_writeback(mapping)) { 1593 if (folio_mkclean(folio)) { 1594 folio_set_dirty(folio); 1595 } else { 1596 ttu &= ~TTU_HWPOISON; 1597 pr_info("%#lx: corrupted page was clean: dropped without side effects\n", 1598 pfn); 1599 } 1600 } 1601 1602 /* 1603 * First collect all the processes that have the page 1604 * mapped in dirty form. This has to be done before try_to_unmap, 1605 * because ttu takes the rmap data structures down. 1606 */ 1607 collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); 1608 1609 if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { 1610 /* 1611 * For hugetlb pages in shared mappings, try_to_unmap 1612 * could potentially call huge_pmd_unshare. Because of 1613 * this, take semaphore in write mode here and set 1614 * TTU_RMAP_LOCKED to indicate we have taken the lock 1615 * at this higher level. 1616 */ 1617 mapping = hugetlb_folio_mapping_lock_write(folio); 1618 if (mapping) { 1619 try_to_unmap(folio, ttu|TTU_RMAP_LOCKED); 1620 i_mmap_unlock_write(mapping); 1621 } else 1622 pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn); 1623 } else { 1624 try_to_unmap(folio, ttu); 1625 } 1626 1627 unmap_success = !page_mapped(p); 1628 if (!unmap_success) 1629 pr_err("%#lx: failed to unmap page (folio mapcount=%d)\n", 1630 pfn, folio_mapcount(page_folio(p))); 1631 1632 /* 1633 * try_to_unmap() might put mlocked page in lru cache, so call 1634 * shake_page() again to ensure that it's flushed. 1635 */ 1636 if (mlocked) 1637 shake_folio(folio); 1638 1639 /* 1640 * Now that the dirty bit has been propagated to the 1641 * struct page and all unmaps done we can decide if 1642 * killing is needed or not. Only kill when the page 1643 * was dirty or the process is not restartable, 1644 * otherwise the tokill list is merely 1645 * freed. When there was a problem unmapping earlier 1646 * use a more force-full uncatchable kill to prevent 1647 * any accesses to the poisoned memory. 1648 */ 1649 forcekill = folio_test_dirty(folio) || (flags & MF_MUST_KILL) || 1650 !unmap_success; 1651 kill_procs(&tokill, forcekill, pfn, flags); 1652 1653 return unmap_success; 1654 } 1655 1656 static int identify_page_state(unsigned long pfn, struct page *p, 1657 unsigned long page_flags) 1658 { 1659 struct page_state *ps; 1660 1661 /* 1662 * The first check uses the current page flags which may not have any 1663 * relevant information. The second check with the saved page flags is 1664 * carried out only if the first check can't determine the page status. 
1665 */ 1666 for (ps = error_states;; ps++) 1667 if ((p->flags & ps->mask) == ps->res) 1668 break; 1669 1670 page_flags |= (p->flags & (1UL << PG_dirty)); 1671 1672 if (!ps->mask) 1673 for (ps = error_states;; ps++) 1674 if ((page_flags & ps->mask) == ps->res) 1675 break; 1676 return page_action(ps, p, pfn); 1677 } 1678 1679 /* 1680 * When 'release' is 'false', it means that if thp split has failed, 1681 * there is still more to do, hence the page refcount we took earlier 1682 * is still needed. 1683 */ 1684 static int try_to_split_thp_page(struct page *page, bool release) 1685 { 1686 int ret; 1687 1688 lock_page(page); 1689 ret = split_huge_page(page); 1690 unlock_page(page); 1691 1692 if (ret && release) 1693 put_page(page); 1694 1695 return ret; 1696 } 1697 1698 static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn, 1699 struct address_space *mapping, pgoff_t index, int flags) 1700 { 1701 struct to_kill *tk; 1702 unsigned long size = 0; 1703 1704 list_for_each_entry(tk, to_kill, nd) 1705 if (tk->size_shift) 1706 size = max(size, 1UL << tk->size_shift); 1707 1708 if (size) { 1709 /* 1710 * Unmap the largest mapping to avoid breaking up device-dax 1711 * mappings which are constant size. The actual size of the 1712 * mapping being torn down is communicated in siginfo, see 1713 * kill_proc() 1714 */ 1715 loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1); 1716 1717 unmap_mapping_range(mapping, start, size, 0); 1718 } 1719 1720 kill_procs(to_kill, flags & MF_MUST_KILL, pfn, flags); 1721 } 1722 1723 /* 1724 * Only dev_pagemap pages get here, such as fsdax when the filesystem 1725 * either do not claim or fails to claim a hwpoison event, or devdax. 1726 * The fsdax pages are initialized per base page, and the devdax pages 1727 * could be initialized either as base pages, or as compound pages with 1728 * vmemmap optimization enabled. Devdax is simplistic in its dealing with 1729 * hwpoison, such that, if a subpage of a compound page is poisoned, 1730 * simply mark the compound head page is by far sufficient. 1731 */ 1732 static int mf_generic_kill_procs(unsigned long long pfn, int flags, 1733 struct dev_pagemap *pgmap) 1734 { 1735 struct folio *folio = pfn_folio(pfn); 1736 LIST_HEAD(to_kill); 1737 dax_entry_t cookie; 1738 int rc = 0; 1739 1740 /* 1741 * Prevent the inode from being freed while we are interrogating 1742 * the address_space, typically this would be handled by 1743 * lock_page(), but dax pages do not use the page lock. This 1744 * also prevents changes to the mapping of this pfn until 1745 * poison signaling is complete. 1746 */ 1747 cookie = dax_lock_folio(folio); 1748 if (!cookie) 1749 return -EBUSY; 1750 1751 if (hwpoison_filter(&folio->page)) { 1752 rc = -EOPNOTSUPP; 1753 goto unlock; 1754 } 1755 1756 switch (pgmap->type) { 1757 case MEMORY_DEVICE_PRIVATE: 1758 case MEMORY_DEVICE_COHERENT: 1759 /* 1760 * TODO: Handle device pages which may need coordination 1761 * with device-side memory. 1762 */ 1763 rc = -ENXIO; 1764 goto unlock; 1765 default: 1766 break; 1767 } 1768 1769 /* 1770 * Use this flag as an indication that the dax page has been 1771 * remapped UC to prevent speculative consumption of poison. 1772 */ 1773 SetPageHWPoison(&folio->page); 1774 1775 /* 1776 * Unlike System-RAM there is no possibility to swap in a 1777 * different physical page at a given virtual address, so all 1778 * userspace consumption of ZONE_DEVICE memory necessitates 1779 * SIGBUS (i.e. 
MF_MUST_KILL) 1780 */ 1781 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; 1782 collect_procs(folio, &folio->page, &to_kill, true); 1783 1784 unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags); 1785 unlock: 1786 dax_unlock_folio(folio, cookie); 1787 return rc; 1788 } 1789 1790 #ifdef CONFIG_FS_DAX 1791 /** 1792 * mf_dax_kill_procs - Collect and kill processes who are using this file range 1793 * @mapping: address_space of the file in use 1794 * @index: start pgoff of the range within the file 1795 * @count: length of the range, in unit of PAGE_SIZE 1796 * @mf_flags: memory failure flags 1797 */ 1798 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, 1799 unsigned long count, int mf_flags) 1800 { 1801 LIST_HEAD(to_kill); 1802 dax_entry_t cookie; 1803 struct page *page; 1804 size_t end = index + count; 1805 bool pre_remove = mf_flags & MF_MEM_PRE_REMOVE; 1806 1807 mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; 1808 1809 for (; index < end; index++) { 1810 page = NULL; 1811 cookie = dax_lock_mapping_entry(mapping, index, &page); 1812 if (!cookie) 1813 return -EBUSY; 1814 if (!page) 1815 goto unlock; 1816 1817 if (!pre_remove) 1818 SetPageHWPoison(page); 1819 1820 /* 1821 * The pre_remove case is revoking access, the memory is still 1822 * good and could theoretically be put back into service. 1823 */ 1824 collect_procs_fsdax(page, mapping, index, &to_kill, pre_remove); 1825 unmap_and_kill(&to_kill, page_to_pfn(page), mapping, 1826 index, mf_flags); 1827 unlock: 1828 dax_unlock_mapping_entry(mapping, index, cookie); 1829 } 1830 return 0; 1831 } 1832 EXPORT_SYMBOL_GPL(mf_dax_kill_procs); 1833 #endif /* CONFIG_FS_DAX */ 1834 1835 #ifdef CONFIG_HUGETLB_PAGE 1836 1837 /* 1838 * Struct raw_hwp_page represents information about "raw error page", 1839 * constructing singly linked list from ->_hugetlb_hwpoison field of folio. 1840 */ 1841 struct raw_hwp_page { 1842 struct llist_node node; 1843 struct page *page; 1844 }; 1845 1846 static inline struct llist_head *raw_hwp_list_head(struct folio *folio) 1847 { 1848 return (struct llist_head *)&folio->_hugetlb_hwpoison; 1849 } 1850 1851 bool is_raw_hwpoison_page_in_hugepage(struct page *page) 1852 { 1853 struct llist_head *raw_hwp_head; 1854 struct raw_hwp_page *p; 1855 struct folio *folio = page_folio(page); 1856 bool ret = false; 1857 1858 if (!folio_test_hwpoison(folio)) 1859 return false; 1860 1861 if (!folio_test_hugetlb(folio)) 1862 return PageHWPoison(page); 1863 1864 /* 1865 * When RawHwpUnreliable is set, kernel lost track of which subpages 1866 * are HWPOISON. So return as if ALL subpages are HWPOISONed. 
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return true;

	mutex_lock(&mf_mutex);

	raw_hwp_head = raw_hwp_list_head(folio);
	llist_for_each_entry(p, raw_hwp_head->first, node) {
		if (page == p->page) {
			ret = true;
			break;
		}
	}

	mutex_unlock(&mf_mutex);

	return ret;
}

static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag)
{
	struct llist_node *head;
	struct raw_hwp_page *p, *next;
	unsigned long count = 0;

	head = llist_del_all(raw_hwp_list_head(folio));
	llist_for_each_entry_safe(p, next, head, node) {
		if (move_flag)
			SetPageHWPoison(p->page);
		else
			num_poisoned_pages_sub(page_to_pfn(p->page), 1);
		kfree(p);
		count++;
	}
	return count;
}

static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page)
{
	struct llist_head *head;
	struct raw_hwp_page *raw_hwp;
	struct raw_hwp_page *p;
	int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0;

	/*
	 * Once the hwpoisoned hugepage has lost its reliable raw error info,
	 * there is little point in keeping precise per-subpage error info,
	 * so skip adding any further raw error info.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return -EHWPOISON;
	head = raw_hwp_list_head(folio);
	llist_for_each_entry(p, head->first, node) {
		if (p->page == page)
			return -EHWPOISON;
	}

	raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
	if (raw_hwp) {
		raw_hwp->page = page;
		llist_add(&raw_hwp->node, head);
		/* the first error event will be counted in action_result(). */
		if (ret)
			num_poisoned_pages_inc(page_to_pfn(page));
	} else {
		/*
		 * Failed to save raw error info. We no longer track all
		 * hwpoisoned subpages, so we have to refuse to free/dissolve
		 * this hwpoisoned hugepage.
		 */
		folio_set_hugetlb_raw_hwp_unreliable(folio);
		/*
		 * Once hugetlb_raw_hwp_unreliable is set, raw_hwp_page is not
		 * used any more, so free it.
		 */
		__folio_free_raw_hwp(folio, false);
	}
	return ret;
}

static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag)
{
	/*
	 * hugetlb_vmemmap_optimized hugepages can't be freed because struct
	 * pages for tail pages are required but they don't exist.
	 */
	if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio))
		return 0;

	/*
	 * hugetlb_raw_hwp_unreliable hugepages shouldn't be unpoisoned by
	 * definition.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return 0;

	return __folio_free_raw_hwp(folio, move_flag);
}

void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;
	if (folio_test_hugetlb_vmemmap_optimized(folio))
		return;
	folio_clear_hwpoison(folio);
	folio_free_raw_hwp(folio, true);
}

/*
 * Called from hugetlb code with hugetlb_lock held.
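 *
 * A minimal sketch of the expected calling pattern (the real wrapper,
 * get_huge_page_for_hwpoison() in mm/hugetlb.c, looks roughly like this):
 *
 *	spin_lock_irq(&hugetlb_lock);
 *	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
 *	spin_unlock_irq(&hugetlb_lock);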
1978 * 1979 * Return values: 1980 * 0 - free hugepage 1981 * 1 - in-use hugepage 1982 * 2 - not a hugepage 1983 * -EBUSY - the hugepage is busy (try to retry) 1984 * -EHWPOISON - the hugepage is already hwpoisoned 1985 */ 1986 int __get_huge_page_for_hwpoison(unsigned long pfn, int flags, 1987 bool *migratable_cleared) 1988 { 1989 struct page *page = pfn_to_page(pfn); 1990 struct folio *folio = page_folio(page); 1991 int ret = 2; /* fallback to normal page handling */ 1992 bool count_increased = false; 1993 1994 if (!folio_test_hugetlb(folio)) 1995 goto out; 1996 1997 if (flags & MF_COUNT_INCREASED) { 1998 ret = 1; 1999 count_increased = true; 2000 } else if (folio_test_hugetlb_freed(folio)) { 2001 ret = 0; 2002 } else if (folio_test_hugetlb_migratable(folio)) { 2003 ret = folio_try_get(folio); 2004 if (ret) 2005 count_increased = true; 2006 } else { 2007 ret = -EBUSY; 2008 if (!(flags & MF_NO_RETRY)) 2009 goto out; 2010 } 2011 2012 if (folio_set_hugetlb_hwpoison(folio, page)) { 2013 ret = -EHWPOISON; 2014 goto out; 2015 } 2016 2017 /* 2018 * Clearing hugetlb_migratable for hwpoisoned hugepages to prevent them 2019 * from being migrated by memory hotremove. 2020 */ 2021 if (count_increased && folio_test_hugetlb_migratable(folio)) { 2022 folio_clear_hugetlb_migratable(folio); 2023 *migratable_cleared = true; 2024 } 2025 2026 return ret; 2027 out: 2028 if (count_increased) 2029 folio_put(folio); 2030 return ret; 2031 } 2032 2033 /* 2034 * Taking refcount of hugetlb pages needs extra care about race conditions 2035 * with basic operations like hugepage allocation/free/demotion. 2036 * So some of prechecks for hwpoison (pinning, and testing/setting 2037 * PageHWPoison) should be done in single hugetlb_lock range. 2038 */ 2039 static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) 2040 { 2041 int res; 2042 struct page *p = pfn_to_page(pfn); 2043 struct folio *folio; 2044 unsigned long page_flags; 2045 bool migratable_cleared = false; 2046 2047 *hugetlb = 1; 2048 retry: 2049 res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared); 2050 if (res == 2) { /* fallback to normal page handling */ 2051 *hugetlb = 0; 2052 return 0; 2053 } else if (res == -EHWPOISON) { 2054 pr_err("%#lx: already hardware poisoned\n", pfn); 2055 if (flags & MF_ACTION_REQUIRED) { 2056 folio = page_folio(p); 2057 res = kill_accessing_process(current, folio_pfn(folio), flags); 2058 action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED); 2059 } 2060 return res; 2061 } else if (res == -EBUSY) { 2062 if (!(flags & MF_NO_RETRY)) { 2063 flags |= MF_NO_RETRY; 2064 goto retry; 2065 } 2066 return action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED); 2067 } 2068 2069 folio = page_folio(p); 2070 folio_lock(folio); 2071 2072 if (hwpoison_filter(p)) { 2073 folio_clear_hugetlb_hwpoison(folio); 2074 if (migratable_cleared) 2075 folio_set_hugetlb_migratable(folio); 2076 folio_unlock(folio); 2077 if (res == 1) 2078 folio_put(folio); 2079 return -EOPNOTSUPP; 2080 } 2081 2082 /* 2083 * Handling free hugepage. The possible race with hugepage allocation 2084 * or demotion can be prevented by PageHWPoison flag. 
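	 *
	 * A sketch of the gate relied upon (hugetlb side, outside this file):
	 * the hugetlb free-list dequeue path skips poisoned folios, roughly
	 *
	 *	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
	 *		if (folio_test_hwpoison(folio))
	 *			continue;	// never handed out again
	 *		...
	 *	}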
2085 */ 2086 if (res == 0) { 2087 folio_unlock(folio); 2088 if (__page_handle_poison(p) > 0) { 2089 page_ref_inc(p); 2090 res = MF_RECOVERED; 2091 } else { 2092 res = MF_FAILED; 2093 } 2094 return action_result(pfn, MF_MSG_FREE_HUGE, res); 2095 } 2096 2097 page_flags = folio->flags; 2098 2099 if (!hwpoison_user_mappings(folio, p, pfn, flags)) { 2100 folio_unlock(folio); 2101 return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_FAILED); 2102 } 2103 2104 return identify_page_state(pfn, p, page_flags); 2105 } 2106 2107 #else 2108 static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) 2109 { 2110 return 0; 2111 } 2112 2113 static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag) 2114 { 2115 return 0; 2116 } 2117 #endif /* CONFIG_HUGETLB_PAGE */ 2118 2119 /* Drop the extra refcount in case we come from madvise() */ 2120 static void put_ref_page(unsigned long pfn, int flags) 2121 { 2122 if (!(flags & MF_COUNT_INCREASED)) 2123 return; 2124 2125 put_page(pfn_to_page(pfn)); 2126 } 2127 2128 static int memory_failure_dev_pagemap(unsigned long pfn, int flags, 2129 struct dev_pagemap *pgmap) 2130 { 2131 int rc = -ENXIO; 2132 2133 /* device metadata space is not recoverable */ 2134 if (!pgmap_pfn_valid(pgmap, pfn)) 2135 goto out; 2136 2137 /* 2138 * Call driver's implementation to handle the memory failure, otherwise 2139 * fall back to generic handler. 2140 */ 2141 if (pgmap_has_memory_failure(pgmap)) { 2142 rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags); 2143 /* 2144 * Fall back to generic handler too if operation is not 2145 * supported inside the driver/device/filesystem. 2146 */ 2147 if (rc != -EOPNOTSUPP) 2148 goto out; 2149 } 2150 2151 rc = mf_generic_kill_procs(pfn, flags, pgmap); 2152 out: 2153 /* drop pgmap ref acquired in caller */ 2154 put_dev_pagemap(pgmap); 2155 if (rc != -EOPNOTSUPP) 2156 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED); 2157 return rc; 2158 } 2159 2160 /* 2161 * The calling condition is as such: thp split failed, page might have 2162 * been RDMA pinned, not much can be done for recovery. 2163 * But a SIGBUS should be delivered with vaddr provided so that the user 2164 * application has a chance to recover. Also, application processes' 2165 * election for MCE early killed will be honored. 2166 */ 2167 static void kill_procs_now(struct page *p, unsigned long pfn, int flags, 2168 struct folio *folio) 2169 { 2170 LIST_HEAD(tokill); 2171 2172 collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); 2173 kill_procs(&tokill, true, pfn, flags); 2174 } 2175 2176 /** 2177 * memory_failure - Handle memory failure of a page. 2178 * @pfn: Page Number of the corrupted page 2179 * @flags: fine tune action taken 2180 * 2181 * This function is called by the low level machine check code 2182 * of an architecture when it detects hardware memory corruption 2183 * of a page. It tries its best to recover, which includes 2184 * dropping pages, killing processes etc. 2185 * 2186 * The function is primarily of use for corruptions that 2187 * happen outside the current execution context (e.g. when 2188 * detected by a background scrubber) 2189 * 2190 * Must run in process context (e.g. a work queue) with interrupts 2191 * enabled and no spinlocks held. 2192 * 2193 * Return: 0 for successfully handled the memory error, 2194 * -EOPNOTSUPP for hwpoison_filter() filtered the error event, 2195 * < 0(except -EOPNOTSUPP) on failure. 
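 *
 * A minimal sketch of a caller (illustrative only; handle_consumed_poison()
 * is hypothetical, and real architectures defer to process context, e.g.
 * via task_work or memory_failure_queue()):
 *
 *	// runs in process context after the machine check has been logged
 *	static void handle_consumed_poison(unsigned long paddr)
 *	{
 *		memory_failure(paddr >> PAGE_SHIFT, MF_ACTION_REQUIRED);
 *	}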
2196 */ 2197 int memory_failure(unsigned long pfn, int flags) 2198 { 2199 struct page *p; 2200 struct folio *folio; 2201 struct dev_pagemap *pgmap; 2202 int res = 0; 2203 unsigned long page_flags; 2204 bool retry = true; 2205 int hugetlb = 0; 2206 2207 if (!sysctl_memory_failure_recovery) 2208 panic("Memory failure on page %lx", pfn); 2209 2210 mutex_lock(&mf_mutex); 2211 2212 if (!(flags & MF_SW_SIMULATED)) 2213 hw_memory_failure = true; 2214 2215 p = pfn_to_online_page(pfn); 2216 if (!p) { 2217 res = arch_memory_failure(pfn, flags); 2218 if (res == 0) 2219 goto unlock_mutex; 2220 2221 if (pfn_valid(pfn)) { 2222 pgmap = get_dev_pagemap(pfn, NULL); 2223 put_ref_page(pfn, flags); 2224 if (pgmap) { 2225 res = memory_failure_dev_pagemap(pfn, flags, 2226 pgmap); 2227 goto unlock_mutex; 2228 } 2229 } 2230 pr_err("%#lx: memory outside kernel control\n", pfn); 2231 res = -ENXIO; 2232 goto unlock_mutex; 2233 } 2234 2235 try_again: 2236 res = try_memory_failure_hugetlb(pfn, flags, &hugetlb); 2237 if (hugetlb) 2238 goto unlock_mutex; 2239 2240 if (TestSetPageHWPoison(p)) { 2241 pr_err("%#lx: already hardware poisoned\n", pfn); 2242 res = -EHWPOISON; 2243 if (flags & MF_ACTION_REQUIRED) 2244 res = kill_accessing_process(current, pfn, flags); 2245 if (flags & MF_COUNT_INCREASED) 2246 put_page(p); 2247 action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED); 2248 goto unlock_mutex; 2249 } 2250 2251 /* 2252 * We need/can do nothing about count=0 pages. 2253 * 1) it's a free page, and therefore in safe hand: 2254 * check_new_page() will be the gate keeper. 2255 * 2) it's part of a non-compound high order page. 2256 * Implies some kernel user: cannot stop them from 2257 * R/W the page; let's pray that the page has been 2258 * used and will be freed some time later. 2259 * In fact it's dangerous to directly bump up page count from 0, 2260 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch. 2261 */ 2262 if (!(flags & MF_COUNT_INCREASED)) { 2263 res = get_hwpoison_page(p, flags); 2264 if (!res) { 2265 if (is_free_buddy_page(p)) { 2266 if (take_page_off_buddy(p)) { 2267 page_ref_inc(p); 2268 res = MF_RECOVERED; 2269 } else { 2270 /* We lost the race, try again */ 2271 if (retry) { 2272 ClearPageHWPoison(p); 2273 retry = false; 2274 goto try_again; 2275 } 2276 res = MF_FAILED; 2277 } 2278 res = action_result(pfn, MF_MSG_BUDDY, res); 2279 } else { 2280 res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED); 2281 } 2282 goto unlock_mutex; 2283 } else if (res < 0) { 2284 res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED); 2285 goto unlock_mutex; 2286 } 2287 } 2288 2289 folio = page_folio(p); 2290 2291 /* filter pages that are protected from hwpoison test by users */ 2292 folio_lock(folio); 2293 if (hwpoison_filter(p)) { 2294 ClearPageHWPoison(p); 2295 folio_unlock(folio); 2296 folio_put(folio); 2297 res = -EOPNOTSUPP; 2298 goto unlock_mutex; 2299 } 2300 folio_unlock(folio); 2301 2302 if (folio_test_large(folio)) { 2303 /* 2304 * The flag must be set after the refcount is bumped 2305 * otherwise it may race with THP split. 2306 * And the flag can't be set in get_hwpoison_page() since 2307 * it is called by soft offline too and it is just called 2308 * for !MF_COUNT_INCREASED. So here seems to be the best 2309 * place. 2310 * 2311 * Don't need care about the above error handling paths for 2312 * get_hwpoison_page() since they handle either free page 2313 * or unhandlable page. The refcount is bumped iff the 2314 * page is a valid handlable page. 
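		 *
		 * In short, the ordering relied upon here is (a recap of this
		 * function's own steps, not new logic):
		 *
		 *	get_hwpoison_page(p, flags);      // reference taken first
		 *	folio_set_has_hwpoisoned(folio);  // flag only after the ref
		 *	try_to_split_thp_page(p, false);  // split sees a stable flag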
		 */
		folio_set_has_hwpoisoned(folio);
		if (try_to_split_thp_page(p, false) < 0) {
			res = -EHWPOISON;
			kill_procs_now(p, pfn, flags, folio);
			put_page(p);
			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_FAILED);
			goto unlock_mutex;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
		folio = page_folio(p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code; however, that's not a big loss.
	 */
	shake_folio(folio);

	folio_lock(folio);

	/*
	 * At this point we should only be dealing with a non-compound page.
	 * However, the page could have become part of a compound page again
	 * in a race window. If this happens, retry once and hopefully handle
	 * the page next round.
	 */
	if (folio_test_large(folio)) {
		if (retry) {
			ClearPageHWPoison(p);
			folio_unlock(folio);
			folio_put(folio);
			flags &= ~MF_COUNT_INCREASED;
			retry = false;
			goto try_again;
		}
		res = action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		goto unlock_page;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action. One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
	 * status correctly, we save a copy of the page flags at this time.
	 */
	page_flags = folio->flags;

	/*
	 * __munlock_folio() may clear a writeback folio's LRU flag without
	 * the folio lock. We need to wait for writeback completion for this
	 * folio or it may trigger a vfs BUG while evicting the inode.
	 */
	if (!folio_test_lru(folio) && !folio_test_writeback(folio))
		goto identify_page_state;

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	folio_wait_writeback(folio);

	/*
	 * Now take care of user space mappings.
	 * Abort on failure: __filemap_remove_folio() assumes an unmapped page.
	 */
	if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
		res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_FAILED);
		goto unlock_page;
	}

	/*
	 * Torn down by someone else?
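	 *
	 * i.e. the state detected by the check below (sketch):
	 *
	 *	folio_test_lru(folio) &&
	 *	!folio_test_swapcache(folio) &&
	 *	folio->mapping == NULL		// truncation got there first
	 *
	 * in which case there is nothing left to recover; only log the event.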
2393 */ 2394 if (folio_test_lru(folio) && !folio_test_swapcache(folio) && 2395 folio->mapping == NULL) { 2396 res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED); 2397 goto unlock_page; 2398 } 2399 2400 identify_page_state: 2401 res = identify_page_state(pfn, p, page_flags); 2402 mutex_unlock(&mf_mutex); 2403 return res; 2404 unlock_page: 2405 folio_unlock(folio); 2406 unlock_mutex: 2407 mutex_unlock(&mf_mutex); 2408 return res; 2409 } 2410 EXPORT_SYMBOL_GPL(memory_failure); 2411 2412 #define MEMORY_FAILURE_FIFO_ORDER 4 2413 #define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER) 2414 2415 struct memory_failure_entry { 2416 unsigned long pfn; 2417 int flags; 2418 }; 2419 2420 struct memory_failure_cpu { 2421 DECLARE_KFIFO(fifo, struct memory_failure_entry, 2422 MEMORY_FAILURE_FIFO_SIZE); 2423 spinlock_t lock; 2424 struct work_struct work; 2425 }; 2426 2427 static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu); 2428 2429 /** 2430 * memory_failure_queue - Schedule handling memory failure of a page. 2431 * @pfn: Page Number of the corrupted page 2432 * @flags: Flags for memory failure handling 2433 * 2434 * This function is called by the low level hardware error handler 2435 * when it detects hardware memory corruption of a page. It schedules 2436 * the recovering of error page, including dropping pages, killing 2437 * processes etc. 2438 * 2439 * The function is primarily of use for corruptions that 2440 * happen outside the current execution context (e.g. when 2441 * detected by a background scrubber) 2442 * 2443 * Can run in IRQ context. 2444 */ 2445 void memory_failure_queue(unsigned long pfn, int flags) 2446 { 2447 struct memory_failure_cpu *mf_cpu; 2448 unsigned long proc_flags; 2449 struct memory_failure_entry entry = { 2450 .pfn = pfn, 2451 .flags = flags, 2452 }; 2453 2454 mf_cpu = &get_cpu_var(memory_failure_cpu); 2455 spin_lock_irqsave(&mf_cpu->lock, proc_flags); 2456 if (kfifo_put(&mf_cpu->fifo, entry)) 2457 schedule_work_on(smp_processor_id(), &mf_cpu->work); 2458 else 2459 pr_err("buffer overflow when queuing memory failure at %#lx\n", 2460 pfn); 2461 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); 2462 put_cpu_var(memory_failure_cpu); 2463 } 2464 EXPORT_SYMBOL_GPL(memory_failure_queue); 2465 2466 static void memory_failure_work_func(struct work_struct *work) 2467 { 2468 struct memory_failure_cpu *mf_cpu; 2469 struct memory_failure_entry entry = { 0, }; 2470 unsigned long proc_flags; 2471 int gotten; 2472 2473 mf_cpu = container_of(work, struct memory_failure_cpu, work); 2474 for (;;) { 2475 spin_lock_irqsave(&mf_cpu->lock, proc_flags); 2476 gotten = kfifo_get(&mf_cpu->fifo, &entry); 2477 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); 2478 if (!gotten) 2479 break; 2480 if (entry.flags & MF_SOFT_OFFLINE) 2481 soft_offline_page(entry.pfn, entry.flags); 2482 else 2483 memory_failure(entry.pfn, entry.flags); 2484 } 2485 } 2486 2487 /* 2488 * Process memory_failure work queued on the specified CPU. 2489 * Used to avoid return-to-userspace racing with the memory_failure workqueue. 
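 *
 * A sketch of the intended use (assumed caller, outside this file): the
 * architecture's machine-check code, before letting the affected task go
 * back to user space, flushes this CPU's queue synchronously:
 *
 *	memory_failure_queue_kick(smp_processor_id());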
2490 */ 2491 void memory_failure_queue_kick(int cpu) 2492 { 2493 struct memory_failure_cpu *mf_cpu; 2494 2495 mf_cpu = &per_cpu(memory_failure_cpu, cpu); 2496 cancel_work_sync(&mf_cpu->work); 2497 memory_failure_work_func(&mf_cpu->work); 2498 } 2499 2500 static int __init memory_failure_init(void) 2501 { 2502 struct memory_failure_cpu *mf_cpu; 2503 int cpu; 2504 2505 for_each_possible_cpu(cpu) { 2506 mf_cpu = &per_cpu(memory_failure_cpu, cpu); 2507 spin_lock_init(&mf_cpu->lock); 2508 INIT_KFIFO(mf_cpu->fifo); 2509 INIT_WORK(&mf_cpu->work, memory_failure_work_func); 2510 } 2511 2512 register_sysctl_init("vm", memory_failure_table); 2513 2514 return 0; 2515 } 2516 core_initcall(memory_failure_init); 2517 2518 #undef pr_fmt 2519 #define pr_fmt(fmt) "" fmt 2520 #define unpoison_pr_info(fmt, pfn, rs) \ 2521 ({ \ 2522 if (__ratelimit(rs)) \ 2523 pr_info(fmt, pfn); \ 2524 }) 2525 2526 /** 2527 * unpoison_memory - Unpoison a previously poisoned page 2528 * @pfn: Page number of the to be unpoisoned page 2529 * 2530 * Software-unpoison a page that has been poisoned by 2531 * memory_failure() earlier. 2532 * 2533 * This is only done on the software-level, so it only works 2534 * for linux injected failures, not real hardware failures 2535 * 2536 * Returns 0 for success, otherwise -errno. 2537 */ 2538 int unpoison_memory(unsigned long pfn) 2539 { 2540 struct folio *folio; 2541 struct page *p; 2542 int ret = -EBUSY, ghp; 2543 unsigned long count = 1; 2544 bool huge = false; 2545 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL, 2546 DEFAULT_RATELIMIT_BURST); 2547 2548 if (!pfn_valid(pfn)) 2549 return -ENXIO; 2550 2551 p = pfn_to_page(pfn); 2552 folio = page_folio(p); 2553 2554 mutex_lock(&mf_mutex); 2555 2556 if (hw_memory_failure) { 2557 unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n", 2558 pfn, &unpoison_rs); 2559 ret = -EOPNOTSUPP; 2560 goto unlock_mutex; 2561 } 2562 2563 if (is_huge_zero_folio(folio)) { 2564 unpoison_pr_info("Unpoison: huge zero page is not supported %#lx\n", 2565 pfn, &unpoison_rs); 2566 ret = -EOPNOTSUPP; 2567 goto unlock_mutex; 2568 } 2569 2570 if (!PageHWPoison(p)) { 2571 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n", 2572 pfn, &unpoison_rs); 2573 goto unlock_mutex; 2574 } 2575 2576 if (folio_ref_count(folio) > 1) { 2577 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n", 2578 pfn, &unpoison_rs); 2579 goto unlock_mutex; 2580 } 2581 2582 if (folio_test_slab(folio) || folio_test_pgtable(folio) || 2583 folio_test_reserved(folio) || folio_test_offline(folio)) 2584 goto unlock_mutex; 2585 2586 /* 2587 * Note that folio->_mapcount is overloaded in SLAB, so the simple test 2588 * in folio_mapped() has to be done after folio_test_slab() is checked. 2589 */ 2590 if (folio_mapped(folio)) { 2591 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n", 2592 pfn, &unpoison_rs); 2593 goto unlock_mutex; 2594 } 2595 2596 if (folio_mapping(folio)) { 2597 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n", 2598 pfn, &unpoison_rs); 2599 goto unlock_mutex; 2600 } 2601 2602 ghp = get_hwpoison_page(p, MF_UNPOISON); 2603 if (!ghp) { 2604 if (folio_test_hugetlb(folio)) { 2605 huge = true; 2606 count = folio_free_raw_hwp(folio, false); 2607 if (count == 0) 2608 goto unlock_mutex; 2609 } 2610 ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY; 2611 } else if (ghp < 0) { 2612 if (ghp == -EHWPOISON) { 2613 ret = put_page_back_buddy(p) ? 
0 : -EBUSY; 2614 } else { 2615 ret = ghp; 2616 unpoison_pr_info("Unpoison: failed to grab page %#lx\n", 2617 pfn, &unpoison_rs); 2618 } 2619 } else { 2620 if (folio_test_hugetlb(folio)) { 2621 huge = true; 2622 count = folio_free_raw_hwp(folio, false); 2623 if (count == 0) { 2624 folio_put(folio); 2625 goto unlock_mutex; 2626 } 2627 } 2628 2629 folio_put(folio); 2630 if (TestClearPageHWPoison(p)) { 2631 folio_put(folio); 2632 ret = 0; 2633 } 2634 } 2635 2636 unlock_mutex: 2637 mutex_unlock(&mf_mutex); 2638 if (!ret) { 2639 if (!huge) 2640 num_poisoned_pages_sub(pfn, 1); 2641 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n", 2642 page_to_pfn(p), &unpoison_rs); 2643 } 2644 return ret; 2645 } 2646 EXPORT_SYMBOL(unpoison_memory); 2647 2648 static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist) 2649 { 2650 bool isolated = false; 2651 2652 if (folio_test_hugetlb(folio)) { 2653 isolated = isolate_hugetlb(folio, pagelist); 2654 } else { 2655 bool lru = !__folio_test_movable(folio); 2656 2657 if (lru) 2658 isolated = folio_isolate_lru(folio); 2659 else 2660 isolated = isolate_movable_page(&folio->page, 2661 ISOLATE_UNEVICTABLE); 2662 2663 if (isolated) { 2664 list_add(&folio->lru, pagelist); 2665 if (lru) 2666 node_stat_add_folio(folio, NR_ISOLATED_ANON + 2667 folio_is_file_lru(folio)); 2668 } 2669 } 2670 2671 /* 2672 * If we succeed to isolate the folio, we grabbed another refcount on 2673 * the folio, so we can safely drop the one we got from get_any_page(). 2674 * If we failed to isolate the folio, it means that we cannot go further 2675 * and we will return an error, so drop the reference we got from 2676 * get_any_page() as well. 2677 */ 2678 folio_put(folio); 2679 return isolated; 2680 } 2681 2682 /* 2683 * soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages. 2684 * If the page is a non-dirty unmapped page-cache page, it simply invalidates. 2685 * If the page is mapped, it migrates the contents over. 2686 */ 2687 static int soft_offline_in_use_page(struct page *page) 2688 { 2689 long ret = 0; 2690 unsigned long pfn = page_to_pfn(page); 2691 struct folio *folio = page_folio(page); 2692 char const *msg_page[] = {"page", "hugepage"}; 2693 bool huge = folio_test_hugetlb(folio); 2694 LIST_HEAD(pagelist); 2695 struct migration_target_control mtc = { 2696 .nid = NUMA_NO_NODE, 2697 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, 2698 .reason = MR_MEMORY_FAILURE, 2699 }; 2700 2701 if (!huge && folio_test_large(folio)) { 2702 if (try_to_split_thp_page(page, true)) { 2703 pr_info("soft offline: %#lx: thp split failed\n", pfn); 2704 return -EBUSY; 2705 } 2706 folio = page_folio(page); 2707 } 2708 2709 folio_lock(folio); 2710 if (!huge) 2711 folio_wait_writeback(folio); 2712 if (PageHWPoison(page)) { 2713 folio_unlock(folio); 2714 folio_put(folio); 2715 pr_info("soft offline: %#lx page already poisoned\n", pfn); 2716 return 0; 2717 } 2718 2719 if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio)) 2720 /* 2721 * Try to invalidate first. This should work for 2722 * non dirty unmapped page cache pages. 
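		 *
		 * Rough decision being made here (a recap of the code below,
		 * not new logic):
		 *
		 *	if (mapping_evict_folio(mapping, folio))
		 *		page_handle_poison(...);	// clean & unmapped: done
		 *	else
		 *		migrate_pages(...);		// fall back to migration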
2723 */ 2724 ret = mapping_evict_folio(folio_mapping(folio), folio); 2725 folio_unlock(folio); 2726 2727 if (ret) { 2728 pr_info("soft_offline: %#lx: invalidated\n", pfn); 2729 page_handle_poison(page, false, true); 2730 return 0; 2731 } 2732 2733 if (mf_isolate_folio(folio, &pagelist)) { 2734 ret = migrate_pages(&pagelist, alloc_migration_target, NULL, 2735 (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL); 2736 if (!ret) { 2737 bool release = !huge; 2738 2739 if (!page_handle_poison(page, huge, release)) 2740 ret = -EBUSY; 2741 } else { 2742 if (!list_empty(&pagelist)) 2743 putback_movable_pages(&pagelist); 2744 2745 pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n", 2746 pfn, msg_page[huge], ret, &page->flags); 2747 if (ret > 0) 2748 ret = -EBUSY; 2749 } 2750 } else { 2751 pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n", 2752 pfn, msg_page[huge], page_count(page), &page->flags); 2753 ret = -EBUSY; 2754 } 2755 return ret; 2756 } 2757 2758 /** 2759 * soft_offline_page - Soft offline a page. 2760 * @pfn: pfn to soft-offline 2761 * @flags: flags. Same as memory_failure(). 2762 * 2763 * Returns 0 on success 2764 * -EOPNOTSUPP for hwpoison_filter() filtered the error event 2765 * < 0 otherwise negated errno. 2766 * 2767 * Soft offline a page, by migration or invalidation, 2768 * without killing anything. This is for the case when 2769 * a page is not corrupted yet (so it's still valid to access), 2770 * but has had a number of corrected errors and is better taken 2771 * out. 2772 * 2773 * The actual policy on when to do that is maintained by 2774 * user space. 2775 * 2776 * This should never impact any application or cause data loss, 2777 * however it might take some time. 2778 * 2779 * This is not a 100% solution for all memory, but tries to be 2780 * ``good enough'' for the majority of memory. 2781 */ 2782 int soft_offline_page(unsigned long pfn, int flags) 2783 { 2784 int ret; 2785 bool try_again = true; 2786 struct page *page; 2787 2788 if (!pfn_valid(pfn)) { 2789 WARN_ON_ONCE(flags & MF_COUNT_INCREASED); 2790 return -ENXIO; 2791 } 2792 2793 /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */ 2794 page = pfn_to_online_page(pfn); 2795 if (!page) { 2796 put_ref_page(pfn, flags); 2797 return -EIO; 2798 } 2799 2800 mutex_lock(&mf_mutex); 2801 2802 if (PageHWPoison(page)) { 2803 pr_info("%s: %#lx page already poisoned\n", __func__, pfn); 2804 put_ref_page(pfn, flags); 2805 mutex_unlock(&mf_mutex); 2806 return 0; 2807 } 2808 2809 retry: 2810 get_online_mems(); 2811 ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE); 2812 put_online_mems(); 2813 2814 if (hwpoison_filter(page)) { 2815 if (ret > 0) 2816 put_page(page); 2817 2818 mutex_unlock(&mf_mutex); 2819 return -EOPNOTSUPP; 2820 } 2821 2822 if (ret > 0) { 2823 ret = soft_offline_in_use_page(page); 2824 } else if (ret == 0) { 2825 if (!page_handle_poison(page, true, false)) { 2826 if (try_again) { 2827 try_again = false; 2828 flags &= ~MF_COUNT_INCREASED; 2829 goto retry; 2830 } 2831 ret = -EBUSY; 2832 } 2833 } 2834 2835 mutex_unlock(&mf_mutex); 2836 2837 return ret; 2838 } 2839
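/*
 * Example of how the soft-offline path above is typically driven from user
 * space (illustrative; both interfaces are implemented outside this file):
 *
 *	# a corrected-error policy daemon decides a physical address is suspect
 *	echo 0x200000000 > /sys/devices/system/memory/soft_offline_page
 *
 * or, for a page mapped in the caller's own address space (needs
 * CAP_SYS_ADMIN and CONFIG_MEMORY_FAILURE):
 *
 *	madvise(addr, pagesize, MADV_SOFT_OFFLINE);
 */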