/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 only as published by the
 * Free Software Foundation.
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a 2-bit ECC memory or cache
 * failure.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere, possibly violating some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * The operation to map back from RMAP chains to processes has to walk
 * the complete process list and has non-linear complexity with the number
 * of mappings. In short it can be quite slow. But since memory corruptions
 * are rare we hope to get away with this.
 */

/*
 * Notebook:
 * - hugetlb needs more code
 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
 * - pass bad pages to kdump next kernel
 */
#define DEBUG 1		/* remove me in 2.6.34 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include "internal.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);

#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	/*
	 * page_mapping() does not accept slab pages.
	 */
	if (PageSlab(p))
		return -EINVAL;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

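/*
 * Usage note (a sketch, not a guaranteed interface): these filters are
 * normally driven from user space through the debugfs knobs exported by
 * the hwpoison injector (mm/hwpoison-inject.c), so a test can restrict
 * injected errors to one device before poisoning anything, roughly:
 *
 *	echo 1 > /sys/kernel/debug/hwpoison/corrupt-filter-enable
 *	echo 8 > /sys/kernel/debug/hwpoison/corrupt-filter-dev-major
 *	echo 0 > /sys/kernel/debug/hwpoison/corrupt-filter-dev-minor
 *
 * The exact paths depend on where debugfs is mounted; see
 * Documentation/vm/hwpoison.txt for the authoritative list of knobs.
 */
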
/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Lastly, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	struct mem_cgroup *mem;
	struct cgroup_subsys_state *css;
	unsigned long ino;

	if (!hwpoison_filter_memcg)
		return 0;

	mem = try_get_mem_cgroup_from_page(p);
	if (!mem)
		return -EINVAL;

	css = mem_cgroup_css(mem);
	/* root_mem_cgroup has NULL dentries */
	if (!css->cgroup->dentry)
		return -EINVAL;

	ino = css->cgroup->dentry->d_inode->i_ino;
	css_put(css);

	if (ino != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);

/*
 * Send all the processes who have the page mapped an ``action optional''
 * signal.
 */
static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
			unsigned long pfn)
{
	struct siginfo si;
	int ret;

	printk(KERN_ERR
		"MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
		pfn, t->comm, t->pid);
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_MCEERR_AO;
	si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
	si.si_trapno = trapno;
#endif
	si.si_addr_lsb = PAGE_SHIFT;
	/*
	 * Don't use force here, it's convenient if the signal
	 * can be temporarily blocked.
	 * This could cause a loop when the user sets SIGBUS
	 * to SIG_IGN, but hopefully no one will do that?
	 */
	ret = send_sig_info(SIGBUS, &si, t);	/* synchronous? */
	if (ret < 0)
		printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
		       t->comm, t->pid, ret);
	return ret;
}

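/*
 * For context: a process opts in to these early kills either via the
 * vm.memory_failure_early_kill sysctl or per task with
 * prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0), and then
 * typically installs an SA_SIGINFO handler to inspect the siginfo fields
 * filled in above. A rough user space sketch (where the C library exposes
 * si_addr_lsb):
 *
 *	static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == BUS_MCEERR_AO) {
 *			size_t len = 1UL << si->si_addr_lsb;
 *			/. si->si_addr .. si->si_addr + len is lost;
 *			   discard or rebuild that data, or exit. ./
 *		}
 *	}
 *	...
 *	struct sigaction sa = { .sa_sigaction = sigbus_handler,
 *				.sa_flags = SA_SIGINFO };
 *	sigaction(SIGBUS, &sa, NULL);
 */
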
/*
 * When an unknown page type is encountered drain as many buffers as possible
 * in the hope of turning the page into an LRU or free page, which we can
 * handle.
 */
void shake_page(struct page *p, int access)
{
	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p))
			return;
		drain_all_pages();
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * Only call shrink_slab here (which would also
	 * shrink other caches) if access is not potentially fatal.
	 */
	if (access) {
		int nr;
		do {
			nr = shrink_slab(1000, GFP_KERNEL, 1000);
			if (page_count(p) == 0)
				break;
		} while (nr > 10);
	}
}
EXPORT_SYMBOL_GPL(shake_page);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handling it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	unsigned addr_valid:1;
};

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 * TBD would GFP_NOIO be enough?
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
			struct vm_area_struct *vma,
			struct list_head *to_kill,
			struct to_kill **tkc)
{
	struct to_kill *tk;

	if (*tkc) {
		tk = *tkc;
		*tkc = NULL;
	} else {
		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
		if (!tk) {
			printk(KERN_ERR
		"MCE: Out of memory while machine check handling\n");
			return;
		}
	}
	tk->addr = page_address_in_vma(p, vma);
	tk->addr_valid = 1;

	/*
	 * In theory we don't have to kill when the page was
	 * munmapped. But it could also be an mremap. Since that's
	 * likely very rare, kill anyway just out of paranoia, but use
	 * a SIGKILL because the error is not contained anymore.
	 */
	if (tk->addr == -EFAULT) {
		pr_debug("MCE: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
		tk->addr_valid = 0;
	}
	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when DOIT is set, otherwise just free the list
 * (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
			  int fail, unsigned long pfn)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (doit) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr_valid == 0) {
				printk(KERN_ERR
		"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
				force_sig(SIGKILL, tk->tsk);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyway.
			 */
			else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
					      pfn) < 0)
				printk(KERN_ERR
		"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

static int task_early_kill(struct task_struct *tsk)
{
	if (!tsk->mm)
		return 0;
	if (tsk->flags & PF_MCE_PROCESS)
		return !!(tsk->flags & PF_MCE_EARLY);
	return sysctl_memory_failure_early_kill;
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			       struct to_kill **tkc)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;

	read_lock(&tasklist_lock);
	av = page_lock_anon_vma(page);
	if (av == NULL)	/* Not actually mapped anymore */
		goto out;
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;

		if (!task_early_kill(tsk))
			continue;
		list_for_each_entry(vmac, &av->head, same_anon_vma) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	page_unlock_anon_vma(av);
out:
	read_unlock(&tasklist_lock);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			       struct to_kill **tkc)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct prio_tree_iter iter;
	struct address_space *mapping = page->mapping;

	/*
	 * A note on the locking order between the two locks.
	 * We don't rely on this particular order.
	 * If you have some other code that needs a different order
	 * feel free to switch them around. Or add a reverse link
	 * from mm_struct to task_struct, then this could be all
	 * done without taking tasklist_lock and looping over all tasks.
	 */

	read_lock(&tasklist_lock);
	spin_lock(&mapping->i_mmap_lock);
	for_each_process(tsk) {
		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

		if (!task_early_kill(tsk))
			continue;

		vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
	read_unlock(&tasklist_lock);
}

/*
 * Collect the processes who have the corrupted page mapped to kill.
 * This is done in two steps for locking reasons.
 * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliably.
 */
static void collect_procs(struct page *page, struct list_head *tokill)
{
	struct to_kill *tk;

	if (!page->mapping)
		return;

	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
	if (!tk)
		return;
	if (PageAnon(page))
		collect_procs_anon(page, tokill, &tk);
	else
		collect_procs_file(page, tokill, &tk);
	kfree(tk);
}

/*
 * Error handlers for various types of pages.
 */

enum outcome {
	IGNORED,	/* Error: cannot be handled */
	FAILED,		/* Error: handling failed */
	DELAYED,	/* Will be handled later */
	RECOVERED,	/* Successfully recovered */
};

static const char *action_name[] = {
	[IGNORED] = "Ignored",
	[FAILED] = "Failed",
	[DELAYED] = "Delayed",
	[RECOVERED] = "Recovered",
};

/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear the relevant page flags, so that the buddy system
		 * won't complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);
		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		page_cache_release(p);
		return 0;
	}
	return -EIO;
}

/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
	return IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
	printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
	return FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
	int err;
	int ret = FAILED;
	struct address_space *mapping;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one memory_failure() holds.
	 */
	if (PageAnon(p))
		return RECOVERED;

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meantime
		 */
		return FAILED;
	}

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_mutex or not for this? Right now we don't.
	 */
	if (mapping->a_ops->error_remove_page) {
		err = mapping->a_ops->error_remove_page(mapping, p);
		if (err != 0) {
			printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
					pfn, err);
		} else if (page_has_private(p) &&
				!try_to_release_page(p, GFP_NOIO)) {
			pr_debug("MCE %#lx: failed to release buffers\n", pfn);
		} else {
			ret = RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it just invalidate.
		 * This fails on dirty pages or anything with private data.
		 */
		if (invalidate_inode_page(p))
			ret = RECOVERED;
		else
			printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
				pfn);
	}
	return ret;
}

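/*
 * Background on the per-file-system opt-in above: a file system enables
 * truncation of poisoned pages simply by wiring up ->error_remove_page
 * in its address_space_operations, usually to the generic helper in
 * mm/truncate.c. A hypothetical aops snippet (illustrative only):
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.error_remove_page = generic_error_remove_page,
 *	};
 *
 * generic_error_remove_page() performs the "temporary hole punch"
 * described in me_pagecache_clean() for regular files.
 */
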
/*
 * Dirty page cache page.
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * which check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO errors
		 * back: first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd before
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, EIO);
	}

	return me_pagecache_clean(p, pfn);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (ie. page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *	- clear dirty bit to prevent IO
 *	- remove from LRU
 *	- but keep in the swap cache, so that when we return to it on
 *	  a later page fault, we know the application is accessing
 *	  corrupted data and shall be killed (we installed simple
 *	  interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	if (!delete_from_lru_cache(p))
		return DELAYED;
	else
		return FAILED;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	delete_from_swap_cache(p);

	if (!delete_from_lru_cache(p))
		return RECOVERED;
	else
		return FAILED;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * No rmap support so we cannot find the original mapper. In theory we could
 * walk all MMs and look for the mappings, but that would be non atomic and
 * racy. We need rmap for hugepages for this. Alternatively we could employ
 * a heuristic, like just walking the current process and hoping it has the
 * page mapped (that should usually be true for the common "shared database
 * cache" case). We should handle free huge pages and dequeue them too, but
 * this needs to handle huge page accounting correctly.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
	return FAILED;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access a page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		(1UL << PG_swapcache)
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define writeback	(1UL << PG_writeback)
#define lru		(1UL << PG_lru)
#define swapbacked	(1UL << PG_swapbacked)
#define head		(1UL << PG_head)
#define tail		(1UL << PG_tail)
#define compound	(1UL << PG_compound)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state {
	unsigned long mask;
	unsigned long res;
	char *msg;
	int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
	{ reserved,	reserved,	"reserved kernel",	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make up a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if the slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		"kernel slab",	me_kernel },

#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{ head,		head,		"huge",		me_huge_page },
	{ tail,		tail,		"huge",		me_huge_page },
#else
	{ compound,	compound,	"huge",		me_huge_page },
#endif

	{ sc|dirty,	sc|dirty,	"swapcache",	me_swapcache_dirty },
	{ sc|dirty,	sc,		"swapcache",	me_swapcache_clean },

	{ unevict|dirty, unevict|dirty,	"unevictable LRU", me_pagecache_dirty },
	{ unevict,	unevict,	"unevictable LRU", me_pagecache_clean },

	{ mlock|dirty,	mlock|dirty,	"mlocked LRU",	me_pagecache_dirty },
	{ mlock,	mlock,		"mlocked LRU",	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	"LRU",		me_pagecache_dirty },
	{ lru|dirty,	lru,		"clean LRU",	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		"unknown page state",	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef writeback
#undef lru
#undef swapbacked
#undef head
#undef tail
#undef compound
#undef slab
#undef reserved

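/*
 * How the table above is consumed (a worked example, no extra logic):
 * __memory_failure() walks error_states[] and takes the first entry for
 * which (p->flags & mask) == res. A dirty page on the LRU, i.e.
 * page->flags containing PG_dirty|PG_lru, falls through the earlier rows
 * and matches { lru|dirty, lru|dirty } -> me_pagecache_dirty, while the
 * same page without PG_dirty matches { lru|dirty, lru } ->
 * me_pagecache_clean. A page matching nothing else hits the catchall
 * entry and is only reported, not recovered.
 */
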
static void action_result(unsigned long pfn, char *msg, int result)
{
	struct page *page = pfn_to_page(pfn);

	printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
		pfn,
		PageDirty(page) ? "dirty " : "",
		msg, action_name[result]);
}

static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;
	int count;

	result = ps->action(p, pfn);
	action_result(pfn, ps->msg, result);

	count = page_count(p) - 1;
	if (ps->action == me_swapcache_dirty && result == DELAYED)
		count--;
	if (count != 0) {
		printk(KERN_ERR
		       "MCE %#lx: %s page still referenced by %d users\n",
		       pfn, ps->msg, count);
		result = FAILED;
	}

	/* Could do more checks here if page looks ok */
	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
}

#define N_UNMAP_TRIES 5

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int trapno)
{
	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	int ret;
	int i;
	int kill = 1;

	if (PageReserved(p) || PageSlab(p))
		return SWAP_SUCCESS;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(p))
		return SWAP_SUCCESS;

	if (PageCompound(p) || PageKsm(p))
		return SWAP_FAIL;

	if (PageSwapCache(p)) {
		printk(KERN_ERR
		       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(p);
	if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
		if (page_mkclean(p)) {
			SetPageDirty(p);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			printk(KERN_INFO
	"MCE %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form. This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(p, &tokill);

	/*
	 * try_to_unmap can fail temporarily due to races.
	 * Try a few times (RED-PEN better strategy?)
	 */
	for (i = 0; i < N_UNMAP_TRIES; i++) {
		ret = try_to_unmap(p, ttu);
		if (ret == SWAP_SUCCESS)
			break;
		pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret);
	}

	if (ret != SWAP_SUCCESS)
		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
				pfn, page_mapcount(p));

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps are done we can decide if
	 * killing is needed or not. Only kill when the page
	 * was dirty, otherwise the tokill list is merely
	 * freed. When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	kill_procs_ao(&tokill, !!PageDirty(p), trapno,
		      ret != SWAP_SUCCESS, pfn);

	return ret;
}

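/*
 * Entry points into the handling below: the architecture's machine check
 * code calls memory_failure()/__memory_failure() when it reports a
 * corrupted page. For testing, roughly the same path can be exercised
 * without real hardware errors, e.g. via madvise(MADV_HWPOISON) on a
 * mapped address (CAP_SYS_ADMIN required), or by writing a pfn to the
 * hwpoison injector's corrupt-pfn debugfs file when CONFIG_HWPOISON_INJECT
 * is enabled. Treat the exact interfaces as a sketch and see
 * Documentation/vm/hwpoison.txt for details.
 */
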
int __memory_failure(unsigned long pfn, int trapno, int flags)
{
	struct page_state *ps;
	struct page *p;
	int res;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure from trap %d on page %lx", trapno, pfn);

	if (!pfn_valid(pfn)) {
		printk(KERN_ERR
		       "MCE %#lx: memory outside kernel control\n",
		       pfn);
		return -ENXIO;
	}

	p = pfn_to_page(pfn);
	if (TestSetPageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
		return 0;
	}

	atomic_long_add(1, &mce_bad_pages);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hands:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: we cannot stop them from
	 *    reading/writing the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up the page count from 0,
	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED) &&
	    !get_page_unless_zero(compound_head(p))) {
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy", DELAYED);
			return 0;
		} else {
			action_result(pfn, "high order kernel", IGNORED);
			return -EBUSY;
		}
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __set_page_locked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	if (!PageLRU(p))
		shake_page(p, 0);
	if (!PageLRU(p)) {
		/*
		 * shake_page could have turned it free.
		 */
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy, 2nd try", DELAYED);
			return 0;
		}
		action_result(pfn, "non LRU", IGNORED);
		put_page(p);
		return -EBUSY;
	}

	/*
	 * Lock the page and wait for writeback to finish.
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	lock_page_nosync(p);

	/*
	 * unpoison always clears PG_hwpoison inside the page lock
	 */
	if (!PageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
		res = 0;
		goto out;
	}
	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(p))
			atomic_long_dec(&mce_bad_pages);
		unlock_page(p);
		put_page(p);
		return 0;
	}

	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __remove_from_page_cache() assumes unmapped page.
	 */
	if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
		res = -EBUSY;
		goto out;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, "already truncated LRU", IGNORED);
		res = -EBUSY;
		goto out;
	}

	res = -EBUSY;
	for (ps = error_states;; ps++) {
		if ((p->flags & ps->mask) == ps->res) {
			res = page_action(ps, p, pfn);
			break;
		}
	}
out:
	unlock_page(p);
	return res;
}
EXPORT_SYMBOL_GPL(__memory_failure);

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
void memory_failure(unsigned long pfn, int trapno)
{
	__memory_failure(pfn, trapno, 0);
}

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn);
		return 0;
	}

	if (!get_page_unless_zero(page)) {
		if (TestClearPageHWPoison(p))
			atomic_long_dec(&mce_bad_pages);
		pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
		return 0;
	}

	lock_page_nosync(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of the page
	 * lock. That's acceptable because it won't trigger a kernel panic.
	 * Instead, the PG_hwpoison page will be caught and isolated on the
	 * entrance to the free buddy page pool.
	 */
	if (TestClearPageHWPoison(p)) {
		pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
		atomic_long_dec(&mce_bad_pages);
		freeit = 1;
	}
	unlock_page(page);

	put_page(page);
	if (freeit)
		put_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);

static struct page *new_page(struct page *p, unsigned long private, int **x)
{
	int nid = page_to_nid(p);
	return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Safely get the reference count of an arbitrary page.
 * Returns 0 for a free page, -EIO for a zero refcount page
 * that is not free, and 1 for any other page type.
 * For 1 the page is returned with an increased page count, otherwise not.
 */
static int get_any_page(struct page *p, unsigned long pfn, int flags)
{
	int ret;

	if (flags & MF_COUNT_INCREASED)
		return 1;

	/*
	 * The lock_system_sleep prevents a race with memory hotplug,
	 * because the isolation assumes there's only a single user.
	 * This is a big hammer, a better approach would be nicer.
	 */
	lock_system_sleep();

	/*
	 * Isolate the page, so that it doesn't get reallocated if it
	 * was free.
	 */
	set_migratetype_isolate(p);
	if (!get_page_unless_zero(compound_head(p))) {
		if (is_free_buddy_page(p)) {
			pr_debug("get_any_page: %#lx free buddy page\n", pfn);
			/* Set hwpoison bit while page is still isolated */
			SetPageHWPoison(p);
			ret = 0;
		} else {
			pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n",
				pfn, p->flags);
			ret = -EIO;
		}
	} else {
		/* Not a free page */
		ret = 1;
	}
	unset_migratetype_isolate(p);
	unlock_system_sleep();
	return ret;
}

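/*
 * As with hard offlining, the policy for soft offlining lives in user
 * space. A page is typically pushed into soft_offline_page() below either
 * through madvise(MADV_SOFT_OFFLINE) on a mapped address or by writing
 * its pfn to the memory sysfs interface (soft_offline_page under
 * /sys/devices/system/memory/). The exact user space interface names are
 * best checked against the documentation of the running kernel; the above
 * is a sketch of the common paths.
 */
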
/**
 * soft_offline_page - Soft offline a page.
 * @page: page to offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);

	ret = get_any_page(page, pfn, flags);
	if (ret < 0)
		return ret;
	if (ret == 0)
		goto done;

	/*
	 * Page cache page we can handle?
	 */
	if (!PageLRU(page)) {
		/*
		 * Try to free it.
		 */
		put_page(page);
		shake_page(page, 1);

		/*
		 * Did it turn free?
		 */
		ret = get_any_page(page, pfn, 0);
		if (ret < 0)
			return ret;
		if (ret == 0)
			goto done;
	}
	if (!PageLRU(page)) {
		pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n",
				pfn, page->flags);
		return -EIO;
	}

	lock_page(page);
	wait_on_page_writeback(page);

	/*
	 * Synchronized using the page lock with memory_failure()
	 */
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_debug("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}

	/*
	 * Try to invalidate first. This should work for
	 * non dirty unmapped page cache pages.
	 */
	ret = invalidate_inode_page(page);
	unlock_page(page);

	/*
	 * Drop count because page migration doesn't like raised
	 * counts. The page could get re-allocated, but if it becomes
	 * LRU the isolation will just fail.
	 * RED-PEN would be better to keep it isolated here, but we
	 * would need to fix isolation locking first.
	 */
	put_page(page);
	if (ret == 1) {
		ret = 0;
		pr_debug("soft_offline: %#lx: invalidated\n", pfn);
		goto done;
	}

	/*
	 * Simple invalidation didn't work.
	 * Try to migrate to a new page instead. migrate.c
	 * handles a large number of cases for us.
	 */
	ret = isolate_lru_page(page);
	if (!ret) {
		LIST_HEAD(pagelist);

		list_add(&page->lru, &pagelist);
		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
		if (ret) {
			pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
				pfn, ret, page->flags);
			if (ret > 0)
				ret = -EIO;
		}
	} else {
		pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
			pfn, ret, page_count(page), page->flags);
	}
	if (ret)
		return ret;

done:
	atomic_long_add(1, &mce_bad_pages);
	SetPageHWPoison(page);
	/* keep elevated page count for bad page */
	return ret;
}

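/*
 * Illustrative use of the helper below (a sketch; a hypervisor page fault
 * path is the kind of caller this is meant for): with
 * current->mm->mmap_sem held for read, a caller can ask whether a user
 * virtual address is backed by a hwpoison swap entry, e.g.
 *
 *	down_read(&current->mm->mmap_sem);
 *	poisoned = is_hwpoison_address(addr);
 *	up_read(&current->mm->mmap_sem);
 *
 * and then report the access as a memory failure instead of a normal
 * fault.
 */
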
/*
 * The caller must hold current->mm->mmap_sem in read mode.
 */
int is_hwpoison_address(unsigned long addr)
{
	pgd_t *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t pte, *ptep;
	swp_entry_t entry;

	pgdp = pgd_offset(current->mm, addr);
	if (!pgd_present(*pgdp))
		return 0;
	pudp = pud_offset(pgdp, addr);
	pud = *pudp;
	if (!pud_present(pud) || pud_large(pud))
		return 0;
	pmdp = pmd_offset(pudp, addr);
	pmd = *pmdp;
	if (!pmd_present(pmd) || pmd_large(pmd))
		return 0;
	ptep = pte_offset_map(pmdp, addr);
	pte = *ptep;
	pte_unmap(ptep);
	if (!is_swap_pte(pte))
		return 0;
	entry = pte_to_swp_entry(pte);
	return is_hwpoison_entry(entry);
}
EXPORT_SYMBOL_GPL(is_hwpoison_address);