xref: /linux/mm/memory-failure.c (revision 31354121bf03dac6498a4236928a38490745d601)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2008, 2009 Intel Corporation
4  * Authors: Andi Kleen, Fengguang Wu
5  *
6  * High level machine check handler. Handles pages reported by the
7  * hardware as being corrupted, usually due to a multi-bit ECC memory or cache
8  * failure.
9  *
10  * In addition there is a "soft offline" entry point that allows stopping the
11  * use of not-yet-corrupted-but-suspicious pages without killing anything.
12  *
13  * Handles page cache pages in various states.	The tricky part
14  * here is that we can access any page asynchronously with respect to
15  * other VM users, because memory failures could happen anytime and
16  * anywhere. This could violate some of their assumptions. This is why
17  * this code has to be extremely careful. Generally it tries to use
18  * normal locking rules, as in get the standard locks, even if that means
19  * the error handling takes potentially a long time.
20  *
21  * It can be very tempting to add handling for obscure cases here.
22  * In general any code for handling new cases should only be added iff:
23  * - You know how to test it.
24  * - You have a test that can be added to mce-test
25  *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
26  * - The case actually shows up as a frequent (top 10) page state in
27  *   tools/vm/page-types when running a real workload.
28  *
29  * There are several operations here with exponential complexity because
30  * of unsuitable VM data structures. For example, the operation to map back
31  * from RMAP chains to processes has to walk the complete process list and
32  * has non-linear complexity in the number of processes. But since memory
33  * corruptions are rare we hope to get away with this. This avoids impacting
34  * the core VM.
35  */
36 
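/*
 * Illustrative sketch (added for exposition; assumes CAP_SYS_ADMIN and a
 * kernel built with CONFIG_MEMORY_FAILURE): in testing, the paths below
 * are usually exercised by injecting an error from user space, e.g. with
 * madvise(MADV_HWPOISON) for the memory_failure() path or
 * MADV_SOFT_OFFLINE for the soft offline path:
 *
 *	#define _GNU_SOURCE
 *	#include <stdlib.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		void *p;
 *
 *		if (posix_memalign(&p, psz, psz))
 *			return 1;
 *		*(volatile char *)p = 1;	// fault the page in
 *		// Pretend this page suffered an uncorrected memory error.
 *		return madvise(p, psz, MADV_HWPOISON) ? 1 : 0;
 *	}
 */
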
37 #define pr_fmt(fmt) "Memory failure: " fmt
38 
39 #include <linux/kernel.h>
40 #include <linux/mm.h>
41 #include <linux/page-flags.h>
42 #include <linux/kernel-page-flags.h>
43 #include <linux/sched/signal.h>
44 #include <linux/sched/task.h>
45 #include <linux/dax.h>
46 #include <linux/ksm.h>
47 #include <linux/rmap.h>
48 #include <linux/export.h>
49 #include <linux/pagemap.h>
50 #include <linux/swap.h>
51 #include <linux/backing-dev.h>
52 #include <linux/migrate.h>
53 #include <linux/suspend.h>
54 #include <linux/slab.h>
55 #include <linux/swapops.h>
56 #include <linux/hugetlb.h>
57 #include <linux/memory_hotplug.h>
58 #include <linux/mm_inline.h>
59 #include <linux/memremap.h>
60 #include <linux/kfifo.h>
61 #include <linux/ratelimit.h>
62 #include <linux/page-isolation.h>
63 #include <linux/pagewalk.h>
64 #include <linux/shmem_fs.h>
65 #include "swap.h"
66 #include "internal.h"
67 #include "ras/ras_event.h"
68 
69 int sysctl_memory_failure_early_kill __read_mostly = 0;
70 
71 int sysctl_memory_failure_recovery __read_mostly = 1;
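
/*
 * Both knobs above are exposed to user space as
 * /proc/sys/vm/memory_failure_early_kill and
 * /proc/sys/vm/memory_failure_recovery (see
 * Documentation/admin-guide/sysctl/vm.rst).  Writing 1 to the former makes
 * processes that have not set a per-process policy with prctl(PR_MCE_KILL)
 * behave as "early kill" in task_early_kill() below.
 */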
72 
73 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
74 
75 static bool hw_memory_failure __read_mostly = false;
76 
77 /*
78  * Return values:
79  *   1:   the page is dissolved (if needed) and taken off from buddy,
80  *   0:   the page is dissolved (if needed) and not taken off from buddy,
81  *   < 0: failed to dissolve.
82  */
83 static int __page_handle_poison(struct page *page)
84 {
85 	int ret;
86 
87 	zone_pcp_disable(page_zone(page));
88 	ret = dissolve_free_huge_page(page);
89 	if (!ret)
90 		ret = take_page_off_buddy(page);
91 	zone_pcp_enable(page_zone(page));
92 
93 	return ret;
94 }
95 
96 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
97 {
98 	if (hugepage_or_freepage) {
99 		/*
100 		 * Doing this check for free pages is also fine since dissolve_free_huge_page
101 		 * returns 0 for non-hugetlb pages as well.
102 		 */
103 		if (__page_handle_poison(page) <= 0)
104 			/*
105 			 * We could fail to take the target page off the buddy
106 			 * list, for example due to a racy page allocation, but
107 			 * that's acceptable because the soft-offlined page is not
108 			 * broken and if someone really wants to use it, they
109 			 * should take it.
110 			 */
111 			return false;
112 	}
113 
114 	SetPageHWPoison(page);
115 	if (release)
116 		put_page(page);
117 	page_ref_inc(page);
118 	num_poisoned_pages_inc();
119 
120 	return true;
121 }
122 
123 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
124 
125 u32 hwpoison_filter_enable = 0;
126 u32 hwpoison_filter_dev_major = ~0U;
127 u32 hwpoison_filter_dev_minor = ~0U;
128 u64 hwpoison_filter_flags_mask;
129 u64 hwpoison_filter_flags_value;
130 EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
131 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
132 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
133 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
134 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
135 
136 static int hwpoison_filter_dev(struct page *p)
137 {
138 	struct address_space *mapping;
139 	dev_t dev;
140 
141 	if (hwpoison_filter_dev_major == ~0U &&
142 	    hwpoison_filter_dev_minor == ~0U)
143 		return 0;
144 
145 	mapping = page_mapping(p);
146 	if (mapping == NULL || mapping->host == NULL)
147 		return -EINVAL;
148 
149 	dev = mapping->host->i_sb->s_dev;
150 	if (hwpoison_filter_dev_major != ~0U &&
151 	    hwpoison_filter_dev_major != MAJOR(dev))
152 		return -EINVAL;
153 	if (hwpoison_filter_dev_minor != ~0U &&
154 	    hwpoison_filter_dev_minor != MINOR(dev))
155 		return -EINVAL;
156 
157 	return 0;
158 }
159 
160 static int hwpoison_filter_flags(struct page *p)
161 {
162 	if (!hwpoison_filter_flags_mask)
163 		return 0;
164 
165 	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
166 				    hwpoison_filter_flags_value)
167 		return 0;
168 	else
169 		return -EINVAL;
170 }
171 
172 /*
173  * This allows stress tests to limit test scope to a collection of tasks
174  * by putting them under some memcg. This prevents killing unrelated/important
175  * processes such as /sbin/init. Note that the target task may share clean
176  * pages with init (e.g. libc text), which is harmless. If the target task
177  * shares _dirty_ pages with another task B, the test scheme must make sure B
178  * is also included in the memcg. Finally, due to race conditions this filter
179  * can only guarantee that the page either belongs to the memcg tasks, or is
180  * a freed page.
181  */
182 #ifdef CONFIG_MEMCG
183 u64 hwpoison_filter_memcg;
184 EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
185 static int hwpoison_filter_task(struct page *p)
186 {
187 	if (!hwpoison_filter_memcg)
188 		return 0;
189 
190 	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
191 		return -EINVAL;
192 
193 	return 0;
194 }
195 #else
196 static int hwpoison_filter_task(struct page *p) { return 0; }
197 #endif
198 
199 int hwpoison_filter(struct page *p)
200 {
201 	if (!hwpoison_filter_enable)
202 		return 0;
203 
204 	if (hwpoison_filter_dev(p))
205 		return -EINVAL;
206 
207 	if (hwpoison_filter_flags(p))
208 		return -EINVAL;
209 
210 	if (hwpoison_filter_task(p))
211 		return -EINVAL;
212 
213 	return 0;
214 }
215 #else
216 int hwpoison_filter(struct page *p)
217 {
218 	return 0;
219 }
220 #endif
221 
222 EXPORT_SYMBOL_GPL(hwpoison_filter);
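/*
 * The filter above is driven from user space through the hwpoison-inject
 * debugfs interface (CONFIG_HWPOISON_INJECT, mm/hwpoison-inject.c): the
 * hwpoison_filter_* parameters correspond to
 * /sys/kernel/debug/hwpoison/corrupt-filter-* files, and writing a pfn to
 * /sys/kernel/debug/hwpoison/corrupt-pfn injects an error that is subject
 * to this filtering.  The paths are given here for orientation only; see
 * the injector module for the authoritative interface.
 */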
223 
224 /*
225  * Kill all processes that have a poisoned page mapped and then isolate
226  * the page.
227  *
228  * General strategy:
229  * Find all processes having the page mapped and kill them.
230  * But we keep a page reference around so that the page is not
231  * actually freed yet.
232  * Then stash the page away.
233  *
234  * There's no convenient way to get back to mapped processes
235  * from the VMAs. So do a brute-force search over all
236  * running processes.
237  *
238  * Remember that machine checks are not common (or rather
239  * if they are common you have other problems), so this shouldn't
240  * be a performance issue.
241  *
242  * Also there are some races possible while we get from the
243  * error detection to actually handle it.
244  */
245 
246 struct to_kill {
247 	struct list_head nd;
248 	struct task_struct *tsk;
249 	unsigned long addr;
250 	short size_shift;
251 };
252 
253 /*
254  * Send a signal to all the processes that have the page mapped:
255  * ``action optional'' if they are not immediately affected by the error,
256  * ``action required'' if the error happened in the current execution context.
257  */
258 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
259 {
260 	struct task_struct *t = tk->tsk;
261 	short addr_lsb = tk->size_shift;
262 	int ret = 0;
263 
264 	pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
265 			pfn, t->comm, t->pid);
266 
267 	if ((flags & MF_ACTION_REQUIRED) && (t == current))
268 		ret = force_sig_mceerr(BUS_MCEERR_AR,
269 				 (void __user *)tk->addr, addr_lsb);
270 	else
271 		/*
272 		 * Signal other processes sharing the page if they have
273 		 * PF_MCE_EARLY set.
274 		 * Don't use force here, it's convenient if the signal
275 		 * can be temporarily blocked.
276 		 * This could cause a loop when the user sets SIGBUS
277 		 * to SIG_IGN, but hopefully no one will do that?
278 		 */
279 		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
280 				      addr_lsb, t);
281 	if (ret < 0)
282 		pr_info("Error sending signal to %s:%d: %d\n",
283 			t->comm, t->pid, ret);
284 	return ret;
285 }
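
/*
 * Receiver-side sketch (added for exposition; function names are
 * illustrative, not part of the kernel): the SIGBUS sent above carries the
 * poisoned address and the log2 size of the affected mapping in siginfo,
 * so a recovery-aware application that opted in to early kill can do
 * roughly the following.
 *
 *	#include <signal.h>
 *	#include <sys/prctl.h>
 *
 *	static void mce_handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		// si->si_code is BUS_MCEERR_AO (action optional) or
 *		// BUS_MCEERR_AR (action required); the lost range starts at
 *		// si->si_addr and spans 1UL << si->si_addr_lsb bytes.
 *	}
 *
 *	static void setup_mce_handling(void)
 *	{
 *		struct sigaction sa = {
 *			.sa_sigaction = mce_handler,
 *			.sa_flags = SA_SIGINFO,
 *		};
 *
 *		sigaction(SIGBUS, &sa, NULL);
 *		// Request early "action optional" signals (sets PF_MCE_EARLY).
 *		prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
 *	}
 */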
286 
287 /*
288  * Unknown page type encountered. Try to check whether the page can become
289  * PageLRU by draining the per-CPU LRU caches via lru_add_drain_all().
290  */
291 void shake_page(struct page *p)
292 {
293 	if (PageHuge(p))
294 		return;
295 
296 	if (!PageSlab(p)) {
297 		lru_add_drain_all();
298 		if (PageLRU(p) || is_free_buddy_page(p))
299 			return;
300 	}
301 
302 	/*
303 	 * TODO: Could shrink slab caches here if a lightweight range-based
304 	 * shrinker will be available.
305 	 */
306 }
307 EXPORT_SYMBOL_GPL(shake_page);
308 
309 static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
310 		unsigned long address)
311 {
312 	unsigned long ret = 0;
313 	pgd_t *pgd;
314 	p4d_t *p4d;
315 	pud_t *pud;
316 	pmd_t *pmd;
317 	pte_t *pte;
318 
319 	VM_BUG_ON_VMA(address == -EFAULT, vma);
320 	pgd = pgd_offset(vma->vm_mm, address);
321 	if (!pgd_present(*pgd))
322 		return 0;
323 	p4d = p4d_offset(pgd, address);
324 	if (!p4d_present(*p4d))
325 		return 0;
326 	pud = pud_offset(p4d, address);
327 	if (!pud_present(*pud))
328 		return 0;
329 	if (pud_devmap(*pud))
330 		return PUD_SHIFT;
331 	pmd = pmd_offset(pud, address);
332 	if (!pmd_present(*pmd))
333 		return 0;
334 	if (pmd_devmap(*pmd))
335 		return PMD_SHIFT;
336 	pte = pte_offset_map(pmd, address);
337 	if (pte_present(*pte) && pte_devmap(*pte))
338 		ret = PAGE_SHIFT;
339 	pte_unmap(pte);
340 	return ret;
341 }
342 
343 /*
344  * Failure handling: if we can't find or can't kill a process there's
345  * not much we can do.	We just print a message and otherwise ignore the error.
346  */
347 
348 #define FSDAX_INVALID_PGOFF ULONG_MAX
349 
350 /*
351  * Schedule a process for later kill.
352  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
353  *
354  * Note: @fsdax_pgoff is used only when @p is a fsdax page and a
355  * filesystem with a memory failure handler has claimed the
356  * memory_failure event. In all other cases, page->index and
357  * page->mapping are sufficient for mapping the page back to its
358  * corresponding user virtual address.
359  */
360 static void add_to_kill(struct task_struct *tsk, struct page *p,
361 			pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
362 			struct list_head *to_kill)
363 {
364 	struct to_kill *tk;
365 
366 	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
367 	if (!tk) {
368 		pr_err("Out of memory while machine check handling\n");
369 		return;
370 	}
371 
372 	tk->addr = page_address_in_vma(p, vma);
373 	if (is_zone_device_page(p)) {
374 		if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
375 			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
376 		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
377 	} else
378 		tk->size_shift = page_shift(compound_head(p));
379 
380 	/*
381 	 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
382 	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
383 	 * "tk->size_shift == 0" effectively checks for no mapping on
384 	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
385 	 * to a process' address space, it's possible not all N VMAs
386 	 * contain mappings for the page, but at least one VMA does.
387 	 * Only deliver SIGBUS with payload derived from the VMA that
388 	 * has a mapping for the page.
389 	 */
390 	if (tk->addr == -EFAULT) {
391 		pr_info("Unable to find user space address %lx in %s\n",
392 			page_to_pfn(p), tsk->comm);
393 	} else if (tk->size_shift == 0) {
394 		kfree(tk);
395 		return;
396 	}
397 
398 	get_task_struct(tsk);
399 	tk->tsk = tsk;
400 	list_add_tail(&tk->nd, to_kill);
401 }
402 
403 /*
404  * Kill the processes that have been collected earlier.
405  *
406  * Only do anything when FORCEKILL is set, otherwise just free the
407  * list (this is used for clean pages which do not need killing).
408  * Also, when FAIL is set, do a force kill because something went
409  * wrong earlier.
410  */
411 static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
412 		unsigned long pfn, int flags)
413 {
414 	struct to_kill *tk, *next;
415 
416 	list_for_each_entry_safe(tk, next, to_kill, nd) {
417 		if (forcekill) {
418 			/*
419 			 * In case something went wrong with munmapping
420 			 * make sure the process doesn't catch the
421 			 * signal and then access the memory. Just kill it.
422 			 */
423 			if (fail || tk->addr == -EFAULT) {
424 				pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
425 				       pfn, tk->tsk->comm, tk->tsk->pid);
426 				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
427 						 tk->tsk, PIDTYPE_PID);
428 			}
429 
430 			/*
431 			 * In theory the process could have mapped
432 			 * something else on the address in-between. We could
433 			 * check for that, but we need to tell the
434 			 * process anyways.
435 			 */
436 			else if (kill_proc(tk, pfn, flags) < 0)
437 				pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
438 				       pfn, tk->tsk->comm, tk->tsk->pid);
439 		}
440 		list_del(&tk->nd);
441 		put_task_struct(tk->tsk);
442 		kfree(tk);
443 	}
444 }
445 
446 /*
447  * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
448  * on behalf of the thread group. Return task_struct of the (first found)
449  * dedicated thread if found, and return NULL otherwise.
450  *
451  * We already hold read_lock(&tasklist_lock) in the caller, so we don't
452  * have to call rcu_read_lock/unlock() in this function.
453  */
454 static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
455 {
456 	struct task_struct *t;
457 
458 	for_each_thread(tsk, t) {
459 		if (t->flags & PF_MCE_PROCESS) {
460 			if (t->flags & PF_MCE_EARLY)
461 				return t;
462 		} else {
463 			if (sysctl_memory_failure_early_kill)
464 				return t;
465 		}
466 	}
467 	return NULL;
468 }
469 
470 /*
471  * Determine whether a given process is an "early kill" process, i.e. one that
472  * expects to be signaled when some page under the process is hwpoisoned.
473  * Return the task_struct of the dedicated thread (main thread unless explicitly
474  * specified) if the process is "early kill", and NULL otherwise.
475  *
476  * Note that the above holds for the Action Optional case. In the Action
477  * Required case, only the current thread needs to be signaled with SIGBUS;
478  * the error is Action Optional for other non-current processes sharing the
479  * same error page. If such a process is "early kill", the task_struct of its
480  * dedicated thread will also be returned.
481  */
482 static struct task_struct *task_early_kill(struct task_struct *tsk,
483 					   int force_early)
484 {
485 	if (!tsk->mm)
486 		return NULL;
487 	/*
488 	 * Comparing ->mm here because current task might represent
489 	 * a subthread, while tsk always points to the main thread.
490 	 */
491 	if (force_early && tsk->mm == current->mm)
492 		return current;
493 
494 	return find_early_kill_thread(tsk);
495 }
496 
497 /*
498  * Collect processes when the error hit an anonymous page.
499  */
500 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
501 				int force_early)
502 {
503 	struct folio *folio = page_folio(page);
504 	struct vm_area_struct *vma;
505 	struct task_struct *tsk;
506 	struct anon_vma *av;
507 	pgoff_t pgoff;
508 
509 	av = folio_lock_anon_vma_read(folio, NULL);
510 	if (av == NULL)	/* Not actually mapped anymore */
511 		return;
512 
513 	pgoff = page_to_pgoff(page);
514 	read_lock(&tasklist_lock);
515 	for_each_process (tsk) {
516 		struct anon_vma_chain *vmac;
517 		struct task_struct *t = task_early_kill(tsk, force_early);
518 
519 		if (!t)
520 			continue;
521 		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
522 					       pgoff, pgoff) {
523 			vma = vmac->vma;
524 			if (vma->vm_mm != t->mm)
525 				continue;
526 			if (!page_mapped_in_vma(page, vma))
527 				continue;
528 			add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma, to_kill);
529 		}
530 	}
531 	read_unlock(&tasklist_lock);
532 	anon_vma_unlock_read(av);
533 }
534 
535 /*
536  * Collect processes when the error hit a file mapped page.
537  */
538 static void collect_procs_file(struct page *page, struct list_head *to_kill,
539 				int force_early)
540 {
541 	struct vm_area_struct *vma;
542 	struct task_struct *tsk;
543 	struct address_space *mapping = page->mapping;
544 	pgoff_t pgoff;
545 
546 	i_mmap_lock_read(mapping);
547 	read_lock(&tasklist_lock);
548 	pgoff = page_to_pgoff(page);
549 	for_each_process(tsk) {
550 		struct task_struct *t = task_early_kill(tsk, force_early);
551 
552 		if (!t)
553 			continue;
554 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
555 				      pgoff) {
556 			/*
557 			 * Send an early kill signal to tasks whose vma covers
558 			 * the page even if the corrupted page is not necessarily
559 			 * mapped in their page tables.
560 			 * Assume applications that requested early kill want
561 			 * to be informed of all such data corruptions.
562 			 */
563 			if (vma->vm_mm == t->mm)
564 				add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
565 					    to_kill);
566 		}
567 	}
568 	read_unlock(&tasklist_lock);
569 	i_mmap_unlock_read(mapping);
570 }
571 
572 #ifdef CONFIG_FS_DAX
573 /*
574  * Collect processes when the error hit a fsdax page.
575  */
576 static void collect_procs_fsdax(struct page *page,
577 		struct address_space *mapping, pgoff_t pgoff,
578 		struct list_head *to_kill)
579 {
580 	struct vm_area_struct *vma;
581 	struct task_struct *tsk;
582 
583 	i_mmap_lock_read(mapping);
584 	read_lock(&tasklist_lock);
585 	for_each_process(tsk) {
586 		struct task_struct *t = task_early_kill(tsk, true);
587 
588 		if (!t)
589 			continue;
590 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
591 			if (vma->vm_mm == t->mm)
592 				add_to_kill(t, page, pgoff, vma, to_kill);
593 		}
594 	}
595 	read_unlock(&tasklist_lock);
596 	i_mmap_unlock_read(mapping);
597 }
598 #endif /* CONFIG_FS_DAX */
599 
600 /*
601  * Collect the processes who have the corrupted page mapped to kill.
602  */
603 static void collect_procs(struct page *page, struct list_head *tokill,
604 				int force_early)
605 {
606 	if (!page->mapping)
607 		return;
608 
609 	if (PageAnon(page))
610 		collect_procs_anon(page, tokill, force_early);
611 	else
612 		collect_procs_file(page, tokill, force_early);
613 }
614 
615 struct hwp_walk {
616 	struct to_kill tk;
617 	unsigned long pfn;
618 	int flags;
619 };
620 
621 static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
622 {
623 	tk->addr = addr;
624 	tk->size_shift = shift;
625 }
626 
627 static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
628 				unsigned long poisoned_pfn, struct to_kill *tk)
629 {
630 	unsigned long pfn = 0;
631 
632 	if (pte_present(pte)) {
633 		pfn = pte_pfn(pte);
634 	} else {
635 		swp_entry_t swp = pte_to_swp_entry(pte);
636 
637 		if (is_hwpoison_entry(swp))
638 			pfn = swp_offset_pfn(swp);
639 	}
640 
641 	if (!pfn || pfn != poisoned_pfn)
642 		return 0;
643 
644 	set_to_kill(tk, addr, shift);
645 	return 1;
646 }
647 
648 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
649 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
650 				      struct hwp_walk *hwp)
651 {
652 	pmd_t pmd = *pmdp;
653 	unsigned long pfn;
654 	unsigned long hwpoison_vaddr;
655 
656 	if (!pmd_present(pmd))
657 		return 0;
658 	pfn = pmd_pfn(pmd);
659 	if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
660 		hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
661 		set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
662 		return 1;
663 	}
664 	return 0;
665 }
666 #else
667 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
668 				      struct hwp_walk *hwp)
669 {
670 	return 0;
671 }
672 #endif
673 
674 static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
675 			      unsigned long end, struct mm_walk *walk)
676 {
677 	struct hwp_walk *hwp = walk->private;
678 	int ret = 0;
679 	pte_t *ptep, *mapped_pte;
680 	spinlock_t *ptl;
681 
682 	ptl = pmd_trans_huge_lock(pmdp, walk->vma);
683 	if (ptl) {
684 		ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
685 		spin_unlock(ptl);
686 		goto out;
687 	}
688 
689 	if (pmd_trans_unstable(pmdp))
690 		goto out;
691 
692 	mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
693 						addr, &ptl);
694 	for (; addr != end; ptep++, addr += PAGE_SIZE) {
695 		ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
696 					     hwp->pfn, &hwp->tk);
697 		if (ret == 1)
698 			break;
699 	}
700 	pte_unmap_unlock(mapped_pte, ptl);
701 out:
702 	cond_resched();
703 	return ret;
704 }
705 
706 #ifdef CONFIG_HUGETLB_PAGE
707 static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
708 			    unsigned long addr, unsigned long end,
709 			    struct mm_walk *walk)
710 {
711 	struct hwp_walk *hwp = walk->private;
712 	pte_t pte = huge_ptep_get(ptep);
713 	struct hstate *h = hstate_vma(walk->vma);
714 
715 	return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
716 				      hwp->pfn, &hwp->tk);
717 }
718 #else
719 #define hwpoison_hugetlb_range	NULL
720 #endif
721 
722 static const struct mm_walk_ops hwp_walk_ops = {
723 	.pmd_entry = hwpoison_pte_range,
724 	.hugetlb_entry = hwpoison_hugetlb_range,
725 };
726 
727 /*
728  * Sends SIGBUS to the current process with error info.
729  *
730  * This function is intended to handle "Action Required" MCEs on already
731  * hardware poisoned pages. They could happen, for example, when
732  * memory_failure() failed to unmap the error page at the first call, or
733  * when multiple local machine checks happened on different CPUs.
734  *
735  * The MCE handler currently has no easy access to the error virtual address,
736  * so this function walks the page tables to find it. The returned virtual
737  * address is correct in most cases, but it could be wrong when the application
738  * process has multiple entries mapping the error page.
739  */
740 static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
741 				  int flags)
742 {
743 	int ret;
744 	struct hwp_walk priv = {
745 		.pfn = pfn,
746 	};
747 	priv.tk.tsk = p;
748 
749 	if (!p->mm)
750 		return -EFAULT;
751 
752 	mmap_read_lock(p->mm);
753 	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
754 			      (void *)&priv);
755 	if (ret == 1 && priv.tk.addr)
756 		kill_proc(&priv.tk, pfn, flags);
757 	else
758 		ret = 0;
759 	mmap_read_unlock(p->mm);
760 	return ret > 0 ? -EHWPOISON : -EFAULT;
761 }
762 
763 static const char *action_name[] = {
764 	[MF_IGNORED] = "Ignored",
765 	[MF_FAILED] = "Failed",
766 	[MF_DELAYED] = "Delayed",
767 	[MF_RECOVERED] = "Recovered",
768 };
769 
770 static const char * const action_page_types[] = {
771 	[MF_MSG_KERNEL]			= "reserved kernel page",
772 	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
773 	[MF_MSG_SLAB]			= "kernel slab page",
774 	[MF_MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
775 	[MF_MSG_HUGE]			= "huge page",
776 	[MF_MSG_FREE_HUGE]		= "free huge page",
777 	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
778 	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
779 	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
780 	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
781 	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
782 	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
783 	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
784 	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
785 	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
786 	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
787 	[MF_MSG_BUDDY]			= "free buddy page",
788 	[MF_MSG_DAX]			= "dax page",
789 	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
790 	[MF_MSG_UNKNOWN]		= "unknown page",
791 };
792 
793 /*
794  * XXX: It is possible that a page is isolated from LRU cache,
795  * and then kept in swap cache or fails to be removed from page cache.
796  * The page count will stop it from being freed by unpoison.
797  * Stress tests should be aware of this memory leak problem.
798  */
799 static int delete_from_lru_cache(struct page *p)
800 {
801 	if (!isolate_lru_page(p)) {
802 		/*
803 		 * Clear sensible page flags, so that the buddy system won't
804 		 * complain when the page is unpoison-and-freed.
805 		 */
806 		ClearPageActive(p);
807 		ClearPageUnevictable(p);
808 
809 		/*
810 		 * Poisoned page might never drop its ref count to 0 so we have
811 		 * to uncharge it manually from its memcg.
812 		 */
813 		mem_cgroup_uncharge(page_folio(p));
814 
815 		/*
816 		 * drop the page count elevated by isolate_lru_page()
817 		 */
818 		put_page(p);
819 		return 0;
820 	}
821 	return -EIO;
822 }
823 
824 static int truncate_error_page(struct page *p, unsigned long pfn,
825 				struct address_space *mapping)
826 {
827 	int ret = MF_FAILED;
828 
829 	if (mapping->a_ops->error_remove_page) {
830 		int err = mapping->a_ops->error_remove_page(mapping, p);
831 
832 		if (err != 0) {
833 			pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
834 		} else if (page_has_private(p) &&
835 			   !try_to_release_page(p, GFP_NOIO)) {
836 			pr_info("%#lx: failed to release buffers\n", pfn);
837 		} else {
838 			ret = MF_RECOVERED;
839 		}
840 	} else {
841 		/*
842 		 * If the file system doesn't support it just invalidate
843 		 * This fails on dirty or anything with private pages
844 		 */
845 		if (invalidate_inode_page(p))
846 			ret = MF_RECOVERED;
847 		else
848 			pr_info("%#lx: Failed to invalidate\n",	pfn);
849 	}
850 
851 	return ret;
852 }
853 
854 struct page_state {
855 	unsigned long mask;
856 	unsigned long res;
857 	enum mf_action_page_type type;
858 
859 	/* Callback ->action() has to unlock the relevant page inside it. */
860 	int (*action)(struct page_state *ps, struct page *p);
861 };
862 
863 /*
864  * Return true if page is still referenced by others, otherwise return
865  * false.
866  *
867  * The extra_pins is true when one extra refcount is expected.
868  */
869 static bool has_extra_refcount(struct page_state *ps, struct page *p,
870 			       bool extra_pins)
871 {
872 	int count = page_count(p) - 1;
873 
874 	if (extra_pins)
875 		count -= 1;
876 
877 	if (count > 0) {
878 		pr_err("%#lx: %s still referenced by %d users\n",
879 		       page_to_pfn(p), action_page_types[ps->type], count);
880 		return true;
881 	}
882 
883 	return false;
884 }
885 
886 /*
887  * Error hit kernel page.
888  * Do nothing, try to be lucky and not touch this instead. For a few cases we
889  * could be more sophisticated.
890  */
891 static int me_kernel(struct page_state *ps, struct page *p)
892 {
893 	unlock_page(p);
894 	return MF_IGNORED;
895 }
896 
897 /*
898  * Page in unknown state. Do nothing.
899  */
900 static int me_unknown(struct page_state *ps, struct page *p)
901 {
902 	pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
903 	unlock_page(p);
904 	return MF_FAILED;
905 }
906 
907 /*
908  * Clean (or cleaned) page cache page.
909  */
910 static int me_pagecache_clean(struct page_state *ps, struct page *p)
911 {
912 	int ret;
913 	struct address_space *mapping;
914 	bool extra_pins;
915 
916 	delete_from_lru_cache(p);
917 
918 	/*
919 	 * For anonymous pages we're done; the only reference left
920 	 * should be the one memory_failure() holds.
921 	 */
922 	if (PageAnon(p)) {
923 		ret = MF_RECOVERED;
924 		goto out;
925 	}
926 
927 	/*
928 	 * Now truncate the page in the page cache. This is really
929 	 * more like a "temporary hole punch"
930 	 * Don't do this for block devices when someone else
931 	 * has a reference, because it could be file system metadata
932 	 * and that's not safe to truncate.
933 	 */
934 	mapping = page_mapping(p);
935 	if (!mapping) {
936 		/*
937 		 * Page has been torn down in the meantime.
938 		 */
939 		ret = MF_FAILED;
940 		goto out;
941 	}
942 
943 	/*
944 	 * The shmem page is kept in the page cache instead of being truncated,
945 	 * so it is expected to have an extra refcount after error-handling.
946 	 */
947 	extra_pins = shmem_mapping(mapping);
948 
949 	/*
950 	 * Truncation is a bit tricky. Enable it per file system for now.
951 	 *
952 	 * Open: to take i_rwsem or not for this? Right now we don't.
953 	 */
954 	ret = truncate_error_page(p, page_to_pfn(p), mapping);
955 	if (has_extra_refcount(ps, p, extra_pins))
956 		ret = MF_FAILED;
957 
958 out:
959 	unlock_page(p);
960 
961 	return ret;
962 }
963 
964 /*
965  * Dirty pagecache page
966  * Issues: when the error hit a hole page the error is not properly
967  * propagated.
968  */
969 static int me_pagecache_dirty(struct page_state *ps, struct page *p)
970 {
971 	struct address_space *mapping = page_mapping(p);
972 
973 	SetPageError(p);
974 	/* TBD: print more information about the file. */
975 	if (mapping) {
976 		/*
977 		 * IO error will be reported by write(), fsync(), etc.
978 		 * who check the mapping.
979 		 * This way the application knows that something went
980 		 * wrong with its dirty file data.
981 		 *
982 		 * There's one open issue:
983 		 *
984 		 * The EIO will be only reported on the next IO
985 		 * operation and then cleared through the IO map.
986 		 * Normally Linux has two mechanisms to pass IO errors:
987 		 * first through the AS_EIO flag in the address space
988 		 * and then through the PageError flag in the page.
989 		 * Since we drop pages on memory failure handling, the
990 		 * only mechanism open to us is AS_EIO.
991 		 *
992 		 * This has the disadvantage that it gets cleared on
993 		 * the first operation that returns an error, while
994 		 * the PageError bit is more sticky and only cleared
995 		 * when the page is reread or dropped.  If an
996 		 * application assumes it will always get an error on
997 		 * fsync but does other operations on the fd first,
998 		 * and the page is dropped in between, then the error
999 		 * will not be properly reported.
1000 		 *
1001 		 * This can already happen even without hwpoisoned
1002 		 * pages: first on metadata IO errors (which only
1003 		 * report through AS_EIO) or when the page is dropped
1004 		 * at the wrong time.
1005 		 *
1006 		 * So right now we assume that the application DTRT on
1007 		 * the first EIO, but we're not worse than other parts
1008 		 * of the kernel.
1009 		 */
1010 		mapping_set_error(mapping, -EIO);
1011 	}
1012 
1013 	return me_pagecache_clean(ps, p);
1014 }
1015 
1016 /*
1017  * Clean and dirty swap cache.
1018  *
1019  * Dirty swap cache page is tricky to handle. The page could live both in page
1020  * cache and swap cache (i.e. the page was freshly swapped in). So it could be
1021  * referenced concurrently by 2 types of PTEs:
1022  * normal PTEs and swap PTEs. We try to handle them consistently by calling
1023  * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
1024  * and then
1025  *      - clear dirty bit to prevent IO
1026  *      - remove from LRU
1027  *      - but keep in the swap cache, so that when we return to it on
1028  *        a later page fault, we know the application is accessing
1029  *        corrupted data and shall be killed (we installed simple
1030  *        interception code in do_swap_page to catch it).
1031  *
1032  * Clean swap cache pages can be directly isolated. A later page fault will
1033  * bring in the known good data from disk.
1034  */
1035 static int me_swapcache_dirty(struct page_state *ps, struct page *p)
1036 {
1037 	int ret;
1038 	bool extra_pins = false;
1039 
1040 	ClearPageDirty(p);
1041 	/* Trigger EIO in shmem: */
1042 	ClearPageUptodate(p);
1043 
1044 	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
1045 	unlock_page(p);
1046 
1047 	if (ret == MF_DELAYED)
1048 		extra_pins = true;
1049 
1050 	if (has_extra_refcount(ps, p, extra_pins))
1051 		ret = MF_FAILED;
1052 
1053 	return ret;
1054 }
1055 
1056 static int me_swapcache_clean(struct page_state *ps, struct page *p)
1057 {
1058 	struct folio *folio = page_folio(p);
1059 	int ret;
1060 
1061 	delete_from_swap_cache(folio);
1062 
1063 	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
1064 	folio_unlock(folio);
1065 
1066 	if (has_extra_refcount(ps, p, false))
1067 		ret = MF_FAILED;
1068 
1069 	return ret;
1070 }
1071 
1072 /*
1073  * Huge pages. Needs work.
1074  * Issues:
1075  * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
1076  *   To narrow down kill region to one page, we need to break up pmd.
1077  */
1078 static int me_huge_page(struct page_state *ps, struct page *p)
1079 {
1080 	int res;
1081 	struct page *hpage = compound_head(p);
1082 	struct address_space *mapping;
1083 
1084 	if (!PageHuge(hpage))
1085 		return MF_DELAYED;
1086 
1087 	mapping = page_mapping(hpage);
1088 	if (mapping) {
1089 		res = truncate_error_page(hpage, page_to_pfn(p), mapping);
1090 		unlock_page(hpage);
1091 	} else {
1092 		unlock_page(hpage);
1093 		/*
1094 		 * migration entry prevents later access on error hugepage,
1095 		 * so we can free and dissolve it into buddy to save healthy
1096 		 * subpages.
1097 		 */
1098 		put_page(hpage);
1099 		if (__page_handle_poison(p) >= 0) {
1100 			page_ref_inc(p);
1101 			res = MF_RECOVERED;
1102 		} else {
1103 			res = MF_FAILED;
1104 		}
1105 	}
1106 
1107 	if (has_extra_refcount(ps, p, false))
1108 		res = MF_FAILED;
1109 
1110 	return res;
1111 }
1112 
1113 /*
1114  * Various page states we can handle.
1115  *
1116  * A page state is defined by its current page->flags bits.
1117  * The table matches them in order and calls the right handler.
1118  *
1119  * This is quite tricky because we can access a page at any time
1120  * in its life cycle, so all accesses have to be extremely careful.
1121  *
1122  * This is not complete. More states could be added.
1123  * For any missing state don't attempt recovery.
1124  */
1125 
1126 #define dirty		(1UL << PG_dirty)
1127 #define sc		((1UL << PG_swapcache) | (1UL << PG_swapbacked))
1128 #define unevict		(1UL << PG_unevictable)
1129 #define mlock		(1UL << PG_mlocked)
1130 #define lru		(1UL << PG_lru)
1131 #define head		(1UL << PG_head)
1132 #define slab		(1UL << PG_slab)
1133 #define reserved	(1UL << PG_reserved)
1134 
1135 static struct page_state error_states[] = {
1136 	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
1137 	/*
1138 	 * free pages are specially detected outside this table:
1139 	 * PG_buddy pages only make a small fraction of all free pages.
1140 	 */
1141 
1142 	/*
1143 	 * Could in theory check if slab page is free or if we can drop
1144 	 * currently unused objects without touching them. But just
1145 	 * treat it as standard kernel for now.
1146 	 */
1147 	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },
1148 
1149 	{ head,		head,		MF_MSG_HUGE,		me_huge_page },
1150 
1151 	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
1152 	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },
1153 
1154 	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
1155 	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },
1156 
1157 	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
1158 	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },
1159 
1160 	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
1161 	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },
1162 
1163 	/*
1164 	 * Catchall entry: must be at end.
1165 	 */
1166 	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
1167 };
1168 
1169 #undef dirty
1170 #undef sc
1171 #undef unevict
1172 #undef mlock
1173 #undef lru
1174 #undef head
1175 #undef slab
1176 #undef reserved
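
/*
 * A concrete example of how error_states[] is matched (see
 * identify_page_state()): the first entry whose masked page flags equal
 * ->res wins.  An ordinary (non-mlocked) dirty LRU pagecache page has
 * PG_lru and PG_dirty set, so it selects the MF_MSG_DIRTY_LRU entry and is
 * handled by me_pagecache_dirty(); once writeback has cleared PG_dirty,
 * the same page instead matches the MF_MSG_CLEAN_LRU entry and
 * me_pagecache_clean().  The zero-mask catchall at the end matches
 * anything left over.
 */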
1177 
1178 /*
1179  * "Dirty/Clean" indication is not 100% accurate due to the possibility of
1180  * setting PG_dirty outside page lock. See also comment above set_page_dirty().
1181  */
1182 static void action_result(unsigned long pfn, enum mf_action_page_type type,
1183 			  enum mf_result result)
1184 {
1185 	trace_memory_failure_event(pfn, type, result);
1186 
1187 	num_poisoned_pages_inc();
1188 	pr_err("%#lx: recovery action for %s: %s\n",
1189 		pfn, action_page_types[type], action_name[result]);
1190 }
1191 
1192 static int page_action(struct page_state *ps, struct page *p,
1193 			unsigned long pfn)
1194 {
1195 	int result;
1196 
1197 	/* page p should be unlocked after returning from ps->action().  */
1198 	result = ps->action(ps, p);
1199 
1200 	action_result(pfn, ps->type, result);
1201 
1202 	/* Could do more checks here if page looks ok */
1203 	/*
1204 	 * Could adjust zone counters here to correct for the missing page.
1205 	 */
1206 
1207 	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
1208 }
1209 
1210 static inline bool PageHWPoisonTakenOff(struct page *page)
1211 {
1212 	return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
1213 }
1214 
1215 void SetPageHWPoisonTakenOff(struct page *page)
1216 {
1217 	set_page_private(page, MAGIC_HWPOISON);
1218 }
1219 
1220 void ClearPageHWPoisonTakenOff(struct page *page)
1221 {
1222 	if (PageHWPoison(page))
1223 		set_page_private(page, 0);
1224 }
1225 
1226 /*
1227  * Return true if the type of a given page is supported by the hwpoison
1228  * mechanism (although handling it could still fail), otherwise false.  This
1229  * function does not return true for hugetlb or device memory pages, so it's
1230  * assumed to be called only in contexts where such pages never occur.
1231  */
1232 static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
1233 {
1234 	/* Soft offline could migrate non-LRU movable pages */
1235 	if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
1236 		return true;
1237 
1238 	return PageLRU(page) || is_free_buddy_page(page);
1239 }
1240 
1241 static int __get_hwpoison_page(struct page *page, unsigned long flags)
1242 {
1243 	struct page *head = compound_head(page);
1244 	int ret = 0;
1245 	bool hugetlb = false;
1246 
1247 	ret = get_hwpoison_huge_page(head, &hugetlb);
1248 	if (hugetlb)
1249 		return ret;
1250 
1251 	/*
1252 	 * This check prevents calling get_page_unless_zero() for any
1253 	 * unsupported type of page in order to reduce the risk of unexpected
1254 	 * races caused by taking a page refcount.
1255 	 */
1256 	if (!HWPoisonHandlable(head, flags))
1257 		return -EBUSY;
1258 
1259 	if (get_page_unless_zero(head)) {
1260 		if (head == compound_head(page))
1261 			return 1;
1262 
1263 		pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
1264 		put_page(head);
1265 	}
1266 
1267 	return 0;
1268 }
1269 
1270 static int get_any_page(struct page *p, unsigned long flags)
1271 {
1272 	int ret = 0, pass = 0;
1273 	bool count_increased = false;
1274 
1275 	if (flags & MF_COUNT_INCREASED)
1276 		count_increased = true;
1277 
1278 try_again:
1279 	if (!count_increased) {
1280 		ret = __get_hwpoison_page(p, flags);
1281 		if (!ret) {
1282 			if (page_count(p)) {
1283 				/* We raced with an allocation, retry. */
1284 				if (pass++ < 3)
1285 					goto try_again;
1286 				ret = -EBUSY;
1287 			} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
1288 				/* We raced with put_page, retry. */
1289 				if (pass++ < 3)
1290 					goto try_again;
1291 				ret = -EIO;
1292 			}
1293 			goto out;
1294 		} else if (ret == -EBUSY) {
1295 			/*
1296 			 * We raced with (possibly temporary) unhandlable
1297 			 * page, retry.
1298 			 */
1299 			if (pass++ < 3) {
1300 				shake_page(p);
1301 				goto try_again;
1302 			}
1303 			ret = -EIO;
1304 			goto out;
1305 		}
1306 	}
1307 
1308 	if (PageHuge(p) || HWPoisonHandlable(p, flags)) {
1309 		ret = 1;
1310 	} else {
1311 		/*
1312 		 * A page we cannot handle. Check whether we can turn
1313 		 * it into something we can handle.
1314 		 */
1315 		if (pass++ < 3) {
1316 			put_page(p);
1317 			shake_page(p);
1318 			count_increased = false;
1319 			goto try_again;
1320 		}
1321 		put_page(p);
1322 		ret = -EIO;
1323 	}
1324 out:
1325 	if (ret == -EIO)
1326 		pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));
1327 
1328 	return ret;
1329 }
1330 
1331 static int __get_unpoison_page(struct page *page)
1332 {
1333 	struct page *head = compound_head(page);
1334 	int ret = 0;
1335 	bool hugetlb = false;
1336 
1337 	ret = get_hwpoison_huge_page(head, &hugetlb);
1338 	if (hugetlb)
1339 		return ret;
1340 
1341 	/*
1342 	 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison,
1343 	 * but are also isolated from the buddy freelist, so we need to identify
1344 	 * this state and cancel both operations in order to unpoison.
1345 	 */
1346 	if (PageHWPoisonTakenOff(page))
1347 		return -EHWPOISON;
1348 
1349 	return get_page_unless_zero(page) ? 1 : 0;
1350 }
1351 
1352 /**
1353  * get_hwpoison_page() - Get refcount for memory error handling
1354  * @p:		Raw error page (hit by memory error)
1355  * @flags:	Flags controlling behavior of error handling
1356  *
1357  * get_hwpoison_page() takes a page refcount of an error page to handle memory
1358  * error on it, after checking that the error page is in a well-defined state
1359  * (defined as a page type for which we can successfully handle the memory
1360  * error, such as an LRU page or a hugetlb page).
1361  *
1362  * Memory error handling could be triggered at any time on any type of page,
1363  * so it's prone to race with typical memory management lifecycle (like
1364  * allocation and free).  So to avoid such races, get_hwpoison_page() takes
1365  * extra care for the error page's state (as done in __get_hwpoison_page()),
1366  * and has some retry logic in get_any_page().
1367  *
1368  * When called from unpoison_memory(), the caller should already ensure that
1369  * the given page has PG_hwpoison. So it's never reused for other page
1370  * allocations, and __get_unpoison_page() never races with them.
1371  *
1372  * Return: 0 on failure,
1373  *         1 on success for in-use pages in a well-defined state,
1374  *         -EIO for pages on which we can not handle memory errors,
1375  *         -EBUSY when get_hwpoison_page() has raced with page lifecycle
1376  *         operations like allocation and free,
1377  *         -EHWPOISON when the page is hwpoisoned and taken off from buddy.
1378  */
1379 static int get_hwpoison_page(struct page *p, unsigned long flags)
1380 {
1381 	int ret;
1382 
1383 	zone_pcp_disable(page_zone(p));
1384 	if (flags & MF_UNPOISON)
1385 		ret = __get_unpoison_page(p);
1386 	else
1387 		ret = get_any_page(p, flags);
1388 	zone_pcp_enable(page_zone(p));
1389 
1390 	return ret;
1391 }
1392 
1393 /*
1394  * Do all that is necessary to remove user space mappings. Unmap
1395  * the pages and send SIGBUS to the processes if the data was dirty.
1396  */
1397 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
1398 				  int flags, struct page *hpage)
1399 {
1400 	struct folio *folio = page_folio(hpage);
1401 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
1402 	struct address_space *mapping;
1403 	LIST_HEAD(tokill);
1404 	bool unmap_success;
1405 	int forcekill;
1406 	bool mlocked = PageMlocked(hpage);
1407 
1408 	/*
1409 	 * Here we are interested only in user-mapped pages, so skip any
1410 	 * other types of pages.
1411 	 */
1412 	if (PageReserved(p) || PageSlab(p) || PageTable(p))
1413 		return true;
1414 	if (!(PageLRU(hpage) || PageHuge(p)))
1415 		return true;
1416 
1417 	/*
1418 	 * This check implies we don't kill processes if their pages
1419 	 * are in the swap cache early. Those are always late kills.
1420 	 */
1421 	if (!page_mapped(hpage))
1422 		return true;
1423 
1424 	if (PageKsm(p)) {
1425 		pr_err("%#lx: can't handle KSM pages.\n", pfn);
1426 		return false;
1427 	}
1428 
1429 	if (PageSwapCache(p)) {
1430 		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
1431 		ttu |= TTU_IGNORE_HWPOISON;
1432 	}
1433 
1434 	/*
1435 	 * Propagate the dirty bit from PTEs to struct page first, because we
1436 	 * need this to decide if we should kill or just drop the page.
1437 	 * XXX: the dirty test could be racy: set_page_dirty() may not always
1438 	 * be called inside page lock (it's recommended but not enforced).
1439 	 */
1440 	mapping = page_mapping(hpage);
1441 	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
1442 	    mapping_can_writeback(mapping)) {
1443 		if (page_mkclean(hpage)) {
1444 			SetPageDirty(hpage);
1445 		} else {
1446 			ttu |= TTU_IGNORE_HWPOISON;
1447 			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
1448 				pfn);
1449 		}
1450 	}
1451 
1452 	/*
1453 	 * First collect all the processes that have the page
1454 	 * mapped in dirty form.  This has to be done before try_to_unmap,
1455 	 * because ttu takes the rmap data structures down.
1456 	 */
1457 	collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
1458 
1459 	if (PageHuge(hpage) && !PageAnon(hpage)) {
1460 		/*
1461 		 * For hugetlb pages in shared mappings, try_to_unmap
1462 		 * could potentially call huge_pmd_unshare.  Because of
1463 		 * this, take the semaphore in write mode here and set
1464 		 * TTU_RMAP_LOCKED to indicate we have taken the lock
1465 		 * at this higher level.
1466 		 */
1467 		mapping = hugetlb_page_mapping_lock_write(hpage);
1468 		if (mapping) {
1469 			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
1470 			i_mmap_unlock_write(mapping);
1471 		} else
1472 			pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
1473 	} else {
1474 		try_to_unmap(folio, ttu);
1475 	}
1476 
1477 	unmap_success = !page_mapped(hpage);
1478 	if (!unmap_success)
1479 		pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
1480 		       pfn, page_mapcount(hpage));
1481 
1482 	/*
1483 	 * try_to_unmap() might put mlocked page in lru cache, so call
1484 	 * shake_page() again to ensure that it's flushed.
1485 	 */
1486 	if (mlocked)
1487 		shake_page(hpage);
1488 
1489 	/*
1490 	 * Now that the dirty bit has been propagated to the
1491 	 * struct page and all unmaps done we can decide if
1492 	 * killing is needed or not.  Only kill when the page
1493 	 * was dirty or the process is not restartable,
1494 	 * otherwise the tokill list is merely
1495 	 * freed.  When there was a problem unmapping earlier,
1496 	 * use a more forceful uncatchable kill to prevent
1497 	 * any accesses to the poisoned memory.
1498 	 */
1499 	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
1500 		    !unmap_success;
1501 	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
1502 
1503 	return unmap_success;
1504 }
1505 
1506 static int identify_page_state(unsigned long pfn, struct page *p,
1507 				unsigned long page_flags)
1508 {
1509 	struct page_state *ps;
1510 
1511 	/*
1512 	 * The first check uses the current page flags which may not have any
1513 	 * relevant information. The second check with the saved page flags is
1514 	 * carried out only if the first check can't determine the page status.
1515 	 */
1516 	for (ps = error_states;; ps++)
1517 		if ((p->flags & ps->mask) == ps->res)
1518 			break;
1519 
1520 	page_flags |= (p->flags & (1UL << PG_dirty));
1521 
1522 	if (!ps->mask)
1523 		for (ps = error_states;; ps++)
1524 			if ((page_flags & ps->mask) == ps->res)
1525 				break;
1526 	return page_action(ps, p, pfn);
1527 }
1528 
1529 static int try_to_split_thp_page(struct page *page)
1530 {
1531 	int ret;
1532 
1533 	lock_page(page);
1534 	ret = split_huge_page(page);
1535 	unlock_page(page);
1536 
1537 	if (unlikely(ret))
1538 		put_page(page);
1539 
1540 	return ret;
1541 }
1542 
1543 static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
1544 		struct address_space *mapping, pgoff_t index, int flags)
1545 {
1546 	struct to_kill *tk;
1547 	unsigned long size = 0;
1548 
1549 	list_for_each_entry(tk, to_kill, nd)
1550 		if (tk->size_shift)
1551 			size = max(size, 1UL << tk->size_shift);
1552 
1553 	if (size) {
1554 		/*
1555 		 * Unmap the largest mapping to avoid breaking up device-dax
1556 		 * mappings which are constant size. The actual size of the
1557 		 * mapping being torn down is communicated in siginfo, see
1558 		 * kill_proc()
1559 		 */
1560 		loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
1561 
1562 		unmap_mapping_range(mapping, start, size, 0);
1563 	}
1564 
1565 	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
1566 }
1567 
1568 static int mf_generic_kill_procs(unsigned long long pfn, int flags,
1569 		struct dev_pagemap *pgmap)
1570 {
1571 	struct page *page = pfn_to_page(pfn);
1572 	LIST_HEAD(to_kill);
1573 	dax_entry_t cookie;
1574 	int rc = 0;
1575 
1576 	/*
1577 	 * Pages instantiated by device-dax (not filesystem-dax)
1578 	 * may be compound pages.
1579 	 */
1580 	page = compound_head(page);
1581 
1582 	/*
1583 	 * Prevent the inode from being freed while we are interrogating
1584 	 * the address_space, typically this would be handled by
1585 	 * lock_page(), but dax pages do not use the page lock. This
1586 	 * also prevents changes to the mapping of this pfn until
1587 	 * poison signaling is complete.
1588 	 */
1589 	cookie = dax_lock_page(page);
1590 	if (!cookie)
1591 		return -EBUSY;
1592 
1593 	if (hwpoison_filter(page)) {
1594 		rc = -EOPNOTSUPP;
1595 		goto unlock;
1596 	}
1597 
1598 	switch (pgmap->type) {
1599 	case MEMORY_DEVICE_PRIVATE:
1600 	case MEMORY_DEVICE_COHERENT:
1601 		/*
1602 		 * TODO: Handle device pages which may need coordination
1603 		 * with device-side memory.
1604 		 */
1605 		rc = -ENXIO;
1606 		goto unlock;
1607 	default:
1608 		break;
1609 	}
1610 
1611 	/*
1612 	 * Use this flag as an indication that the dax page has been
1613 	 * remapped UC to prevent speculative consumption of poison.
1614 	 */
1615 	SetPageHWPoison(page);
1616 
1617 	/*
1618 	 * Unlike System-RAM there is no possibility to swap in a
1619 	 * different physical page at a given virtual address, so all
1620 	 * userspace consumption of ZONE_DEVICE memory necessitates
1621 	 * SIGBUS (i.e. MF_MUST_KILL)
1622 	 */
1623 	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1624 	collect_procs(page, &to_kill, true);
1625 
1626 	unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
1627 unlock:
1628 	dax_unlock_page(page, cookie);
1629 	return rc;
1630 }
1631 
1632 #ifdef CONFIG_FS_DAX
1633 /**
1634  * mf_dax_kill_procs - Collect and kill processes who are using this file range
1635  * @mapping:	address_space of the file in use
1636  * @index:	start pgoff of the range within the file
1637  * @count:	length of the range, in unit of PAGE_SIZE
1638  * @mf_flags:	memory failure flags
1639  */
1640 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
1641 		unsigned long count, int mf_flags)
1642 {
1643 	LIST_HEAD(to_kill);
1644 	dax_entry_t cookie;
1645 	struct page *page;
1646 	size_t end = index + count;
1647 
1648 	mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1649 
1650 	for (; index < end; index++) {
1651 		page = NULL;
1652 		cookie = dax_lock_mapping_entry(mapping, index, &page);
1653 		if (!cookie)
1654 			return -EBUSY;
1655 		if (!page)
1656 			goto unlock;
1657 
1658 		SetPageHWPoison(page);
1659 
1660 		collect_procs_fsdax(page, mapping, index, &to_kill);
1661 		unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
1662 				index, mf_flags);
1663 unlock:
1664 		dax_unlock_mapping_entry(mapping, index, cookie);
1665 	}
1666 	return 0;
1667 }
1668 EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
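
/*
 * mf_dax_kill_procs() is intended for filesystems that claim DAX memory
 * failure handling: when the pmem driver reports a media error, the
 * notification reaches the filesystem through its dax_holder_operations
 * ->notify_failure handler, which maps the failed device range to file
 * offsets and then calls this helper to unmap and signal the affected
 * processes.  (This describes the expected call chain; see the dax holder
 * code and the filesystem implementations for the authoritative flow.)
 */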
1669 #endif /* CONFIG_FS_DAX */
1670 
1671 #ifdef CONFIG_HUGETLB_PAGE
1672 /*
1673  * Struct raw_hwp_page represents information about a "raw error page",
1674  * forming a singly linked list rooted at the ->private field of the
1675  * SUBPAGE_INDEX_HWPOISON-th tail page.
1676  */
1677 struct raw_hwp_page {
1678 	struct llist_node node;
1679 	struct page *page;
1680 };
1681 
1682 static inline struct llist_head *raw_hwp_list_head(struct page *hpage)
1683 {
1684 	return (struct llist_head *)&page_private(hpage + SUBPAGE_INDEX_HWPOISON);
1685 }
1686 
1687 static unsigned long __free_raw_hwp_pages(struct page *hpage, bool move_flag)
1688 {
1689 	struct llist_head *head;
1690 	struct llist_node *t, *tnode;
1691 	unsigned long count = 0;
1692 
1693 	head = raw_hwp_list_head(hpage);
1694 	llist_for_each_safe(tnode, t, head->first) {
1695 		struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
1696 
1697 		if (move_flag)
1698 			SetPageHWPoison(p->page);
1699 		kfree(p);
1700 		count++;
1701 	}
1702 	llist_del_all(head);
1703 	return count;
1704 }
1705 
1706 static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
1707 {
1708 	struct llist_head *head;
1709 	struct raw_hwp_page *raw_hwp;
1710 	struct llist_node *t, *tnode;
1711 	int ret = TestSetPageHWPoison(hpage) ? -EHWPOISON : 0;
1712 
1713 	/*
1714 	 * Once the hwpoison hugepage has lost reliable raw error info,
1715 	 * there is little point in keeping further error info precisely,
1716 	 * so skip adding additional raw error info.
1717 	 */
1718 	if (HPageRawHwpUnreliable(hpage))
1719 		return -EHWPOISON;
1720 	head = raw_hwp_list_head(hpage);
1721 	llist_for_each_safe(tnode, t, head->first) {
1722 		struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
1723 
1724 		if (p->page == page)
1725 			return -EHWPOISON;
1726 	}
1727 
1728 	raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
1729 	if (raw_hwp) {
1730 		raw_hwp->page = page;
1731 		llist_add(&raw_hwp->node, head);
1732 		/* the first error event will be counted in action_result(). */
1733 		if (ret)
1734 			num_poisoned_pages_inc();
1735 	} else {
1736 		/*
1737 		 * Failed to save raw error info.  We no longer trace all
1738 		 * hwpoisoned subpages, and we must refuse to free/dissolve
1739 		 * this hwpoisoned hugepage.
1740 		 */
1741 		SetHPageRawHwpUnreliable(hpage);
1742 		/*
1743 		 * Once HPageRawHwpUnreliable is set, raw_hwp_page is not
1744 		 * used any more, so free it.
1745 		 */
1746 		__free_raw_hwp_pages(hpage, false);
1747 	}
1748 	return ret;
1749 }
1750 
1751 static unsigned long free_raw_hwp_pages(struct page *hpage, bool move_flag)
1752 {
1753 	/*
1754 	 * HPageVmemmapOptimized hugepages can't be freed because struct
1755 	 * pages for tail pages are required but they don't exist.
1756 	 */
1757 	if (move_flag && HPageVmemmapOptimized(hpage))
1758 		return 0;
1759 
1760 	/*
1761 	 * HPageRawHwpUnreliable hugepages shouldn't be unpoisoned by
1762 	 * definition.
1763 	 */
1764 	if (HPageRawHwpUnreliable(hpage))
1765 		return 0;
1766 
1767 	return __free_raw_hwp_pages(hpage, move_flag);
1768 }
1769 
1770 void hugetlb_clear_page_hwpoison(struct page *hpage)
1771 {
1772 	if (HPageRawHwpUnreliable(hpage))
1773 		return;
1774 	ClearPageHWPoison(hpage);
1775 	free_raw_hwp_pages(hpage, true);
1776 }
1777 
1778 /*
1779  * Called from hugetlb code with hugetlb_lock held.
1780  *
1781  * Return values:
1782  *   0             - free hugepage
1783  *   1             - in-use hugepage
1784  *   2             - not a hugepage
1785  *   -EBUSY        - the hugepage is busy (try to retry)
1786  *   -EHWPOISON    - the hugepage is already hwpoisoned
1787  */
1788 int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
1789 {
1790 	struct page *page = pfn_to_page(pfn);
1791 	struct page *head = compound_head(page);
1792 	int ret = 2;	/* fallback to normal page handling */
1793 	bool count_increased = false;
1794 
1795 	if (!PageHeadHuge(head))
1796 		goto out;
1797 
1798 	if (flags & MF_COUNT_INCREASED) {
1799 		ret = 1;
1800 		count_increased = true;
1801 	} else if (HPageFreed(head)) {
1802 		ret = 0;
1803 	} else if (HPageMigratable(head)) {
1804 		ret = get_page_unless_zero(head);
1805 		if (ret)
1806 			count_increased = true;
1807 	} else {
1808 		ret = -EBUSY;
1809 		if (!(flags & MF_NO_RETRY))
1810 			goto out;
1811 	}
1812 
1813 	if (hugetlb_set_page_hwpoison(head, page)) {
1814 		ret = -EHWPOISON;
1815 		goto out;
1816 	}
1817 
1818 	return ret;
1819 out:
1820 	if (count_increased)
1821 		put_page(head);
1822 	return ret;
1823 }
1824 
1825 /*
1826  * Taking a refcount on hugetlb pages needs extra care about races with
1827  * basic operations like hugepage allocation/free/demotion.
1828  * So some of the prechecks for hwpoison (pinning, and testing/setting
1829  * PageHWPoison) should be done within a single hugetlb_lock section.
1830  */
1831 static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
1832 {
1833 	int res;
1834 	struct page *p = pfn_to_page(pfn);
1835 	struct page *head;
1836 	unsigned long page_flags;
1837 
1838 	*hugetlb = 1;
1839 retry:
1840 	res = get_huge_page_for_hwpoison(pfn, flags);
1841 	if (res == 2) { /* fallback to normal page handling */
1842 		*hugetlb = 0;
1843 		return 0;
1844 	} else if (res == -EHWPOISON) {
1845 		pr_err("%#lx: already hardware poisoned\n", pfn);
1846 		if (flags & MF_ACTION_REQUIRED) {
1847 			head = compound_head(p);
1848 			res = kill_accessing_process(current, page_to_pfn(head), flags);
1849 		}
1850 		return res;
1851 	} else if (res == -EBUSY) {
1852 		if (!(flags & MF_NO_RETRY)) {
1853 			flags |= MF_NO_RETRY;
1854 			goto retry;
1855 		}
1856 		action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
1857 		return res;
1858 	}
1859 
1860 	head = compound_head(p);
1861 	lock_page(head);
1862 
1863 	if (hwpoison_filter(p)) {
1864 		hugetlb_clear_page_hwpoison(head);
1865 		unlock_page(head);
1866 		if (res == 1)
1867 			put_page(head);
1868 		return -EOPNOTSUPP;
1869 	}
1870 
1871 	/*
1872 	 * Handling a free hugepage.  The possible race with hugepage allocation
1873 	 * or demotion can be prevented by the PageHWPoison flag.
1874 	 */
1875 	if (res == 0) {
1876 		unlock_page(head);
1877 		if (__page_handle_poison(p) >= 0) {
1878 			page_ref_inc(p);
1879 			res = MF_RECOVERED;
1880 		} else {
1881 			res = MF_FAILED;
1882 		}
1883 		action_result(pfn, MF_MSG_FREE_HUGE, res);
1884 		return res == MF_RECOVERED ? 0 : -EBUSY;
1885 	}
1886 
1887 	page_flags = head->flags;
1888 
1889 	if (!hwpoison_user_mappings(p, pfn, flags, head)) {
1890 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1891 		res = -EBUSY;
1892 		goto out;
1893 	}
1894 
1895 	return identify_page_state(pfn, p, page_flags);
1896 out:
1897 	unlock_page(head);
1898 	return res;
1899 }
1900 
1901 #else
1902 static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
1903 {
1904 	return 0;
1905 }
1906 
1907 static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
1908 {
1909 	return 0;
1910 }
1911 #endif	/* CONFIG_HUGETLB_PAGE */
1912 
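/*
 * Handle a memory failure on a page backed by a dev_pagemap (e.g. fsdax or
 * other device memory).  The pgmap's own ->memory_failure() handler is
 * preferred; the generic mf_generic_kill_procs() path is the fallback.  The
 * pgmap reference taken by the caller is dropped here.
 */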
1913 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
1914 		struct dev_pagemap *pgmap)
1915 {
1916 	struct page *page = pfn_to_page(pfn);
1917 	int rc = -ENXIO;
1918 
1919 	if (flags & MF_COUNT_INCREASED)
1920 		/*
1921 		 * Drop the extra refcount in case we come from madvise().
1922 		 */
1923 		put_page(page);
1924 
1925 	/* device metadata space is not recoverable */
1926 	if (!pgmap_pfn_valid(pgmap, pfn))
1927 		goto out;
1928 
1929 	/*
1930 	 * Call the driver's implementation to handle the memory failure,
1931 	 * otherwise fall back to the generic handler.
1932 	 */
1933 	if (pgmap_has_memory_failure(pgmap)) {
1934 		rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
1935 		/*
1936 		 * Fall back to the generic handler too if the operation is not
1937 		 * supported inside the driver/device/filesystem.
1938 		 */
1939 		if (rc != -EOPNOTSUPP)
1940 			goto out;
1941 	}
1942 
1943 	rc = mf_generic_kill_procs(pfn, flags, pgmap);
1944 out:
1945 	/* drop pgmap ref acquired in caller */
1946 	put_dev_pagemap(pgmap);
1947 	action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
1948 	return rc;
1949 }
1950 
1951 static DEFINE_MUTEX(mf_mutex);
1952 
1953 /**
1954  * memory_failure - Handle memory failure of a page.
1955  * @pfn: Page Number of the corrupted page
1956  * @flags: fine tune action taken
1957  *
1958  * This function is called by the low level machine check code
1959  * of an architecture when it detects hardware memory corruption
1960  * of a page. It tries its best to recover, which includes
1961  * dropping pages, killing processes etc.
1962  *
1963  * The function is primarily of use for corruptions that
1964  * happen outside the current execution context (e.g. when
1965  * detected by a background scrubber)
1966  *
1967  * Must run in process context (e.g. a work queue) with interrupts
1968  * enabled and no spinlocks held.
1969  *
1970  * Return: 0 if the memory error was successfully handled,
1971  *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
1972  *         < 0 (other than -EOPNOTSUPP) on failure.
1973  */
1974 int memory_failure(unsigned long pfn, int flags)
1975 {
1976 	struct page *p;
1977 	struct page *hpage;
1978 	struct dev_pagemap *pgmap;
1979 	int res = 0;
1980 	unsigned long page_flags;
1981 	bool retry = true;
1982 	int hugetlb = 0;
1983 
1984 	if (!sysctl_memory_failure_recovery)
1985 		panic("Memory failure on page %lx", pfn);
1986 
1987 	mutex_lock(&mf_mutex);
1988 
1989 	if (!(flags & MF_SW_SIMULATED))
1990 		hw_memory_failure = true;
1991 
1992 	p = pfn_to_online_page(pfn);
1993 	if (!p) {
1994 		res = arch_memory_failure(pfn, flags);
1995 		if (res == 0)
1996 			goto unlock_mutex;
1997 
1998 		if (pfn_valid(pfn)) {
1999 			pgmap = get_dev_pagemap(pfn, NULL);
2000 			if (pgmap) {
2001 				res = memory_failure_dev_pagemap(pfn, flags,
2002 								 pgmap);
2003 				goto unlock_mutex;
2004 			}
2005 		}
2006 		pr_err("%#lx: memory outside kernel control\n", pfn);
2007 		res = -ENXIO;
2008 		goto unlock_mutex;
2009 	}
2010 
2011 try_again:
2012 	res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
2013 	if (hugetlb)
2014 		goto unlock_mutex;
2015 
2016 	if (TestSetPageHWPoison(p)) {
2017 		pr_err("%#lx: already hardware poisoned\n", pfn);
2018 		res = -EHWPOISON;
2019 		if (flags & MF_ACTION_REQUIRED)
2020 			res = kill_accessing_process(current, pfn, flags);
2021 		if (flags & MF_COUNT_INCREASED)
2022 			put_page(p);
2023 		goto unlock_mutex;
2024 	}
2025 
2026 	hpage = compound_head(p);
2027 
2028 	/*
2029 	 * We neither can nor need to do anything about count=0 pages.
2030 	 * 1) it's a free page, and therefore in safe hands:
2031 	 *    check_new_page() will be the gatekeeper.
2032 	 * 2) it's part of a non-compound high order page.
2033 	 *    This implies some kernel user: we cannot stop them from
2034 	 *    reading/writing the page; let's hope that the page has been
2035 	 *    used and will be freed some time later.
2036 	 * In fact it's dangerous to directly bump up the page count from 0,
2037 	 * as that may cause a page_ref_freeze()/page_ref_unfreeze() mismatch.
2038 	 */
2039 	if (!(flags & MF_COUNT_INCREASED)) {
2040 		res = get_hwpoison_page(p, flags);
2041 		if (!res) {
2042 			if (is_free_buddy_page(p)) {
2043 				if (take_page_off_buddy(p)) {
2044 					page_ref_inc(p);
2045 					res = MF_RECOVERED;
2046 				} else {
2047 					/* We lost the race, try again */
2048 					if (retry) {
2049 						ClearPageHWPoison(p);
2050 						retry = false;
2051 						goto try_again;
2052 					}
2053 					res = MF_FAILED;
2054 				}
2055 				action_result(pfn, MF_MSG_BUDDY, res);
2056 				res = res == MF_RECOVERED ? 0 : -EBUSY;
2057 			} else {
2058 				action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
2059 				res = -EBUSY;
2060 			}
2061 			goto unlock_mutex;
2062 		} else if (res < 0) {
2063 			action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
2064 			res = -EBUSY;
2065 			goto unlock_mutex;
2066 		}
2067 	}
2068 
2069 	if (PageTransHuge(hpage)) {
2070 		/*
2071 		 * The flag must be set after the refcount is bumped,
2072 		 * otherwise it may race with THP split.
2073 		 * And the flag can't be set in get_hwpoison_page() since
2074 		 * it is called by soft offline too and it is just called
2075 		 * for !MF_COUNT_INCREASED.  So here seems to be the best
2076 		 * place.
2077 		 *
2078 		 * There is no need to care about the error handling paths
2079 		 * above for get_hwpoison_page() since they handle either a
2080 		 * free page or an unhandleable page.  The refcount is bumped
2081 		 * iff the page is a valid handleable page.
2082 		 */
2083 		SetPageHasHWPoisoned(hpage);
2084 		if (try_to_split_thp_page(p) < 0) {
2085 			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
2086 			res = -EBUSY;
2087 			goto unlock_mutex;
2088 		}
2089 		VM_BUG_ON_PAGE(!page_count(p), p);
2090 	}
2091 
2092 	/*
2093 	 * We ignore non-LRU pages for good reasons.
2094 	 * - PG_locked is only well defined for LRU pages and a few others
2095 	 * - to avoid races with __SetPageLocked()
2096 	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
2097 	 * The check (unnecessarily) ignores LRU pages being isolated and
2098 	 * walked by the page reclaim code, however that's not a big loss.
2099 	 */
2100 	shake_page(p);
2101 
2102 	lock_page(p);
2103 
2104 	/*
2105 	 * We only intend to deal with non-compound pages here.
2106 	 * However, the page could have become part of a compound page due
2107 	 * to a race window. If this happens, we may try again to hopefully
2108 	 * handle the page in the next round.
2109 	 */
2110 	if (PageCompound(p)) {
2111 		if (retry) {
2112 			ClearPageHWPoison(p);
2113 			unlock_page(p);
2114 			put_page(p);
2115 			flags &= ~MF_COUNT_INCREASED;
2116 			retry = false;
2117 			goto try_again;
2118 		}
2119 		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
2120 		res = -EBUSY;
2121 		goto unlock_page;
2122 	}
2123 
2124 	/*
2125 	 * We use page flags to determine what action should be taken, but
2126 	 * the flags can be modified by the error containment action.  One
2127 	 * example is an mlocked page, where PG_mlocked is cleared by
2128 	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
2129 	 * correctly, we save a copy of the page flags at this time.
2130 	 */
2131 	page_flags = p->flags;
2132 
2133 	if (hwpoison_filter(p)) {
2134 		ClearPageHWPoison(p);
2135 		unlock_page(p);
2136 		put_page(p);
2137 		res = -EOPNOTSUPP;
2138 		goto unlock_mutex;
2139 	}
2140 
2141 	/*
2142 	 * __munlock_pagevec may clear a writeback page's LRU flag without
2143 	 * page_lock. We need to wait for writeback completion for this page
2144 	 * or it may trigger a vfs BUG when the inode is evicted.
2145 	 */
2146 	if (!PageLRU(p) && !PageWriteback(p))
2147 		goto identify_page_state;
2148 
2149 	/*
2150 	 * It's very difficult to mess with pages currently under IO
2151 	 * and in many cases impossible, so we just avoid it here.
2152 	 */
2153 	wait_on_page_writeback(p);
2154 
2155 	/*
2156 	 * Now take care of user space mappings.
2157 	 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
2158 	 */
2159 	if (!hwpoison_user_mappings(p, pfn, flags, p)) {
2160 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
2161 		res = -EBUSY;
2162 		goto unlock_page;
2163 	}
2164 
2165 	/*
2166 	 * Torn down by someone else?
2167 	 */
2168 	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
2169 		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
2170 		res = -EBUSY;
2171 		goto unlock_page;
2172 	}
2173 
2174 identify_page_state:
2175 	res = identify_page_state(pfn, p, page_flags);
2176 	mutex_unlock(&mf_mutex);
2177 	return res;
2178 unlock_page:
2179 	unlock_page(p);
2180 unlock_mutex:
2181 	mutex_unlock(&mf_mutex);
2182 	return res;
2183 }
2184 EXPORT_SYMBOL_GPL(memory_failure);
2185 
2186 #define MEMORY_FAILURE_FIFO_ORDER	4
2187 #define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)
2188 
2189 struct memory_failure_entry {
2190 	unsigned long pfn;
2191 	int flags;
2192 };
2193 
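/*
 * Per-CPU state for deferred handling: memory_failure_queue() (which may
 * run in IRQ context) pushes entries into the kfifo, and the work item
 * later drains them in process context.
 */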
2194 struct memory_failure_cpu {
2195 	DECLARE_KFIFO(fifo, struct memory_failure_entry,
2196 		      MEMORY_FAILURE_FIFO_SIZE);
2197 	spinlock_t lock;
2198 	struct work_struct work;
2199 };
2200 
2201 static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
2202 
2203 /**
2204  * memory_failure_queue - Schedule handling memory failure of a page.
2205  * @pfn: Page Number of the corrupted page
2206  * @flags: Flags for memory failure handling
2207  *
2208  * This function is called by the low level hardware error handler
2209  * when it detects hardware memory corruption of a page. It schedules
2210  * the recovering of error page, including dropping pages, killing
2211  * the recovery of the error page, including dropping pages, killing
2212  *
2213  * The function is primarily of use for corruptions that
2214  * happen outside the current execution context (e.g. when
2215  * detected by a background scrubber)
2216  *
2217  * Can run in IRQ context.
2218  */
2219 void memory_failure_queue(unsigned long pfn, int flags)
2220 {
2221 	struct memory_failure_cpu *mf_cpu;
2222 	unsigned long proc_flags;
2223 	struct memory_failure_entry entry = {
2224 		.pfn =		pfn,
2225 		.flags =	flags,
2226 	};
2227 
2228 	mf_cpu = &get_cpu_var(memory_failure_cpu);
2229 	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
2230 	if (kfifo_put(&mf_cpu->fifo, entry))
2231 		schedule_work_on(smp_processor_id(), &mf_cpu->work);
2232 	else
2233 		pr_err("buffer overflow when queuing memory failure at %#lx\n",
2234 		       pfn);
2235 	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
2236 	put_cpu_var(memory_failure_cpu);
2237 }
2238 EXPORT_SYMBOL_GPL(memory_failure_queue);
2239 
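/*
 * Drain the per-CPU fifo of queued error events, handing each entry to
 * either soft_offline_page() or memory_failure() depending on its flags.
 */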
2240 static void memory_failure_work_func(struct work_struct *work)
2241 {
2242 	struct memory_failure_cpu *mf_cpu;
2243 	struct memory_failure_entry entry = { 0, };
2244 	unsigned long proc_flags;
2245 	int gotten;
2246 
2247 	mf_cpu = container_of(work, struct memory_failure_cpu, work);
2248 	for (;;) {
2249 		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
2250 		gotten = kfifo_get(&mf_cpu->fifo, &entry);
2251 		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
2252 		if (!gotten)
2253 			break;
2254 		if (entry.flags & MF_SOFT_OFFLINE)
2255 			soft_offline_page(entry.pfn, entry.flags);
2256 		else
2257 			memory_failure(entry.pfn, entry.flags);
2258 	}
2259 }
2260 
2261 /*
2262  * Process memory_failure work queued on the specified CPU.
2263  * Used to avoid return-to-userspace racing with the memory_failure workqueue.
2264  */
2265 void memory_failure_queue_kick(int cpu)
2266 {
2267 	struct memory_failure_cpu *mf_cpu;
2268 
2269 	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
2270 	cancel_work_sync(&mf_cpu->work);
2271 	memory_failure_work_func(&mf_cpu->work);
2272 }
2273 
2274 static int __init memory_failure_init(void)
2275 {
2276 	struct memory_failure_cpu *mf_cpu;
2277 	int cpu;
2278 
2279 	for_each_possible_cpu(cpu) {
2280 		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
2281 		spin_lock_init(&mf_cpu->lock);
2282 		INIT_KFIFO(mf_cpu->fifo);
2283 		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
2284 	}
2285 
2286 	return 0;
2287 }
2288 core_initcall(memory_failure_init);
2289 
2290 #undef pr_fmt
2291 #define pr_fmt(fmt)	"" fmt
2292 #define unpoison_pr_info(fmt, pfn, rs)			\
2293 ({							\
2294 	if (__ratelimit(rs))				\
2295 		pr_info(fmt, pfn);			\
2296 })
2297 
2298 /**
2299  * unpoison_memory - Unpoison a previously poisoned page
2300  * @pfn: Page number of the to be unpoisoned page
2301  *
2302  * Software-unpoison a page that has been poisoned by
2303  * memory_failure() earlier.
2304  *
2305  * This is done only at the software level, so it only works
2306  * for Linux-injected failures, not real hardware failures.
2307  *
2308  * Returns 0 for success, otherwise -errno.
2309  */
2310 int unpoison_memory(unsigned long pfn)
2311 {
2312 	struct page *page;
2313 	struct page *p;
2314 	int ret = -EBUSY;
2315 	int freeit = 0;
2316 	unsigned long count = 1;
2317 	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
2318 					DEFAULT_RATELIMIT_BURST);
2319 
2320 	if (!pfn_valid(pfn))
2321 		return -ENXIO;
2322 
2323 	p = pfn_to_page(pfn);
2324 	page = compound_head(p);
2325 
2326 	mutex_lock(&mf_mutex);
2327 
2328 	if (hw_memory_failure) {
2329 		unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
2330 				 pfn, &unpoison_rs);
2331 		ret = -EOPNOTSUPP;
2332 		goto unlock_mutex;
2333 	}
2334 
2335 	if (!PageHWPoison(p)) {
2336 		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
2337 				 pfn, &unpoison_rs);
2338 		goto unlock_mutex;
2339 	}
2340 
2341 	if (page_count(page) > 1) {
2342 		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
2343 				 pfn, &unpoison_rs);
2344 		goto unlock_mutex;
2345 	}
2346 
2347 	if (page_mapped(page)) {
2348 		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
2349 				 pfn, &unpoison_rs);
2350 		goto unlock_mutex;
2351 	}
2352 
2353 	if (page_mapping(page)) {
2354 		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
2355 				 pfn, &unpoison_rs);
2356 		goto unlock_mutex;
2357 	}
2358 
2359 	if (PageSlab(page) || PageTable(page) || PageReserved(page))
2360 		goto unlock_mutex;
2361 
2362 	ret = get_hwpoison_page(p, MF_UNPOISON);
2363 	if (!ret) {
2364 		if (PageHuge(p)) {
2365 			count = free_raw_hwp_pages(page, false);
2366 			if (count == 0) {
2367 				ret = -EBUSY;
2368 				goto unlock_mutex;
2369 			}
2370 		}
2371 		ret = TestClearPageHWPoison(page) ? 0 : -EBUSY;
2372 	} else if (ret < 0) {
2373 		if (ret == -EHWPOISON) {
2374 			ret = put_page_back_buddy(p) ? 0 : -EBUSY;
2375 		} else
2376 			unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
2377 					 pfn, &unpoison_rs);
2378 	} else {
2379 		if (PageHuge(p)) {
2380 			count = free_raw_hwp_pages(page, false);
2381 			if (count == 0) {
2382 				ret = -EBUSY;
2383 				put_page(page);
2384 				goto unlock_mutex;
2385 			}
2386 		}
2387 		freeit = !!TestClearPageHWPoison(p);
2388 
2389 		put_page(page);
2390 		if (freeit) {
2391 			put_page(page);
2392 			ret = 0;
2393 		}
2394 	}
2395 
2396 unlock_mutex:
2397 	mutex_unlock(&mf_mutex);
2398 	if (!ret || freeit) {
2399 		num_poisoned_pages_sub(count);
2400 		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
2401 				 page_to_pfn(p), &unpoison_rs);
2402 	}
2403 	return ret;
2404 }
2405 EXPORT_SYMBOL(unpoison_memory);
2406 
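/*
 * Isolate @page (a hugetlb, LRU or movable page) onto @pagelist in
 * preparation for migration.  Returns true on success.  The caller's page
 * reference is always dropped (see the comment at the bottom).
 */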
2407 static bool isolate_page(struct page *page, struct list_head *pagelist)
2408 {
2409 	bool isolated = false;
2410 
2411 	if (PageHuge(page)) {
2412 		isolated = !isolate_hugetlb(page, pagelist);
2413 	} else {
2414 		bool lru = !__PageMovable(page);
2415 
2416 		if (lru)
2417 			isolated = !isolate_lru_page(page);
2418 		else
2419 			isolated = !isolate_movable_page(page,
2420 							 ISOLATE_UNEVICTABLE);
2421 
2422 		if (isolated) {
2423 			list_add(&page->lru, pagelist);
2424 			if (lru)
2425 				inc_node_page_state(page, NR_ISOLATED_ANON +
2426 						    page_is_file_lru(page));
2427 		}
2428 	}
2429 
2430 	/*
2431 	 * If we succeed in isolating the page, we grabbed another refcount on
2432 	 * the page, so we can safely drop the one we got from get_any_pages().
2433 	 * If we failed to isolate the page, it means that we cannot go further
2434 	 * and we will return an error, so drop the reference we got from
2435 	 * get_any_pages() as well.
2436 	 */
2437 	put_page(page);
2438 	return isolated;
2439 }
2440 
2441 /*
2442  * soft_offline_in_use_page handles hugetlb pages and non-hugetlb pages.
2443  * If the page is a non-dirty unmapped page-cache page, it is simply invalidated.
2444  * If the page is mapped, it migrates the contents over.
2445  */
2446 static int soft_offline_in_use_page(struct page *page)
2447 {
2448 	long ret = 0;
2449 	unsigned long pfn = page_to_pfn(page);
2450 	struct page *hpage = compound_head(page);
2451 	char const *msg_page[] = {"page", "hugepage"};
2452 	bool huge = PageHuge(page);
2453 	LIST_HEAD(pagelist);
2454 	struct migration_target_control mtc = {
2455 		.nid = NUMA_NO_NODE,
2456 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
2457 	};
2458 
2459 	if (!huge && PageTransHuge(hpage)) {
2460 		if (try_to_split_thp_page(page)) {
2461 			pr_info("soft offline: %#lx: thp split failed\n", pfn);
2462 			return -EBUSY;
2463 		}
2464 		hpage = page;
2465 	}
2466 
2467 	lock_page(page);
2468 	if (!PageHuge(page))
2469 		wait_on_page_writeback(page);
2470 	if (PageHWPoison(page)) {
2471 		unlock_page(page);
2472 		put_page(page);
2473 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
2474 		return 0;
2475 	}
2476 
2477 	if (!PageHuge(page) && PageLRU(page) && !PageSwapCache(page))
2478 		/*
2479 		 * Try to invalidate first. This should work for
2480 		 * non-dirty unmapped page cache pages.
2481 		 */
2482 		ret = invalidate_inode_page(page);
2483 	unlock_page(page);
2484 
2485 	if (ret) {
2486 		pr_info("soft_offline: %#lx: invalidated\n", pfn);
2487 		page_handle_poison(page, false, true);
2488 		return 0;
2489 	}
2490 
2491 	if (isolate_page(hpage, &pagelist)) {
2492 		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
2493 			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
2494 		if (!ret) {
2495 			bool release = !huge;
2496 
2497 			if (!page_handle_poison(page, huge, release))
2498 				ret = -EBUSY;
2499 		} else {
2500 			if (!list_empty(&pagelist))
2501 				putback_movable_pages(&pagelist);
2502 
2503 			pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
2504 				pfn, msg_page[huge], ret, &page->flags);
2505 			if (ret > 0)
2506 				ret = -EBUSY;
2507 		}
2508 	} else {
2509 		pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
2510 			pfn, msg_page[huge], page_count(page), &page->flags);
2511 		ret = -EBUSY;
2512 	}
2513 	return ret;
2514 }
2515 
2516 static void put_ref_page(struct page *page)
2517 {
2518 	if (page)
2519 		put_page(page);
2520 }
2521 
2522 /**
2523  * soft_offline_page - Soft offline a page.
2524  * @pfn: pfn to soft-offline
2525  * @flags: flags. Same as memory_failure().
2526  *
2527  * Returns 0 on success,
2528  *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
2529  *         < 0 otherwise (a negated errno).
2530  *
2531  * Soft offline a page, by migration or invalidation,
2532  * without killing anything. This is for the case when
2533  * a page is not corrupted yet (so it's still valid to access),
2534  * but has had a number of corrected errors and is better taken
2535  * out.
2536  *
2537  * The actual policy on when to do that is maintained by
2538  * user space.
2539  *
2540  * This should never impact any application or cause data loss,
2541  * however it might take some time.
2542  *
2543  * This is not a 100% solution for all memory, but tries to be
2544  * ``good enough'' for the majority of memory.
2545  */
2546 int soft_offline_page(unsigned long pfn, int flags)
2547 {
2548 	int ret;
2549 	bool try_again = true;
2550 	struct page *page, *ref_page = NULL;
2551 
2552 	WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));
2553 
2554 	if (!pfn_valid(pfn))
2555 		return -ENXIO;
2556 	if (flags & MF_COUNT_INCREASED)
2557 		ref_page = pfn_to_page(pfn);
2558 
2559 	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
2560 	page = pfn_to_online_page(pfn);
2561 	if (!page) {
2562 		put_ref_page(ref_page);
2563 		return -EIO;
2564 	}
2565 
2566 	mutex_lock(&mf_mutex);
2567 
2568 	if (PageHWPoison(page)) {
2569 		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
2570 		put_ref_page(ref_page);
2571 		mutex_unlock(&mf_mutex);
2572 		return 0;
2573 	}
2574 
2575 retry:
2576 	get_online_mems();
2577 	ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
2578 	put_online_mems();
2579 
2580 	if (hwpoison_filter(page)) {
2581 		if (ret > 0)
2582 			put_page(page);
2583 
2584 		mutex_unlock(&mf_mutex);
2585 		return -EOPNOTSUPP;
2586 	}
2587 
2588 	if (ret > 0) {
2589 		ret = soft_offline_in_use_page(page);
2590 	} else if (ret == 0) {
2591 		if (!page_handle_poison(page, true, false) && try_again) {
2592 			try_again = false;
2593 			flags &= ~MF_COUNT_INCREASED;
2594 			goto retry;
2595 		}
2596 	}
2597 
2598 	mutex_unlock(&mf_mutex);
2599 
2600 	return ret;
2601 }
2602 
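/*
 * Clear any leftover HWPoison flags in the @nr_pages struct pages starting
 * at @memmap (e.g. when the backing memory is being removed) and adjust the
 * global poisoned page count accordingly.
 */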
2603 void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
2604 {
2605 	int i, total = 0;
2606 
2607 	/*
2608 	 * A further optimization is to have a per-section refcounted
2609 	 * num_poisoned_pages.  But that would need more space per memmap, so
2610 	 * for now just do a quick global check to speed up this routine in the
2611 	 * absence of bad pages.
2612 	 */
2613 	if (atomic_long_read(&num_poisoned_pages) == 0)
2614 		return;
2615 
2616 	for (i = 0; i < nr_pages; i++) {
2617 		if (PageHWPoison(&memmap[i])) {
2618 			total++;
2619 			ClearPageHWPoison(&memmap[i]);
2620 		}
2621 	}
2622 	if (total)
2623 		num_poisoned_pages_sub(total);
2624 }
2625