1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2008, 2009 Intel Corporation
4  * Authors: Andi Kleen, Fengguang Wu
5  *
6  * High level machine check handler. Handles pages reported by the
7  * hardware as being corrupted, usually due to a multi-bit ECC memory or cache
8  * failure.
9  *
10  * In addition there is a "soft offline" entry point that allows the kernel to
11  * stop using suspicious but not-yet-corrupted pages without killing anything.
12  *
13  * Handles page cache pages in various states. The tricky part
14  * here is that we can access any page asynchronously with respect to
15  * other VM users, because memory failures could happen anytime and
16  * anywhere. This could violate some of their assumptions. This is why
17  * this code has to be extremely careful. Generally it tries to use
18  * normal locking rules, as in get the standard locks, even if that means
19  * the error handling takes potentially a long time.
20  *
21  * It can be very tempting to add handling for obscure cases here.
22  * In general, code for handling new cases should be added only if:
23  * - You know how to test it.
24  * - You have a test that can be added to mce-test
25  *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
26  * - The case actually shows up as a frequent (top 10) page state in
27  *   tools/mm/page-types when running a real workload.
28  *
29  * There are several expensive operations here because the VM data
30  * structures are not well suited to this use. For example, the operation
31  * to map back from RMAP chains to processes has to walk the complete
32  * process list, so its cost grows with the number of processes. But since
33  * memory corruptions are rare we hope to get away with this. This avoids
34  * impacting the core VM.
35  */
36 
37 #define pr_fmt(fmt) "Memory failure: " fmt
38 
39 #include <linux/kernel.h>
40 #include <linux/mm.h>
41 #include <linux/page-flags.h>
42 #include <linux/sched/signal.h>
43 #include <linux/sched/task.h>
44 #include <linux/dax.h>
45 #include <linux/ksm.h>
46 #include <linux/rmap.h>
47 #include <linux/export.h>
48 #include <linux/pagemap.h>
49 #include <linux/swap.h>
50 #include <linux/backing-dev.h>
51 #include <linux/migrate.h>
52 #include <linux/slab.h>
53 #include <linux/swapops.h>
54 #include <linux/hugetlb.h>
55 #include <linux/memory_hotplug.h>
56 #include <linux/mm_inline.h>
57 #include <linux/memremap.h>
58 #include <linux/kfifo.h>
59 #include <linux/ratelimit.h>
60 #include <linux/pagewalk.h>
61 #include <linux/shmem_fs.h>
62 #include <linux/sysctl.h>
63 #include "swap.h"
64 #include "internal.h"
65 #include "ras/ras_event.h"
66 
67 static int sysctl_memory_failure_early_kill __read_mostly;
68 
69 static int sysctl_memory_failure_recovery __read_mostly = 1;
70 
71 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
72 
73 static bool hw_memory_failure __read_mostly = false;
74 
75 static DEFINE_MUTEX(mf_mutex);
76 
77 void num_poisoned_pages_inc(unsigned long pfn)
78 {
79 	atomic_long_inc(&num_poisoned_pages);
80 	memblk_nr_poison_inc(pfn);
81 }
82 
83 void num_poisoned_pages_sub(unsigned long pfn, long i)
84 {
85 	atomic_long_sub(i, &num_poisoned_pages);
86 	if (pfn != -1UL)
87 		memblk_nr_poison_sub(pfn, i);
88 }
89 
90 /**
91  * MF_ATTR_RO - Create a sysfs entry for a memory failure statistic.
92  * @_name: name of the file in the per-NUMA-node sysfs directory.
93  */
94 #define MF_ATTR_RO(_name)					\
95 static ssize_t _name##_show(struct device *dev,			\
96 			    struct device_attribute *attr,	\
97 			    char *buf)				\
98 {								\
99 	struct memory_failure_stats *mf_stats =			\
100 		&NODE_DATA(dev->id)->mf_stats;			\
101 	return sprintf(buf, "%lu\n", mf_stats->_name);		\
102 }								\
103 static DEVICE_ATTR_RO(_name)
104 
105 MF_ATTR_RO(total);
106 MF_ATTR_RO(ignored);
107 MF_ATTR_RO(failed);
108 MF_ATTR_RO(delayed);
109 MF_ATTR_RO(recovered);
110 
111 static struct attribute *memory_failure_attr[] = {
112 	&dev_attr_total.attr,
113 	&dev_attr_ignored.attr,
114 	&dev_attr_failed.attr,
115 	&dev_attr_delayed.attr,
116 	&dev_attr_recovered.attr,
117 	NULL,
118 };
119 
120 const struct attribute_group memory_failure_attr_group = {
121 	.name = "memory_failure",
122 	.attrs = memory_failure_attr,
123 };
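
/*
 * This attribute group is exposed for each NUMA node. A minimal usage
 * sketch, assuming the conventional node device sysfs location (the exact
 * path should be checked against where this group is registered):
 *
 *	# cat /sys/devices/system/node/node0/memory_failure/total
 *	# cat /sys/devices/system/node/node0/memory_failure/recovered
 */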
124 
125 static struct ctl_table memory_failure_table[] = {
126 	{
127 		.procname	= "memory_failure_early_kill",
128 		.data		= &sysctl_memory_failure_early_kill,
129 		.maxlen		= sizeof(sysctl_memory_failure_early_kill),
130 		.mode		= 0644,
131 		.proc_handler	= proc_dointvec_minmax,
132 		.extra1		= SYSCTL_ZERO,
133 		.extra2		= SYSCTL_ONE,
134 	},
135 	{
136 		.procname	= "memory_failure_recovery",
137 		.data		= &sysctl_memory_failure_recovery,
138 		.maxlen		= sizeof(sysctl_memory_failure_recovery),
139 		.mode		= 0644,
140 		.proc_handler	= proc_dointvec_minmax,
141 		.extra1		= SYSCTL_ZERO,
142 		.extra2		= SYSCTL_ONE,
143 	},
144 	{ }
145 };
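
/*
 * These entries are registered under the "vm" sysctl namespace, so the
 * knobs appear as /proc/sys/vm/memory_failure_early_kill and
 * /proc/sys/vm/memory_failure_recovery. An illustrative way to flip the
 * policy at run time (values are clamped to 0 or 1 by proc_dointvec_minmax
 * above):
 *
 *	# sysctl vm.memory_failure_early_kill=1
 *	# echo 1 > /proc/sys/vm/memory_failure_recovery
 */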
146 
147 /*
148  * Return values:
149  *   1:   the page is dissolved (if needed) and taken off from buddy,
150  *   0:   the page is dissolved (if needed) and not taken off from buddy,
151  *   < 0: failed to dissolve.
152  */
153 static int __page_handle_poison(struct page *page)
154 {
155 	int ret;
156 
157 	zone_pcp_disable(page_zone(page));
158 	ret = dissolve_free_huge_page(page);
159 	if (!ret)
160 		ret = take_page_off_buddy(page);
161 	zone_pcp_enable(page_zone(page));
162 
163 	return ret;
164 }
165 
166 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
167 {
168 	if (hugepage_or_freepage) {
169 		/*
170 		 * Doing this check for free pages is also fine since dissolve_free_huge_page
171 		 * returns 0 for non-hugetlb pages as well.
172 		 */
173 		if (__page_handle_poison(page) <= 0)
174 			/*
175 			 * We could fail to take the target page off the buddy
176 			 * list, for example due to a racy page allocation, but
177 			 * that's acceptable because a soft-offlined page is not
178 			 * broken, and whoever won the race may legitimately use
179 			 * the page.
180 			 */
181 			return false;
182 	}
183 
184 	SetPageHWPoison(page);
185 	if (release)
186 		put_page(page);
187 	page_ref_inc(page);
188 	num_poisoned_pages_inc(page_to_pfn(page));
189 
190 	return true;
191 }
192 
193 #if IS_ENABLED(CONFIG_HWPOISON_INJECT)
194 
195 u32 hwpoison_filter_enable = 0;
196 u32 hwpoison_filter_dev_major = ~0U;
197 u32 hwpoison_filter_dev_minor = ~0U;
198 u64 hwpoison_filter_flags_mask;
199 u64 hwpoison_filter_flags_value;
200 EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
201 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
202 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
203 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
204 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
205 
206 static int hwpoison_filter_dev(struct page *p)
207 {
208 	struct address_space *mapping;
209 	dev_t dev;
210 
211 	if (hwpoison_filter_dev_major == ~0U &&
212 	    hwpoison_filter_dev_minor == ~0U)
213 		return 0;
214 
215 	mapping = page_mapping(p);
216 	if (mapping == NULL || mapping->host == NULL)
217 		return -EINVAL;
218 
219 	dev = mapping->host->i_sb->s_dev;
220 	if (hwpoison_filter_dev_major != ~0U &&
221 	    hwpoison_filter_dev_major != MAJOR(dev))
222 		return -EINVAL;
223 	if (hwpoison_filter_dev_minor != ~0U &&
224 	    hwpoison_filter_dev_minor != MINOR(dev))
225 		return -EINVAL;
226 
227 	return 0;
228 }
229 
230 static int hwpoison_filter_flags(struct page *p)
231 {
232 	if (!hwpoison_filter_flags_mask)
233 		return 0;
234 
235 	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
236 				    hwpoison_filter_flags_value)
237 		return 0;
238 	else
239 		return -EINVAL;
240 }
241 
242 /*
243  * This allows stress tests to limit test scope to a collection of tasks
244  * by putting them under some memcg. This prevents killing unrelated/important
245  * processes such as /sbin/init. Note that the target task may share clean
246  * pages with init (e.g. libc text), which is harmless. If the target task
247  * shares _dirty_ pages with another task B, the test scheme must make sure B
248  * is also included in the memcg. Finally, due to race conditions this filter
249  * can only guarantee that the page either belongs to the memcg tasks, or is
250  * a freed page.
251  */
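
/*
 * A sketch of how a test might use this, assuming the debugfs knobs exposed
 * by mm/hwpoison-inject.c (the file names below should be verified against
 * that module; the memcg is identified by the inode number of its cgroupfs
 * directory):
 *
 *	# ino=$(stat -c %i /sys/fs/cgroup/memory/hwpoison-test)
 *	# echo $ino > /sys/kernel/debug/hwpoison/corrupt-filter-memcg
 *	# echo 1 > /sys/kernel/debug/hwpoison/corrupt-filter-enable
 */
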
252 #ifdef CONFIG_MEMCG
253 u64 hwpoison_filter_memcg;
254 EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
255 static int hwpoison_filter_task(struct page *p)
256 {
257 	if (!hwpoison_filter_memcg)
258 		return 0;
259 
260 	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
261 		return -EINVAL;
262 
263 	return 0;
264 }
265 #else
266 static int hwpoison_filter_task(struct page *p) { return 0; }
267 #endif
268 
269 int hwpoison_filter(struct page *p)
270 {
271 	if (!hwpoison_filter_enable)
272 		return 0;
273 
274 	if (hwpoison_filter_dev(p))
275 		return -EINVAL;
276 
277 	if (hwpoison_filter_flags(p))
278 		return -EINVAL;
279 
280 	if (hwpoison_filter_task(p))
281 		return -EINVAL;
282 
283 	return 0;
284 }
285 #else
286 int hwpoison_filter(struct page *p)
287 {
288 	return 0;
289 }
290 #endif
291 
292 EXPORT_SYMBOL_GPL(hwpoison_filter);
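
/*
 * With CONFIG_HWPOISON_INJECT, the filter above is typically exercised via
 * software injection rather than real machine checks; a minimal sketch,
 * assuming the hwpoison/corrupt-pfn debugfs file provided by
 * mm/hwpoison-inject.c:
 *
 *	# echo 0x19600 > /sys/kernel/debug/hwpoison/corrupt-pfn
 */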
293 
294 /*
295  * Kill all processes that have a poisoned page mapped and then isolate
296  * the page.
297  *
298  * General strategy:
299  * Find all processes having the page mapped and kill them.
300  * But we keep a page reference around so that the page is not
301  * actually freed yet.
302  * Then stash the page away
303  *
304  * There's no convenient way to get back to mapped processes
305  * from the VMAs. So do a brute-force search over all
306  * running processes.
307  *
308  * Remember that machine checks are not common (or rather
309  * if they are common you have other problems), so this shouldn't
310  * be a performance issue.
311  *
312  * Also there are some races possible while we get from the
313  * error detection to actually handle it.
314  */
315 
316 struct to_kill {
317 	struct list_head nd;
318 	struct task_struct *tsk;
319 	unsigned long addr;
320 	short size_shift;
321 };
322 
323 /*
324  * Send a signal to all the processes that have the page mapped:
325  * ``action optional'' if they are not immediately affected by the error,
326  * ``action required'' if the error happened in the current execution context.
327  */
328 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
329 {
330 	struct task_struct *t = tk->tsk;
331 	short addr_lsb = tk->size_shift;
332 	int ret = 0;
333 
334 	pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
335 			pfn, t->comm, t->pid);
336 
337 	if ((flags & MF_ACTION_REQUIRED) && (t == current))
338 		ret = force_sig_mceerr(BUS_MCEERR_AR,
339 				 (void __user *)tk->addr, addr_lsb);
340 	else
341 		/*
342 		 * Signal other processes sharing the page if they have
343 		 * PF_MCE_EARLY set.
344 		 * Don't use force here, it's convenient if the signal
345 		 * can be temporarily blocked.
346 		 * This could cause a loop when the user sets SIGBUS
347 		 * to SIG_IGN, but hopefully no one will do that?
348 		 */
349 		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
350 				      addr_lsb, t);
351 	if (ret < 0)
352 		pr_info("Error sending signal to %s:%d: %d\n",
353 			t->comm, t->pid, ret);
354 	return ret;
355 }
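
/*
 * For reference, an illustrative userspace counterpart (not part of this
 * file): a process opts in to early "action optional" notification with
 * PR_MCE_KILL and reads the poisoned address and its granularity from
 * siginfo. fprintf() in a signal handler is not async-signal-safe and is
 * used here only to keep the sketch short.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *	#include <unistd.h>
 *
 *	static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == BUS_MCEERR_AO || si->si_code == BUS_MCEERR_AR)
 *			fprintf(stderr, "poison at %p, lsb %d\n",
 *				si->si_addr, (int)si->si_addr_lsb);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa = {
 *			.sa_sigaction = sigbus_handler,
 *			.sa_flags = SA_SIGINFO,
 *		};
 *
 *		sigaction(SIGBUS, &sa, NULL);
 *		prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
 *		pause();
 *		return 0;
 *	}
 */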
356 
357 /*
358  * Unknown page type encountered. Try to turn the page into an LRU page by
359  * draining the per-CPU LRU caches with lru_add_drain_all().
360  */
361 void shake_page(struct page *p)
362 {
363 	if (PageHuge(p))
364 		return;
365 	/*
366 	 * TODO: Could shrink slab caches here if a lightweight range-based
367 	 * shrinker will be available.
368 	 */
369 	if (PageSlab(p))
370 		return;
371 
372 	lru_add_drain_all();
373 }
374 EXPORT_SYMBOL_GPL(shake_page);
375 
376 static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
377 		unsigned long address)
378 {
379 	unsigned long ret = 0;
380 	pgd_t *pgd;
381 	p4d_t *p4d;
382 	pud_t *pud;
383 	pmd_t *pmd;
384 	pte_t *pte;
385 	pte_t ptent;
386 
387 	VM_BUG_ON_VMA(address == -EFAULT, vma);
388 	pgd = pgd_offset(vma->vm_mm, address);
389 	if (!pgd_present(*pgd))
390 		return 0;
391 	p4d = p4d_offset(pgd, address);
392 	if (!p4d_present(*p4d))
393 		return 0;
394 	pud = pud_offset(p4d, address);
395 	if (!pud_present(*pud))
396 		return 0;
397 	if (pud_devmap(*pud))
398 		return PUD_SHIFT;
399 	pmd = pmd_offset(pud, address);
400 	if (!pmd_present(*pmd))
401 		return 0;
402 	if (pmd_devmap(*pmd))
403 		return PMD_SHIFT;
404 	pte = pte_offset_map(pmd, address);
405 	if (!pte)
406 		return 0;
407 	ptent = ptep_get(pte);
408 	if (pte_present(ptent) && pte_devmap(ptent))
409 		ret = PAGE_SHIFT;
410 	pte_unmap(pte);
411 	return ret;
412 }
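
/*
 * The shift returned above becomes tk->size_shift and ultimately the
 * si_addr_lsb field of the SIGBUS siginfo, telling userspace how much of its
 * mapping is affected. For example, on x86-64 with 4K base pages, a devmap
 * PTE/PMD/PUD mapping yields 12/21/30 respectively.
 */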
413 
414 /*
415  * Failure handling: if we can't find or can't kill a process there's
416  * not much we can do. We just print a message and otherwise ignore the error.
417  */
418 
419 #define FSDAX_INVALID_PGOFF ULONG_MAX
420 
421 /*
422  * Schedule a process for later kill.
423  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
424  *
425  * Note: @fsdax_pgoff is used only when @p is a fsdax page and a
426  * filesystem with a memory failure handler has claimed the
427  * memory_failure event. In all other cases, page->index and
428  * page->mapping are sufficient for mapping the page back to its
429  * corresponding user virtual address.
430  */
431 static void __add_to_kill(struct task_struct *tsk, struct page *p,
432 			  struct vm_area_struct *vma, struct list_head *to_kill,
433 			  unsigned long ksm_addr, pgoff_t fsdax_pgoff)
434 {
435 	struct to_kill *tk;
436 
437 	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
438 	if (!tk) {
439 		pr_err("Out of memory while machine check handling\n");
440 		return;
441 	}
442 
443 	tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
444 	if (is_zone_device_page(p)) {
445 		if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
446 			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
447 		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
448 	} else
449 		tk->size_shift = page_shift(compound_head(p));
450 
451 	/*
452 	 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
453 	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
454 	 * "tk->size_shift == 0" effectively checks for no mapping on
455 	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
456 	 * to a process' address space, it's possible not all N VMAs
457 	 * contain mappings for the page, but at least one VMA does.
458 	 * Only deliver SIGBUS with payload derived from the VMA that
459 	 * has a mapping for the page.
460 	 */
461 	if (tk->addr == -EFAULT) {
462 		pr_info("Unable to find user space address %lx in %s\n",
463 			page_to_pfn(p), tsk->comm);
464 	} else if (tk->size_shift == 0) {
465 		kfree(tk);
466 		return;
467 	}
468 
469 	get_task_struct(tsk);
470 	tk->tsk = tsk;
471 	list_add_tail(&tk->nd, to_kill);
472 }
473 
474 static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
475 				  struct vm_area_struct *vma,
476 				  struct list_head *to_kill)
477 {
478 	__add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF);
479 }
480 
481 #ifdef CONFIG_KSM
482 static bool task_in_to_kill_list(struct list_head *to_kill,
483 				 struct task_struct *tsk)
484 {
485 	struct to_kill *tk, *next;
486 
487 	list_for_each_entry_safe(tk, next, to_kill, nd) {
488 		if (tk->tsk == tsk)
489 			return true;
490 	}
491 
492 	return false;
493 }
494 void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
495 		     struct vm_area_struct *vma, struct list_head *to_kill,
496 		     unsigned long ksm_addr)
497 {
498 	if (!task_in_to_kill_list(to_kill, tsk))
499 		__add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF);
500 }
501 #endif
502 /*
503  * Kill the processes that have been collected earlier.
504  *
505  * Only do anything when FORCEKILL is set, otherwise just free the
506  * list (this is used for clean pages which do not need killing).
507  * Also, when FAIL is set, do a force kill because something went
508  * wrong earlier.
509  */
510 static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
511 		unsigned long pfn, int flags)
512 {
513 	struct to_kill *tk, *next;
514 
515 	list_for_each_entry_safe(tk, next, to_kill, nd) {
516 		if (forcekill) {
517 			/*
518 			 * In case something went wrong with munmapping
519 			 * make sure the process doesn't catch the
520 			 * signal and then access the memory. Just kill it.
521 			 */
522 			if (fail || tk->addr == -EFAULT) {
523 				pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
524 				       pfn, tk->tsk->comm, tk->tsk->pid);
525 				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
526 						 tk->tsk, PIDTYPE_PID);
527 			}
528 
529 			/*
530 			 * In theory the process could have mapped
531 			 * something else on the address in-between. We could
532 			 * check for that, but we need to tell the
533 			 * process anyways.
534 			 */
535 			else if (kill_proc(tk, pfn, flags) < 0)
536 				pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
537 				       pfn, tk->tsk->comm, tk->tsk->pid);
538 		}
539 		list_del(&tk->nd);
540 		put_task_struct(tk->tsk);
541 		kfree(tk);
542 	}
543 }
544 
545 /*
546  * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
547  * on behalf of the thread group. Return task_struct of the (first found)
548  * dedicated thread if found, and return NULL otherwise.
549  *
550  * We already hold the RCU read lock in the caller, so we don't have to call
551  * rcu_read_lock/unlock() in this function.
552  */
553 static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
554 {
555 	struct task_struct *t;
556 
557 	for_each_thread(tsk, t) {
558 		if (t->flags & PF_MCE_PROCESS) {
559 			if (t->flags & PF_MCE_EARLY)
560 				return t;
561 		} else {
562 			if (sysctl_memory_failure_early_kill)
563 				return t;
564 		}
565 	}
566 	return NULL;
567 }
568 
569 /*
570  * Determine whether a given process is an "early kill" process, i.e. one
571  * that expects to be signaled when some page under the process is hwpoisoned.
572  * Return the task_struct of the dedicated thread (main thread unless
573  * explicitly specified) if the process is "early kill", and NULL otherwise.
574  *
575  * Note that the above holds for the Action Optional case. For the Action
576  * Required case, the error is only meaningful to the current thread, which
577  * must be signaled with SIGBUS; it is Action Optional for the other,
578  * non-current processes sharing the same error page. If such a process is
579  * "early kill", the task_struct of its dedicated thread is also returned.
580  */
581 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
582 {
583 	if (!tsk->mm)
584 		return NULL;
585 	/*
586 	 * Comparing ->mm here because current task might represent
587 	 * a subthread, while tsk always points to the main thread.
588 	 */
589 	if (force_early && tsk->mm == current->mm)
590 		return current;
591 
592 	return find_early_kill_thread(tsk);
593 }
594 
595 /*
596  * Collect processes when the error hit an anonymous page.
597  */
598 static void collect_procs_anon(struct folio *folio, struct page *page,
599 		struct list_head *to_kill, int force_early)
600 {
601 	struct vm_area_struct *vma;
602 	struct task_struct *tsk;
603 	struct anon_vma *av;
604 	pgoff_t pgoff;
605 
606 	av = folio_lock_anon_vma_read(folio, NULL);
607 	if (av == NULL)	/* Not actually mapped anymore */
608 		return;
609 
610 	pgoff = page_to_pgoff(page);
611 	rcu_read_lock();
612 	for_each_process(tsk) {
613 		struct anon_vma_chain *vmac;
614 		struct task_struct *t = task_early_kill(tsk, force_early);
615 
616 		if (!t)
617 			continue;
618 		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
619 					       pgoff, pgoff) {
620 			vma = vmac->vma;
621 			if (vma->vm_mm != t->mm)
622 				continue;
623 			if (!page_mapped_in_vma(page, vma))
624 				continue;
625 			add_to_kill_anon_file(t, page, vma, to_kill);
626 		}
627 	}
628 	rcu_read_unlock();
629 	anon_vma_unlock_read(av);
630 }
631 
632 /*
633  * Collect processes when the error hit a file mapped page.
634  */
635 static void collect_procs_file(struct folio *folio, struct page *page,
636 		struct list_head *to_kill, int force_early)
637 {
638 	struct vm_area_struct *vma;
639 	struct task_struct *tsk;
640 	struct address_space *mapping = folio->mapping;
641 	pgoff_t pgoff;
642 
643 	i_mmap_lock_read(mapping);
644 	rcu_read_lock();
645 	pgoff = page_to_pgoff(page);
646 	for_each_process(tsk) {
647 		struct task_struct *t = task_early_kill(tsk, force_early);
648 
649 		if (!t)
650 			continue;
651 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
652 				      pgoff) {
653 			/*
654 			 * Send early kill signal to tasks where a vma covers
655 			 * the page but the corrupted page is not necessarily
656 			 * mapped in its pte.
657 			 * Assume applications that requested early kill want
658 			 * to be informed of all such data corruptions.
659 			 */
660 			if (vma->vm_mm == t->mm)
661 				add_to_kill_anon_file(t, page, vma, to_kill);
662 		}
663 	}
664 	rcu_read_unlock();
665 	i_mmap_unlock_read(mapping);
666 }
667 
668 #ifdef CONFIG_FS_DAX
669 static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
670 			      struct vm_area_struct *vma,
671 			      struct list_head *to_kill, pgoff_t pgoff)
672 {
673 	__add_to_kill(tsk, p, vma, to_kill, 0, pgoff);
674 }
675 
676 /*
677  * Collect processes when the error hit a fsdax page.
678  */
679 static void collect_procs_fsdax(struct page *page,
680 		struct address_space *mapping, pgoff_t pgoff,
681 		struct list_head *to_kill, bool pre_remove)
682 {
683 	struct vm_area_struct *vma;
684 	struct task_struct *tsk;
685 
686 	i_mmap_lock_read(mapping);
687 	rcu_read_lock();
688 	for_each_process(tsk) {
689 		struct task_struct *t = tsk;
690 
691 		/*
692 		 * Search for all tasks while MF_MEM_PRE_REMOVE is set, because
693 		 * the current task may not be the one accessing the fsdax page.
694 		 * Otherwise, search for the current task.
695 		 */
696 		if (!pre_remove)
697 			t = task_early_kill(tsk, true);
698 		if (!t)
699 			continue;
700 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
701 			if (vma->vm_mm == t->mm)
702 				add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
703 		}
704 	}
705 	rcu_read_unlock();
706 	i_mmap_unlock_read(mapping);
707 }
708 #endif /* CONFIG_FS_DAX */
709 
710 /*
711  * Collect the processes that have the corrupted page mapped so they can be killed.
712  */
713 static void collect_procs(struct folio *folio, struct page *page,
714 		struct list_head *tokill, int force_early)
715 {
716 	if (!folio->mapping)
717 		return;
718 	if (unlikely(PageKsm(page)))
719 		collect_procs_ksm(page, tokill, force_early);
720 	else if (PageAnon(page))
721 		collect_procs_anon(folio, page, tokill, force_early);
722 	else
723 		collect_procs_file(folio, page, tokill, force_early);
724 }
725 
726 struct hwpoison_walk {
727 	struct to_kill tk;
728 	unsigned long pfn;
729 	int flags;
730 };
731 
732 static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
733 {
734 	tk->addr = addr;
735 	tk->size_shift = shift;
736 }
737 
738 static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
739 				unsigned long poisoned_pfn, struct to_kill *tk)
740 {
741 	unsigned long pfn = 0;
742 
743 	if (pte_present(pte)) {
744 		pfn = pte_pfn(pte);
745 	} else {
746 		swp_entry_t swp = pte_to_swp_entry(pte);
747 
748 		if (is_hwpoison_entry(swp))
749 			pfn = swp_offset_pfn(swp);
750 	}
751 
752 	if (!pfn || pfn != poisoned_pfn)
753 		return 0;
754 
755 	set_to_kill(tk, addr, shift);
756 	return 1;
757 }
758 
759 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
760 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
761 				      struct hwpoison_walk *hwp)
762 {
763 	pmd_t pmd = *pmdp;
764 	unsigned long pfn;
765 	unsigned long hwpoison_vaddr;
766 
767 	if (!pmd_present(pmd))
768 		return 0;
769 	pfn = pmd_pfn(pmd);
770 	if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
771 		hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
772 		set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
773 		return 1;
774 	}
775 	return 0;
776 }
777 #else
778 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
779 				      struct hwpoison_walk *hwp)
780 {
781 	return 0;
782 }
783 #endif
784 
785 static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
786 			      unsigned long end, struct mm_walk *walk)
787 {
788 	struct hwpoison_walk *hwp = walk->private;
789 	int ret = 0;
790 	pte_t *ptep, *mapped_pte;
791 	spinlock_t *ptl;
792 
793 	ptl = pmd_trans_huge_lock(pmdp, walk->vma);
794 	if (ptl) {
795 		ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
796 		spin_unlock(ptl);
797 		goto out;
798 	}
799 
800 	mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
801 						addr, &ptl);
802 	if (!ptep)
803 		goto out;
804 
805 	for (; addr != end; ptep++, addr += PAGE_SIZE) {
806 		ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT,
807 					     hwp->pfn, &hwp->tk);
808 		if (ret == 1)
809 			break;
810 	}
811 	pte_unmap_unlock(mapped_pte, ptl);
812 out:
813 	cond_resched();
814 	return ret;
815 }
816 
817 #ifdef CONFIG_HUGETLB_PAGE
818 static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
819 			    unsigned long addr, unsigned long end,
820 			    struct mm_walk *walk)
821 {
822 	struct hwpoison_walk *hwp = walk->private;
823 	pte_t pte = huge_ptep_get(ptep);
824 	struct hstate *h = hstate_vma(walk->vma);
825 
826 	return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
827 				      hwp->pfn, &hwp->tk);
828 }
829 #else
830 #define hwpoison_hugetlb_range	NULL
831 #endif
832 
833 static const struct mm_walk_ops hwpoison_walk_ops = {
834 	.pmd_entry = hwpoison_pte_range,
835 	.hugetlb_entry = hwpoison_hugetlb_range,
836 	.walk_lock = PGWALK_RDLOCK,
837 };
838 
839 /*
840  * Sends SIGBUS to the current process with error info.
841  *
842  * This function is intended to handle "Action Required" MCEs on already
843  * hardware poisoned pages. They could happen, for example, when
844  * memory_failure() failed to unmap the error page at the first call, or
845  * when multiple local machine checks happened on different CPUs.
846  *
847  * The MCE handler currently has no easy access to the error virtual address,
848  * so this function walks the page tables to find it. The returned virtual
849  * address is correct in most cases, but it could be wrong when the
850  * application process has multiple entries mapping the error page.
851  */
852 static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
853 				  int flags)
854 {
855 	int ret;
856 	struct hwpoison_walk priv = {
857 		.pfn = pfn,
858 	};
859 	priv.tk.tsk = p;
860 
861 	if (!p->mm)
862 		return -EFAULT;
863 
864 	mmap_read_lock(p->mm);
865 	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
866 			      (void *)&priv);
867 	if (ret == 1 && priv.tk.addr)
868 		kill_proc(&priv.tk, pfn, flags);
869 	else
870 		ret = 0;
871 	mmap_read_unlock(p->mm);
872 	return ret > 0 ? -EHWPOISON : -EFAULT;
873 }
874 
875 static const char *action_name[] = {
876 	[MF_IGNORED] = "Ignored",
877 	[MF_FAILED] = "Failed",
878 	[MF_DELAYED] = "Delayed",
879 	[MF_RECOVERED] = "Recovered",
880 };
881 
882 static const char * const action_page_types[] = {
883 	[MF_MSG_KERNEL]			= "reserved kernel page",
884 	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
885 	[MF_MSG_SLAB]			= "kernel slab page",
886 	[MF_MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
887 	[MF_MSG_HUGE]			= "huge page",
888 	[MF_MSG_FREE_HUGE]		= "free huge page",
889 	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
890 	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
891 	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
892 	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
893 	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
894 	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
895 	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
896 	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
897 	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
898 	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
899 	[MF_MSG_BUDDY]			= "free buddy page",
900 	[MF_MSG_DAX]			= "dax page",
901 	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
902 	[MF_MSG_UNKNOWN]		= "unknown page",
903 };
904 
905 /*
906  * XXX: It is possible that a page is isolated from LRU cache,
907  * and then kept in the swap cache, or fails to be removed from the page cache.
908  * The page count will stop it from being freed by unpoison.
909  * Stress tests should be aware of this memory leak problem.
910  */
911 static int delete_from_lru_cache(struct folio *folio)
912 {
913 	if (folio_isolate_lru(folio)) {
914 		/*
915 		 * Clear the relevant page flags, so that the buddy system won't
916 		 * complain when the folio is unpoisoned and freed.
917 		 */
918 		folio_clear_active(folio);
919 		folio_clear_unevictable(folio);
920 
921 		/*
922 		 * Poisoned page might never drop its ref count to 0 so we have
923 		 * to uncharge it manually from its memcg.
924 		 */
925 		mem_cgroup_uncharge(folio);
926 
927 		/*
928 		 * drop the refcount elevated by folio_isolate_lru()
929 		 */
930 		folio_put(folio);
931 		return 0;
932 	}
933 	return -EIO;
934 }
935 
936 static int truncate_error_folio(struct folio *folio, unsigned long pfn,
937 				struct address_space *mapping)
938 {
939 	int ret = MF_FAILED;
940 
941 	if (mapping->a_ops->error_remove_folio) {
942 		int err = mapping->a_ops->error_remove_folio(mapping, folio);
943 
944 		if (err != 0)
945 			pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
946 		else if (!filemap_release_folio(folio, GFP_NOIO))
947 			pr_info("%#lx: failed to release buffers\n", pfn);
948 		else
949 			ret = MF_RECOVERED;
950 	} else {
951 		/*
952 		 * If the file system doesn't support it, just invalidate.
953 		 * This fails on dirty folios or anything with private pages.
954 		 */
955 		if (mapping_evict_folio(mapping, folio))
956 			ret = MF_RECOVERED;
957 		else
958 			pr_info("%#lx: Failed to invalidate\n",	pfn);
959 	}
960 
961 	return ret;
962 }
963 
964 struct page_state {
965 	unsigned long mask;
966 	unsigned long res;
967 	enum mf_action_page_type type;
968 
969 	/* Callback ->action() has to unlock the relevant page inside it. */
970 	int (*action)(struct page_state *ps, struct page *p);
971 };
972 
973 /*
974  * Return true if the page is still referenced by others, otherwise return
975  * false.
976  *
977  * extra_pins is true when one extra refcount is expected.
978  */
979 static bool has_extra_refcount(struct page_state *ps, struct page *p,
980 			       bool extra_pins)
981 {
982 	int count = page_count(p) - 1;
983 
984 	if (extra_pins)
985 		count -= folio_nr_pages(page_folio(p));
986 
987 	if (count > 0) {
988 		pr_err("%#lx: %s still referenced by %d users\n",
989 		       page_to_pfn(p), action_page_types[ps->type], count);
990 		return true;
991 	}
992 
993 	return false;
994 }
995 
996 /*
997  * The error hit a kernel page.
998  * Do nothing: try to be lucky and just not touch it. For a few cases we
999  * could be more sophisticated.
1000  */
1001 static int me_kernel(struct page_state *ps, struct page *p)
1002 {
1003 	unlock_page(p);
1004 	return MF_IGNORED;
1005 }
1006 
1007 /*
1008  * Page in unknown state. Do nothing.
1009  */
1010 static int me_unknown(struct page_state *ps, struct page *p)
1011 {
1012 	pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
1013 	unlock_page(p);
1014 	return MF_FAILED;
1015 }
1016 
1017 /*
1018  * Clean (or cleaned) page cache page.
1019  */
1020 static int me_pagecache_clean(struct page_state *ps, struct page *p)
1021 {
1022 	struct folio *folio = page_folio(p);
1023 	int ret;
1024 	struct address_space *mapping;
1025 	bool extra_pins;
1026 
1027 	delete_from_lru_cache(folio);
1028 
1029 	/*
1030 	 * For anonymous folios the only reference left
1031 	 * should be the one m_f() holds.
1032 	 */
1033 	if (folio_test_anon(folio)) {
1034 		ret = MF_RECOVERED;
1035 		goto out;
1036 	}
1037 
1038 	/*
1039 	 * Now truncate the page in the page cache. This is really
1040 	 * more like a "temporary hole punch".
1041 	 * Don't do this for block devices when someone else
1042 	 * has a reference, because it could be file system metadata
1043 	 * and that's not safe to truncate.
1044 	 */
1045 	mapping = folio_mapping(folio);
1046 	if (!mapping) {
1047 		/* Folio has been torn down in the meantime */
1048 		ret = MF_FAILED;
1049 		goto out;
1050 	}
1051 
1052 	/*
1053 	 * A shmem page is kept in the page cache instead of being truncated,
1054 	 * so it is expected to have an extra refcount after error handling.
1055 	 */
1056 	extra_pins = shmem_mapping(mapping);
1057 
1058 	/*
1059 	 * Truncation is a bit tricky. Enable it per file system for now.
1060 	 *
1061 	 * Open: to take i_rwsem or not for this? Right now we don't.
1062 	 */
1063 	ret = truncate_error_folio(folio, page_to_pfn(p), mapping);
1064 	if (has_extra_refcount(ps, p, extra_pins))
1065 		ret = MF_FAILED;
1066 
1067 out:
1068 	folio_unlock(folio);
1069 
1070 	return ret;
1071 }
1072 
1073 /*
1074  * Dirty pagecache page.
1075  * Issues: when the error hits a hole page the error is not properly
1076  * propagated.
1077  */
1078 static int me_pagecache_dirty(struct page_state *ps, struct page *p)
1079 {
1080 	struct address_space *mapping = page_mapping(p);
1081 
1082 	SetPageError(p);
1083 	/* TBD: print more information about the file. */
1084 	if (mapping) {
1085 		/*
1086 		 * IO error will be reported by write(), fsync(), etc.
1087 		 * who check the mapping.
1088 		 * This way the application knows that something went
1089 		 * wrong with its dirty file data.
1090 		 *
1091 		 * There's one open issue:
1092 		 *
1093 		 * The EIO will be only reported on the next IO
1094 		 * operation and then cleared through the IO map.
1095 		 * Normally Linux has two mechanisms to pass IO error
1096 		 * first through the AS_EIO flag in the address space
1097 		 * and then through the PageError flag in the page.
1098 		 * Since we drop pages on memory failure handling the
1099 		 * only mechanism open to use is through AS_EIO.
1100 		 *
1101 		 * This has the disadvantage that it gets cleared on
1102 		 * the first operation that returns an error, while
1103 		 * the PageError bit is more sticky and only cleared
1104 		 * when the page is reread or dropped.  If an
1105 		 * application assumes it will always get an error on
1106 		 * fsync, but does other operations on the fd in between
1107 		 * and the page is dropped in the meantime, then the error
1108 		 * will not be properly reported.
1109 		 *
1110 		 * This can already happen even without hwpoisoned
1111 		 * pages: first on metadata IO errors (which only
1112 		 * report through AS_EIO) or when the page is dropped
1113 		 * at the wrong time.
1114 		 *
1115 		 * So right now we assume that the application DTRT on
1116 		 * the first EIO, but we're not worse than other parts
1117 		 * of the kernel.
1118 		 */
1119 		mapping_set_error(mapping, -EIO);
1120 	}
1121 
1122 	return me_pagecache_clean(ps, p);
1123 }
1124 
1125 /*
1126  * Clean and dirty swap cache.
1127  *
1128  * Dirty swap cache page is tricky to handle. The page could live both in page
1129  * cache and swap cache (i.e. the page was freshly swapped in). So it could be
1130  * referenced concurrently by 2 types of PTEs:
1131  * normal PTEs and swap PTEs. We try to handle them consistently by calling
1132  * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs,
1133  * and then
1134  *      - clear dirty bit to prevent IO
1135  *      - remove from LRU
1136  *      - but keep in the swap cache, so that when we return to it on
1137  *        a later page fault, we know the application is accessing
1138  *        corrupted data and shall be killed (we installed simple
1139  *        interception code in do_swap_page to catch it).
1140  *
1141  * Clean swap cache pages can be directly isolated. A later page fault will
1142  * bring in the known good data from disk.
1143  */
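
/*
 * For reference, the "interception code in do_swap_page" mentioned above is,
 * roughly, the PageHWPoison check performed after the swap cache lookup in
 * mm/memory.c (a paraphrased sketch, not an exact quote):
 *
 *	if (unlikely(PageHWPoison(page))) {
 *		ret = VM_FAULT_HWPOISON;
 *		goto out_page;
 *	}
 */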
1144 static int me_swapcache_dirty(struct page_state *ps, struct page *p)
1145 {
1146 	struct folio *folio = page_folio(p);
1147 	int ret;
1148 	bool extra_pins = false;
1149 
1150 	folio_clear_dirty(folio);
1151 	/* Trigger EIO in shmem: */
1152 	folio_clear_uptodate(folio);
1153 
1154 	ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED;
1155 	folio_unlock(folio);
1156 
1157 	if (ret == MF_DELAYED)
1158 		extra_pins = true;
1159 
1160 	if (has_extra_refcount(ps, p, extra_pins))
1161 		ret = MF_FAILED;
1162 
1163 	return ret;
1164 }
1165 
1166 static int me_swapcache_clean(struct page_state *ps, struct page *p)
1167 {
1168 	struct folio *folio = page_folio(p);
1169 	int ret;
1170 
1171 	delete_from_swap_cache(folio);
1172 
1173 	ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
1174 	folio_unlock(folio);
1175 
1176 	if (has_extra_refcount(ps, p, false))
1177 		ret = MF_FAILED;
1178 
1179 	return ret;
1180 }
1181 
1182 /*
1183  * Huge pages. Needs work.
1184  * Issues:
1185  * - An error on a hugepage is contained at hugepage unit, not raw page unit.
1186  *   To narrow the kill region down to one page, we need to break up the pmd.
1187  */
1188 static int me_huge_page(struct page_state *ps, struct page *p)
1189 {
1190 	struct folio *folio = page_folio(p);
1191 	int res;
1192 	struct address_space *mapping;
1193 	bool extra_pins = false;
1194 
1195 	mapping = folio_mapping(folio);
1196 	if (mapping) {
1197 		res = truncate_error_folio(folio, page_to_pfn(p), mapping);
1198 		/* The page is kept in page cache. */
1199 		extra_pins = true;
1200 		folio_unlock(folio);
1201 	} else {
1202 		folio_unlock(folio);
1203 		/*
1204 		 * migration entry prevents later access on error hugepage,
1205 		 * so we can free and dissolve it into buddy to save healthy
1206 		 * subpages.
1207 		 */
1208 		folio_put(folio);
1209 		if (__page_handle_poison(p) >= 0) {
1210 			page_ref_inc(p);
1211 			res = MF_RECOVERED;
1212 		} else {
1213 			res = MF_FAILED;
1214 		}
1215 	}
1216 
1217 	if (has_extra_refcount(ps, p, extra_pins))
1218 		res = MF_FAILED;
1219 
1220 	return res;
1221 }
1222 
1223 /*
1224  * Various page states we can handle.
1225  *
1226  * A page state is defined by its current page->flags bits.
1227  * The table matches them in order and calls the right handler.
1228  *
1229  * This is quite tricky because we can access a page at any time
1230  * in its life cycle, so all accesses have to be extremely careful.
1231  *
1232  * This is not complete. More states could be added.
1233  * For any missing state don't attempt recovery.
1234  */
1235 
1236 #define dirty		(1UL << PG_dirty)
1237 #define sc		((1UL << PG_swapcache) | (1UL << PG_swapbacked))
1238 #define unevict		(1UL << PG_unevictable)
1239 #define mlock		(1UL << PG_mlocked)
1240 #define lru		(1UL << PG_lru)
1241 #define head		(1UL << PG_head)
1242 #define slab		(1UL << PG_slab)
1243 #define reserved	(1UL << PG_reserved)
1244 
1245 static struct page_state error_states[] = {
1246 	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
1247 	/*
1248 	 * free pages are specially detected outside this table:
1249 	 * PG_buddy pages only make a small fraction of all free pages.
1250 	 */
1251 
1252 	/*
1253 	 * Could in theory check if slab page is free or if we can drop
1254 	 * currently unused objects without touching them. But just
1255 	 * treat it as standard kernel for now.
1256 	 */
1257 	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },
1258 
1259 	{ head,		head,		MF_MSG_HUGE,		me_huge_page },
1260 
1261 	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
1262 	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },
1263 
1264 	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
1265 	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },
1266 
1267 	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
1268 	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },
1269 
1270 	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
1271 	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },
1272 
1273 	/*
1274 	 * Catchall entry: must be at end.
1275 	 */
1276 	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
1277 };
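
/*
 * A worked example of the matching above: a clean page in the swap cache has
 * PG_swapcache and PG_swapbacked set but PG_dirty clear, so
 * (flags & (sc|dirty)) == sc holds and the MF_MSG_CLEAN_SWAPCACHE row is the
 * first whose "(flags & mask) == res" test succeeds; identify_page_state()
 * below will then call me_swapcache_clean().
 */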
1278 
1279 #undef dirty
1280 #undef sc
1281 #undef unevict
1282 #undef mlock
1283 #undef lru
1284 #undef head
1285 #undef slab
1286 #undef reserved
1287 
1288 static void update_per_node_mf_stats(unsigned long pfn,
1289 				     enum mf_result result)
1290 {
1291 	int nid = MAX_NUMNODES;
1292 	struct memory_failure_stats *mf_stats = NULL;
1293 
1294 	nid = pfn_to_nid(pfn);
1295 	if (unlikely(nid < 0 || nid >= MAX_NUMNODES)) {
1296 		WARN_ONCE(1, "Memory failure: pfn=%#lx, invalid nid=%d", pfn, nid);
1297 		return;
1298 	}
1299 
1300 	mf_stats = &NODE_DATA(nid)->mf_stats;
1301 	switch (result) {
1302 	case MF_IGNORED:
1303 		++mf_stats->ignored;
1304 		break;
1305 	case MF_FAILED:
1306 		++mf_stats->failed;
1307 		break;
1308 	case MF_DELAYED:
1309 		++mf_stats->delayed;
1310 		break;
1311 	case MF_RECOVERED:
1312 		++mf_stats->recovered;
1313 		break;
1314 	default:
1315 		WARN_ONCE(1, "Memory failure: mf_result=%d is not properly handled", result);
1316 		break;
1317 	}
1318 	++mf_stats->total;
1319 }
1320 
1321 /*
1322  * "Dirty/Clean" indication is not 100% accurate due to the possibility of
1323  * setting PG_dirty outside page lock. See also comment above set_page_dirty().
1324  */
1325 static int action_result(unsigned long pfn, enum mf_action_page_type type,
1326 			 enum mf_result result)
1327 {
1328 	trace_memory_failure_event(pfn, type, result);
1329 
1330 	num_poisoned_pages_inc(pfn);
1331 
1332 	update_per_node_mf_stats(pfn, result);
1333 
1334 	pr_err("%#lx: recovery action for %s: %s\n",
1335 		pfn, action_page_types[type], action_name[result]);
1336 
1337 	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
1338 }
1339 
1340 static int page_action(struct page_state *ps, struct page *p,
1341 			unsigned long pfn)
1342 {
1343 	int result;
1344 
1345 	/* page p should be unlocked after returning from ps->action().  */
1346 	result = ps->action(ps, p);
1347 
1348 	/* Could do more checks here if page looks ok */
1349 	/*
1350 	 * Could adjust zone counters here to correct for the missing page.
1351 	 */
1352 
1353 	return action_result(pfn, ps->type, result);
1354 }
1355 
1356 static inline bool PageHWPoisonTakenOff(struct page *page)
1357 {
1358 	return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
1359 }
1360 
1361 void SetPageHWPoisonTakenOff(struct page *page)
1362 {
1363 	set_page_private(page, MAGIC_HWPOISON);
1364 }
1365 
1366 void ClearPageHWPoisonTakenOff(struct page *page)
1367 {
1368 	if (PageHWPoison(page))
1369 		set_page_private(page, 0);
1370 }
1371 
1372 /*
1373  * Return true if a page type of a given page is supported by hwpoison
1374  * mechanism (while handling could fail), otherwise false.  This function
1375  * does not return true for hugetlb or device memory pages, so it's assumed
1376  * to be called only in the context where we never have such pages.
1377  */
1378 static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
1379 {
1380 	if (PageSlab(page))
1381 		return false;
1382 
1383 	/* Soft offline could migrate non-LRU movable pages */
1384 	if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
1385 		return true;
1386 
1387 	return PageLRU(page) || is_free_buddy_page(page);
1388 }
1389 
1390 static int __get_hwpoison_page(struct page *page, unsigned long flags)
1391 {
1392 	struct folio *folio = page_folio(page);
1393 	int ret = 0;
1394 	bool hugetlb = false;
1395 
1396 	ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, false);
1397 	if (hugetlb) {
1398 		/* Make sure hugetlb demotion did not happen from under us. */
1399 		if (folio == page_folio(page))
1400 			return ret;
1401 		if (ret > 0) {
1402 			folio_put(folio);
1403 			folio = page_folio(page);
1404 		}
1405 	}
1406 
1407 	/*
1408 	 * This check prevents us from calling folio_try_get() for any
1409 	 * unsupported type of folio in order to reduce the risk of unexpected
1410 	 * races caused by taking a folio refcount.
1411 	 */
1412 	if (!HWPoisonHandlable(&folio->page, flags))
1413 		return -EBUSY;
1414 
1415 	if (folio_try_get(folio)) {
1416 		if (folio == page_folio(page))
1417 			return 1;
1418 
1419 		pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
1420 		folio_put(folio);
1421 	}
1422 
1423 	return 0;
1424 }
1425 
1426 static int get_any_page(struct page *p, unsigned long flags)
1427 {
1428 	int ret = 0, pass = 0;
1429 	bool count_increased = false;
1430 
1431 	if (flags & MF_COUNT_INCREASED)
1432 		count_increased = true;
1433 
1434 try_again:
1435 	if (!count_increased) {
1436 		ret = __get_hwpoison_page(p, flags);
1437 		if (!ret) {
1438 			if (page_count(p)) {
1439 				/* We raced with an allocation, retry. */
1440 				if (pass++ < 3)
1441 					goto try_again;
1442 				ret = -EBUSY;
1443 			} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
1444 				/* We raced with put_page, retry. */
1445 				if (pass++ < 3)
1446 					goto try_again;
1447 				ret = -EIO;
1448 			}
1449 			goto out;
1450 		} else if (ret == -EBUSY) {
1451 			/*
1452 			 * We raced with a (possibly temporarily) unhandlable
1453 			 * page, retry.
1454 			 */
1455 			if (pass++ < 3) {
1456 				shake_page(p);
1457 				goto try_again;
1458 			}
1459 			ret = -EIO;
1460 			goto out;
1461 		}
1462 	}
1463 
1464 	if (PageHuge(p) || HWPoisonHandlable(p, flags)) {
1465 		ret = 1;
1466 	} else {
1467 		/*
1468 		 * A page we cannot handle. Check whether we can turn
1469 		 * it into something we can handle.
1470 		 */
1471 		if (pass++ < 3) {
1472 			put_page(p);
1473 			shake_page(p);
1474 			count_increased = false;
1475 			goto try_again;
1476 		}
1477 		put_page(p);
1478 		ret = -EIO;
1479 	}
1480 out:
1481 	if (ret == -EIO)
1482 		pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));
1483 
1484 	return ret;
1485 }
1486 
1487 static int __get_unpoison_page(struct page *page)
1488 {
1489 	struct folio *folio = page_folio(page);
1490 	int ret = 0;
1491 	bool hugetlb = false;
1492 
1493 	ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, true);
1494 	if (hugetlb) {
1495 		/* Make sure hugetlb demotion did not happen from under us. */
1496 		if (folio == page_folio(page))
1497 			return ret;
1498 		if (ret > 0)
1499 			folio_put(folio);
1500 	}
1501 
1502 	/*
1503 	 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison,
1504 	 * but also isolated from the buddy freelist, so we need to identify
1505 	 * this state and cancel both operations to unpoison.
1506 	 */
1507 	if (PageHWPoisonTakenOff(page))
1508 		return -EHWPOISON;
1509 
1510 	return get_page_unless_zero(page) ? 1 : 0;
1511 }
1512 
1513 /**
1514  * get_hwpoison_page() - Get refcount for memory error handling
1515  * @p:		Raw error page (hit by memory error)
1516  * @flags:	Flags controlling behavior of error handling
1517  *
1518  * get_hwpoison_page() takes a refcount on an error page in order to handle
1519  * the memory error on it, after checking that the error page is in a
1520  * well-defined state (i.e. a page type on which we can successfully handle
1521  * the memory error, such as an LRU page or a hugetlb page).
1522  *
1523  * Memory error handling could be triggered at any time on any type of page,
1524  * so it's prone to race with typical memory management lifecycle (like
1525  * allocation and free).  So to avoid such races, get_hwpoison_page() takes
1526  * extra care for the error page's state (as done in __get_hwpoison_page()),
1527  * and has some retry logic in get_any_page().
1528  *
1529  * When called from unpoison_memory(), the caller should already ensure that
1530  * the given page has PG_hwpoison. So it's never reused for other page
1531  * allocations, and __get_unpoison_page() never races with them.
1532  *
1533  * Return: 0 on failure,
1534  *         1 on success for in-use pages in a well-defined state,
1535  *         -EIO for pages on which we can not handle memory errors,
1536  *         -EBUSY when get_hwpoison_page() has raced with page lifecycle
1537  *         operations like allocation and free,
1538  *         -EHWPOISON when the page is hwpoisoned and taken off from buddy.
1539  */
1540 static int get_hwpoison_page(struct page *p, unsigned long flags)
1541 {
1542 	int ret;
1543 
1544 	zone_pcp_disable(page_zone(p));
1545 	if (flags & MF_UNPOISON)
1546 		ret = __get_unpoison_page(p);
1547 	else
1548 		ret = get_any_page(p, flags);
1549 	zone_pcp_enable(page_zone(p));
1550 
1551 	return ret;
1552 }
1553 
1554 /*
1555  * Do all that is necessary to remove user space mappings. Unmap
1556  * the pages and send SIGBUS to the processes if the data was dirty.
1557  */
1558 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
1559 				  int flags, struct page *hpage)
1560 {
1561 	struct folio *folio = page_folio(hpage);
1562 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
1563 	struct address_space *mapping;
1564 	LIST_HEAD(tokill);
1565 	bool unmap_success;
1566 	int forcekill;
1567 	bool mlocked = PageMlocked(hpage);
1568 
1569 	/*
1570 	 * Here we are interested only in user-mapped pages, so skip any
1571 	 * other types of pages.
1572 	 */
1573 	if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
1574 		return true;
1575 	if (!(PageLRU(hpage) || PageHuge(p)))
1576 		return true;
1577 
1578 	/*
1579 	 * This check implies we don't kill processes if their pages
1580 	 * are in the swap cache early. Those are always late kills.
1581 	 */
1582 	if (!page_mapped(p))
1583 		return true;
1584 
1585 	if (PageSwapCache(p)) {
1586 		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
1587 		ttu &= ~TTU_HWPOISON;
1588 	}
1589 
1590 	/*
1591 	 * Propagate the dirty bit from PTEs to struct page first, because we
1592 	 * need this to decide if we should kill or just drop the page.
1593 	 * XXX: the dirty test could be racy: set_page_dirty() may not always
1594 	 * be called inside page lock (it's recommended but not enforced).
1595 	 */
1596 	mapping = page_mapping(hpage);
1597 	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
1598 	    mapping_can_writeback(mapping)) {
1599 		if (page_mkclean(hpage)) {
1600 			SetPageDirty(hpage);
1601 		} else {
1602 			ttu &= ~TTU_HWPOISON;
1603 			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
1604 				pfn);
1605 		}
1606 	}
1607 
1608 	/*
1609 	 * First collect all the processes that have the page
1610 	 * mapped in dirty form.  This has to be done before try_to_unmap,
1611 	 * because ttu takes the rmap data structures down.
1612 	 */
1613 	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
1614 
1615 	if (PageHuge(hpage) && !PageAnon(hpage)) {
1616 		/*
1617 		 * For hugetlb pages in shared mappings, try_to_unmap
1618 		 * could potentially call huge_pmd_unshare.  Because of
1619 		 * this, take semaphore in write mode here and set
1620 		 * TTU_RMAP_LOCKED to indicate we have taken the lock
1621 		 * at this higher level.
1622 		 */
1623 		mapping = hugetlb_page_mapping_lock_write(hpage);
1624 		if (mapping) {
1625 			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
1626 			i_mmap_unlock_write(mapping);
1627 		} else
1628 			pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
1629 	} else {
1630 		try_to_unmap(folio, ttu);
1631 	}
1632 
1633 	unmap_success = !page_mapped(p);
1634 	if (!unmap_success)
1635 		pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
1636 		       pfn, page_mapcount(p));
1637 
1638 	/*
1639 	 * try_to_unmap() might put mlocked page in lru cache, so call
1640 	 * shake_page() again to ensure that it's flushed.
1641 	 */
1642 	if (mlocked)
1643 		shake_page(hpage);
1644 
1645 	/*
1646 	 * Now that the dirty bit has been propagated to the
1647 	 * struct page and all unmaps done we can decide if
1648 	 * killing is needed or not.  Only kill when the page
1649 	 * was dirty or the process is not restartable,
1650 	 * otherwise the tokill list is merely
1651 	 * freed.  When there was a problem unmapping earlier
1652 	 * use a more forceful uncatchable kill to prevent
1653 	 * any accesses to the poisoned memory.
1654 	 */
1655 	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
1656 		    !unmap_success;
1657 	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
1658 
1659 	return unmap_success;
1660 }
1661 
1662 static int identify_page_state(unsigned long pfn, struct page *p,
1663 				unsigned long page_flags)
1664 {
1665 	struct page_state *ps;
1666 
1667 	/*
1668 	 * The first check uses the current page flags which may not have any
1669 	 * relevant information. The second check with the saved page flags is
1670 	 * carried out only if the first check can't determine the page status.
1671 	 */
1672 	for (ps = error_states;; ps++)
1673 		if ((p->flags & ps->mask) == ps->res)
1674 			break;
1675 
1676 	page_flags |= (p->flags & (1UL << PG_dirty));
1677 
1678 	if (!ps->mask)
1679 		for (ps = error_states;; ps++)
1680 			if ((page_flags & ps->mask) == ps->res)
1681 				break;
1682 	return page_action(ps, p, pfn);
1683 }
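
/*
 * An example of why the second pass matters (illustrative scenario): if the
 * page was a dirty LRU page when the error was recorded but has since been
 * isolated, the live p->flags may match only the MF_MSG_UNKNOWN catchall,
 * while the saved page_flags (with the current PG_dirty bit folded in) still
 * match the MF_MSG_DIRTY_LRU row and select me_pagecache_dirty().
 */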
1684 
1685 static int try_to_split_thp_page(struct page *page)
1686 {
1687 	int ret;
1688 
1689 	lock_page(page);
1690 	ret = split_huge_page(page);
1691 	unlock_page(page);
1692 
1693 	if (unlikely(ret))
1694 		put_page(page);
1695 
1696 	return ret;
1697 }
1698 
1699 static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
1700 		struct address_space *mapping, pgoff_t index, int flags)
1701 {
1702 	struct to_kill *tk;
1703 	unsigned long size = 0;
1704 
1705 	list_for_each_entry(tk, to_kill, nd)
1706 		if (tk->size_shift)
1707 			size = max(size, 1UL << tk->size_shift);
1708 
1709 	if (size) {
1710 		/*
1711 		 * Unmap the largest mapping to avoid breaking up device-dax
1712 		 * mappings which are constant size. The actual size of the
1713 		 * mapping being torn down is communicated in siginfo, see
1714 		 * kill_proc()
1715 		 */
1716 		loff_t start = ((loff_t)index << PAGE_SHIFT) & ~(size - 1);
1717 
1718 		unmap_mapping_range(mapping, start, size, 0);
1719 	}
1720 
1721 	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
1722 }
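
/*
 * A worked example of the alignment above, assuming 4K base pages
 * (PAGE_SHIFT == 12) and one PMD-sized devdax mapping (tk->size_shift == 21,
 * so size == 2MB): for index == 0x301,
 * start == (0x301 << 12) & ~(0x200000 - 1) == 0x301000 & ~0x1fffff == 0x200000,
 * i.e. the whole 2MB extent containing the poisoned page is unmapped.
 */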
1723 
1724 /*
1725  * Only dev_pagemap pages get here, such as fsdax when the filesystem
1726  * either does not claim or fails to claim a hwpoison event, or devdax.
1727  * The fsdax pages are initialized per base page, and the devdax pages
1728  * could be initialized either as base pages, or as compound pages with
1729  * vmemmap optimization enabled. Devdax is simplistic in its dealing with
1730  * hwpoison: if a subpage of a compound page is poisoned, simply marking
1731  * the compound head page is sufficient.
1732  */
1733 static int mf_generic_kill_procs(unsigned long long pfn, int flags,
1734 		struct dev_pagemap *pgmap)
1735 {
1736 	struct folio *folio = pfn_folio(pfn);
1737 	LIST_HEAD(to_kill);
1738 	dax_entry_t cookie;
1739 	int rc = 0;
1740 
1741 	/*
1742 	 * Prevent the inode from being freed while we are interrogating
1743 	 * the address_space, typically this would be handled by
1744 	 * lock_page(), but dax pages do not use the page lock. This
1745 	 * also prevents changes to the mapping of this pfn until
1746 	 * poison signaling is complete.
1747 	 */
1748 	cookie = dax_lock_folio(folio);
1749 	if (!cookie)
1750 		return -EBUSY;
1751 
1752 	if (hwpoison_filter(&folio->page)) {
1753 		rc = -EOPNOTSUPP;
1754 		goto unlock;
1755 	}
1756 
1757 	switch (pgmap->type) {
1758 	case MEMORY_DEVICE_PRIVATE:
1759 	case MEMORY_DEVICE_COHERENT:
1760 		/*
1761 		 * TODO: Handle device pages which may need coordination
1762 		 * with device-side memory.
1763 		 */
1764 		rc = -ENXIO;
1765 		goto unlock;
1766 	default:
1767 		break;
1768 	}
1769 
1770 	/*
1771 	 * Use this flag as an indication that the dax page has been
1772 	 * remapped UC to prevent speculative consumption of poison.
1773 	 */
1774 	SetPageHWPoison(&folio->page);
1775 
1776 	/*
1777 	 * Unlike System-RAM there is no possibility to swap in a
1778 	 * different physical page at a given virtual address, so all
1779 	 * userspace consumption of ZONE_DEVICE memory necessitates
1780 	 * SIGBUS (i.e. MF_MUST_KILL)
1781 	 */
1782 	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1783 	collect_procs(folio, &folio->page, &to_kill, true);
1784 
1785 	unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
1786 unlock:
1787 	dax_unlock_folio(folio, cookie);
1788 	return rc;
1789 }
1790 
1791 #ifdef CONFIG_FS_DAX
1792 /**
1793  * mf_dax_kill_procs - Collect and kill processes that are using this file range
1794  * @mapping:	address_space of the file in use
1795  * @index:	start pgoff of the range within the file
1796  * @count:	length of the range, in units of PAGE_SIZE
1797  * @mf_flags:	memory failure flags
1798  */
1799 int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
1800 		unsigned long count, int mf_flags)
1801 {
1802 	LIST_HEAD(to_kill);
1803 	dax_entry_t cookie;
1804 	struct page *page;
1805 	size_t end = index + count;
1806 	bool pre_remove = mf_flags & MF_MEM_PRE_REMOVE;
1807 
1808 	mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1809 
1810 	for (; index < end; index++) {
1811 		page = NULL;
1812 		cookie = dax_lock_mapping_entry(mapping, index, &page);
1813 		if (!cookie)
1814 			return -EBUSY;
1815 		if (!page)
1816 			goto unlock;
1817 
1818 		if (!pre_remove)
1819 			SetPageHWPoison(page);
1820 
1821 		/*
1822 		 * The pre_remove case is revoking access, the memory is still
1823 		 * good and could theoretically be put back into service.
1824 		 */
1825 		collect_procs_fsdax(page, mapping, index, &to_kill, pre_remove);
1826 		unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
1827 				index, mf_flags);
1828 unlock:
1829 		dax_unlock_mapping_entry(mapping, index, cookie);
1830 	}
1831 	return 0;
1832 }
1833 EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
1834 #endif /* CONFIG_FS_DAX */
1835 
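/*
 * A hedged usage sketch for mf_dax_kill_procs(): a filesystem that has
 * already resolved a failed device range to a file mapping (as a dax
 * holder's ->notify_failure path would) hands the page range over for
 * process killing. The wrapper below is hypothetical.
 */
#ifdef CONFIG_FS_DAX
static int example_fs_kill_range(struct address_space *mapping, pgoff_t pgoff,
				 unsigned long nr_pages, int mf_flags)
{
	/* Signals every process mapping these file pages, see kill_procs(). */
	return mf_dax_kill_procs(mapping, pgoff, nr_pages, mf_flags);
}
#endif
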
1836 #ifdef CONFIG_HUGETLB_PAGE
1837 
1838 /*
1839  * Struct raw_hwp_page represents information about a "raw error page",
1840  * kept on a singly linked list off the folio's ->_hugetlb_hwpoison field.
1841  */
1842 struct raw_hwp_page {
1843 	struct llist_node node;
1844 	struct page *page;
1845 };
1846 
1847 static inline struct llist_head *raw_hwp_list_head(struct folio *folio)
1848 {
1849 	return (struct llist_head *)&folio->_hugetlb_hwpoison;
1850 }
1851 
1852 bool is_raw_hwpoison_page_in_hugepage(struct page *page)
1853 {
1854 	struct llist_head *raw_hwp_head;
1855 	struct raw_hwp_page *p;
1856 	struct folio *folio = page_folio(page);
1857 	bool ret = false;
1858 
1859 	if (!folio_test_hwpoison(folio))
1860 		return false;
1861 
1862 	if (!folio_test_hugetlb(folio))
1863 		return PageHWPoison(page);
1864 
1865 	/*
1866 	 * When RawHwpUnreliable is set, the kernel has lost track of which
1867 	 * subpages are HWPOISONed. So return as if ALL subpages are HWPOISONed.
1868 	 */
1869 	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1870 		return true;
1871 
1872 	mutex_lock(&mf_mutex);
1873 
1874 	raw_hwp_head = raw_hwp_list_head(folio);
1875 	llist_for_each_entry(p, raw_hwp_head->first, node) {
1876 		if (page == p->page) {
1877 			ret = true;
1878 			break;
1879 		}
1880 	}
1881 
1882 	mutex_unlock(&mf_mutex);
1883 
1884 	return ret;
1885 }
1886 
1887 static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag)
1888 {
1889 	struct llist_node *head;
1890 	struct raw_hwp_page *p, *next;
1891 	unsigned long count = 0;
1892 
1893 	head = llist_del_all(raw_hwp_list_head(folio));
1894 	llist_for_each_entry_safe(p, next, head, node) {
1895 		if (move_flag)
1896 			SetPageHWPoison(p->page);
1897 		else
1898 			num_poisoned_pages_sub(page_to_pfn(p->page), 1);
1899 		kfree(p);
1900 		count++;
1901 	}
1902 	return count;
1903 }
1904 
1905 static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page)
1906 {
1907 	struct llist_head *head;
1908 	struct raw_hwp_page *raw_hwp;
1909 	struct raw_hwp_page *p, *next;
1910 	int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0;
1911 
1912 	/*
1913 	 * Once the hwpoison hugepage has lost reliable raw error info,
1914 	 * there is little point in keeping additional error info precisely,
1915 	 * so skip adding further raw error info.
1916 	 */
1917 	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1918 		return -EHWPOISON;
1919 	head = raw_hwp_list_head(folio);
1920 	llist_for_each_entry_safe(p, next, head->first, node) {
1921 		if (p->page == page)
1922 			return -EHWPOISON;
1923 	}
1924 
1925 	raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
1926 	if (raw_hwp) {
1927 		raw_hwp->page = page;
1928 		llist_add(&raw_hwp->node, head);
1929 		/* the first error event will be counted in action_result(). */
1930 		if (ret)
1931 			num_poisoned_pages_inc(page_to_pfn(page));
1932 	} else {
1933 		/*
1934 		 * Failed to save raw error info.  We no longer trace all
1935 		 * hwpoisoned subpages, and we must refuse to free/dissolve
1936 		 * this hwpoisoned hugepage.
1937 		 */
1938 		folio_set_hugetlb_raw_hwp_unreliable(folio);
1939 		/*
1940 		 * Once hugetlb_raw_hwp_unreliable is set, raw_hwp_page is not
1941 		 * used any more, so free it.
1942 		 */
1943 		__folio_free_raw_hwp(folio, false);
1944 	}
1945 	return ret;
1946 }
1947 
1948 static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag)
1949 {
1950 	/*
1951 	 * hugetlb_vmemmap_optimized hugepages can't be freed because struct
1952 	 * pages for tail pages are required but they don't exist.
1953 	 */
1954 	if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio))
1955 		return 0;
1956 
1957 	/*
1958 	 * hugetlb_raw_hwp_unreliable hugepages shouldn't be unpoisoned by
1959 	 * definition.
1960 	 */
1961 	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1962 		return 0;
1963 
1964 	return __folio_free_raw_hwp(folio, move_flag);
1965 }
1966 
1967 void folio_clear_hugetlb_hwpoison(struct folio *folio)
1968 {
1969 	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1970 		return;
1971 	if (folio_test_hugetlb_vmemmap_optimized(folio))
1972 		return;
1973 	folio_clear_hwpoison(folio);
1974 	folio_free_raw_hwp(folio, true);
1975 }
1976 
1977 /*
1978  * Called from hugetlb code with hugetlb_lock held.
1979  *
1980  * Return values:
1981  *   0             - free hugepage
1982  *   1             - in-use hugepage
1983  *   2             - not a hugepage
1984  *   -EBUSY        - the hugepage is busy (try to retry)
1985  *   -EHWPOISON    - the hugepage is already hwpoisoned
1986  */
1987 int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
1988 				 bool *migratable_cleared)
1989 {
1990 	struct page *page = pfn_to_page(pfn);
1991 	struct folio *folio = page_folio(page);
1992 	int ret = 2;	/* fallback to normal page handling */
1993 	bool count_increased = false;
1994 
1995 	if (!folio_test_hugetlb(folio))
1996 		goto out;
1997 
1998 	if (flags & MF_COUNT_INCREASED) {
1999 		ret = 1;
2000 		count_increased = true;
2001 	} else if (folio_test_hugetlb_freed(folio)) {
2002 		ret = 0;
2003 	} else if (folio_test_hugetlb_migratable(folio)) {
2004 		ret = folio_try_get(folio);
2005 		if (ret)
2006 			count_increased = true;
2007 	} else {
2008 		ret = -EBUSY;
2009 		if (!(flags & MF_NO_RETRY))
2010 			goto out;
2011 	}
2012 
2013 	if (folio_set_hugetlb_hwpoison(folio, page)) {
2014 		ret = -EHWPOISON;
2015 		goto out;
2016 	}
2017 
2018 	/*
2019 	 * Clear hugetlb_migratable for hwpoisoned hugepages to prevent them
2020 	 * from being migrated by memory hotremove.
2021 	 */
2022 	if (count_increased && folio_test_hugetlb_migratable(folio)) {
2023 		folio_clear_hugetlb_migratable(folio);
2024 		*migratable_cleared = true;
2025 	}
2026 
2027 	return ret;
2028 out:
2029 	if (count_increased)
2030 		folio_put(folio);
2031 	return ret;
2032 }
2033 
2034 /*
2035  * Taking a refcount on hugetlb pages needs extra care about race conditions
2036  * with basic operations like hugepage allocation/free/demotion.
2037  * So some of the prechecks for hwpoison (pinning, and testing/setting
2038  * PageHWPoison) should be done within a single hugetlb_lock critical section.
2039  */
2040 static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
2041 {
2042 	int res;
2043 	struct page *p = pfn_to_page(pfn);
2044 	struct folio *folio;
2045 	unsigned long page_flags;
2046 	bool migratable_cleared = false;
2047 
2048 	*hugetlb = 1;
2049 retry:
2050 	res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
2051 	if (res == 2) { /* fallback to normal page handling */
2052 		*hugetlb = 0;
2053 		return 0;
2054 	} else if (res == -EHWPOISON) {
2055 		pr_err("%#lx: already hardware poisoned\n", pfn);
2056 		if (flags & MF_ACTION_REQUIRED) {
2057 			folio = page_folio(p);
2058 			res = kill_accessing_process(current, folio_pfn(folio), flags);
2059 		}
2060 		return res;
2061 	} else if (res == -EBUSY) {
2062 		if (!(flags & MF_NO_RETRY)) {
2063 			flags |= MF_NO_RETRY;
2064 			goto retry;
2065 		}
2066 		return action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
2067 	}
2068 
2069 	folio = page_folio(p);
2070 	folio_lock(folio);
2071 
2072 	if (hwpoison_filter(p)) {
2073 		folio_clear_hugetlb_hwpoison(folio);
2074 		if (migratable_cleared)
2075 			folio_set_hugetlb_migratable(folio);
2076 		folio_unlock(folio);
2077 		if (res == 1)
2078 			folio_put(folio);
2079 		return -EOPNOTSUPP;
2080 	}
2081 
2082 	/*
2083 	 * Handle a free hugepage.  The possible race with hugepage allocation
2084 	 * or demotion can be prevented by the PageHWPoison flag.
2085 	 */
2086 	if (res == 0) {
2087 		folio_unlock(folio);
2088 		if (__page_handle_poison(p) >= 0) {
2089 			page_ref_inc(p);
2090 			res = MF_RECOVERED;
2091 		} else {
2092 			res = MF_FAILED;
2093 		}
2094 		return action_result(pfn, MF_MSG_FREE_HUGE, res);
2095 	}
2096 
2097 	page_flags = folio->flags;
2098 
2099 	if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
2100 		folio_unlock(folio);
2101 		return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
2102 	}
2103 
2104 	return identify_page_state(pfn, p, page_flags);
2105 }
2106 
2107 #else
2108 static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
2109 {
2110 	return 0;
2111 }
2112 
2113 static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag)
2114 {
2115 	return 0;
2116 }
2117 #endif	/* CONFIG_HUGETLB_PAGE */
2118 
2119 /* Drop the extra refcount in case we come from madvise() */
2120 static void put_ref_page(unsigned long pfn, int flags)
2121 {
2122 	struct page *page;
2123 
2124 	if (!(flags & MF_COUNT_INCREASED))
2125 		return;
2126 
2127 	page = pfn_to_page(pfn);
2128 	if (page)
2129 		put_page(page);
2130 }
2131 
2132 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
2133 		struct dev_pagemap *pgmap)
2134 {
2135 	int rc = -ENXIO;
2136 
2137 	/* device metadata space is not recoverable */
2138 	if (!pgmap_pfn_valid(pgmap, pfn))
2139 		goto out;
2140 
2141 	/*
2142 	 * Call the driver's implementation to handle the memory failure,
2143 	 * otherwise fall back to the generic handler.
2144 	 */
2145 	if (pgmap_has_memory_failure(pgmap)) {
2146 		rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
2147 		/*
2148 		 * Fall back to the generic handler too if the operation is not
2149 		 * supported inside the driver/device/filesystem.
2150 		 */
2151 		if (rc != -EOPNOTSUPP)
2152 			goto out;
2153 	}
2154 
2155 	rc = mf_generic_kill_procs(pfn, flags, pgmap);
2156 out:
2157 	/* drop pgmap ref acquired in caller */
2158 	put_dev_pagemap(pgmap);
2159 	if (rc != -EOPNOTSUPP)
2160 		action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
2161 	return rc;
2162 }
2163 
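/*
 * A hedged sketch of the driver side of the handshake above: a dev_pagemap
 * owner may supply ->memory_failure() in its dev_pagemap_ops, and returning
 * -EOPNOTSUPP makes memory_failure_dev_pagemap() fall back to
 * mf_generic_kill_procs(). The body below is illustrative only.
 */
static int example_pgmap_memory_failure(struct dev_pagemap *pgmap,
		unsigned long pfn, unsigned long nr_pages, int mf_flags)
{
	/*
	 * A real driver would translate pfn to a device offset and notify
	 * its consumers (e.g. the filesystem via dax holder ops); decline
	 * here so the core uses the generic handler.
	 */
	return -EOPNOTSUPP;
}
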
2164 /**
2165  * memory_failure - Handle memory failure of a page.
2166  * @pfn: Page Number of the corrupted page
2167  * @flags: fine tune action taken
2168  *
2169  * This function is called by the low level machine check code
2170  * of an architecture when it detects hardware memory corruption
2171  * of a page. It tries its best to recover, which includes
2172  * dropping pages, killing processes etc.
2173  *
2174  * The function is primarily of use for corruptions that
2175  * happen outside the current execution context (e.g. when
2176  * detected by a background scrubber)
2177  *
2178  * Must run in process context (e.g. a work queue) with interrupts
2179  * enabled and no spinlocks held.
2180  *
2181  * Return: 0 if the memory error was successfully handled,
2182  *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
2183  *         < 0 (other than -EOPNOTSUPP) on failure.
2184  */
2185 int memory_failure(unsigned long pfn, int flags)
2186 {
2187 	struct page *p;
2188 	struct page *hpage;
2189 	struct dev_pagemap *pgmap;
2190 	int res = 0;
2191 	unsigned long page_flags;
2192 	bool retry = true;
2193 	int hugetlb = 0;
2194 
2195 	if (!sysctl_memory_failure_recovery)
2196 		panic("Memory failure on page %lx", pfn);
2197 
2198 	mutex_lock(&mf_mutex);
2199 
2200 	if (!(flags & MF_SW_SIMULATED))
2201 		hw_memory_failure = true;
2202 
2203 	p = pfn_to_online_page(pfn);
2204 	if (!p) {
2205 		res = arch_memory_failure(pfn, flags);
2206 		if (res == 0)
2207 			goto unlock_mutex;
2208 
2209 		if (pfn_valid(pfn)) {
2210 			pgmap = get_dev_pagemap(pfn, NULL);
2211 			put_ref_page(pfn, flags);
2212 			if (pgmap) {
2213 				res = memory_failure_dev_pagemap(pfn, flags,
2214 								 pgmap);
2215 				goto unlock_mutex;
2216 			}
2217 		}
2218 		pr_err("%#lx: memory outside kernel control\n", pfn);
2219 		res = -ENXIO;
2220 		goto unlock_mutex;
2221 	}
2222 
2223 try_again:
2224 	res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
2225 	if (hugetlb)
2226 		goto unlock_mutex;
2227 
2228 	if (TestSetPageHWPoison(p)) {
2229 		pr_err("%#lx: already hardware poisoned\n", pfn);
2230 		res = -EHWPOISON;
2231 		if (flags & MF_ACTION_REQUIRED)
2232 			res = kill_accessing_process(current, pfn, flags);
2233 		if (flags & MF_COUNT_INCREASED)
2234 			put_page(p);
2235 		goto unlock_mutex;
2236 	}
2237 
2238 	/*
2239 	 * We need/can do nothing about count=0 pages.
2240 	 * 1) it's a free page, and therefore in safe hands:
2241 	 *    check_new_page() will be the gatekeeper.
2242 	 * 2) it's part of a non-compound high order page.
2243 	 *    Implies some kernel user: cannot stop them from
2244 	 *    R/W the page; let's pray that the page has been
2245 	 *    used and will be freed some time later.
2246 	 * In fact it's dangerous to directly bump up page count from 0,
2247 	 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
2248 	 */
2249 	if (!(flags & MF_COUNT_INCREASED)) {
2250 		res = get_hwpoison_page(p, flags);
2251 		if (!res) {
2252 			if (is_free_buddy_page(p)) {
2253 				if (take_page_off_buddy(p)) {
2254 					page_ref_inc(p);
2255 					res = MF_RECOVERED;
2256 				} else {
2257 					/* We lost the race, try again */
2258 					if (retry) {
2259 						ClearPageHWPoison(p);
2260 						retry = false;
2261 						goto try_again;
2262 					}
2263 					res = MF_FAILED;
2264 				}
2265 				res = action_result(pfn, MF_MSG_BUDDY, res);
2266 			} else {
2267 				res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
2268 			}
2269 			goto unlock_mutex;
2270 		} else if (res < 0) {
2271 			res = action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
2272 			goto unlock_mutex;
2273 		}
2274 	}
2275 
2276 	hpage = compound_head(p);
2277 	if (PageTransHuge(hpage)) {
2278 		/*
2279 		 * The flag must be set after the refcount is bumped,
2280 		 * otherwise it may race with a THP split.
2281 		 * And the flag can't be set in get_hwpoison_page() since
2282 		 * it is called by soft offline too and it is just called
2283 		 * for !MF_COUNT_INCREASED.  So here seems to be the best
2284 		 * place.
2285 		 *
2286 		 * We don't need to care about the above error handling paths
2287 		 * for get_hwpoison_page() since they handle either a free page
2288 		 * or an unhandlable page.  The refcount is bumped iff the
2289 		 * page is a valid handlable page.
2290 		 */
2291 		SetPageHasHWPoisoned(hpage);
2292 		if (try_to_split_thp_page(p) < 0) {
2293 			res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
2294 			goto unlock_mutex;
2295 		}
2296 		VM_BUG_ON_PAGE(!page_count(p), p);
2297 	}
2298 
2299 	/*
2300 	 * We ignore non-LRU pages for good reasons.
2301 	 * - PG_locked is only well defined for LRU pages and a few others
2302 	 * - to avoid races with __SetPageLocked()
2303 	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
2304 	 * The check (unnecessarily) ignores LRU pages being isolated and
2305 	 * walked by the page reclaim code, however that's not a big loss.
2306 	 */
2307 	shake_page(p);
2308 
2309 	lock_page(p);
2310 
2311 	/*
2312 	 * We only intend to deal with non-compound pages here.
2313 	 * However, the page could have become a compound page due to a
2314 	 * race window. If this happens, we try again to hopefully
2315 	 * handle the page in the next round.
2316 	 */
2317 	if (PageCompound(p)) {
2318 		if (retry) {
2319 			ClearPageHWPoison(p);
2320 			unlock_page(p);
2321 			put_page(p);
2322 			flags &= ~MF_COUNT_INCREASED;
2323 			retry = false;
2324 			goto try_again;
2325 		}
2326 		res = action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
2327 		goto unlock_page;
2328 	}
2329 
2330 	/*
2331 	 * We use page flags to determine what action should be taken, but
2332 	 * the flags can be modified by the error containment action.  One
2333 	 * example is an mlocked page, where PG_mlocked is cleared by
2334 	 * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
2335 	 * status correctly, we save a copy of the page flags at this time.
2336 	 */
2337 	page_flags = p->flags;
2338 
2339 	if (hwpoison_filter(p)) {
2340 		ClearPageHWPoison(p);
2341 		unlock_page(p);
2342 		put_page(p);
2343 		res = -EOPNOTSUPP;
2344 		goto unlock_mutex;
2345 	}
2346 
2347 	/*
2348 	 * __munlock_folio() may clear a writeback page's LRU flag without
2349 	 * page_lock. We need to wait for writeback completion for this page,
2350 	 * or it may trigger a VFS BUG while evicting the inode.
2351 	 */
2352 	if (!PageLRU(p) && !PageWriteback(p))
2353 		goto identify_page_state;
2354 
2355 	/*
2356 	 * It's very difficult to mess with pages currently under IO
2357 	 * and in many cases impossible, so we just avoid it here.
2358 	 */
2359 	wait_on_page_writeback(p);
2360 
2361 	/*
2362 	 * Now take care of user space mappings.
2363 	 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
2364 	 */
2365 	if (!hwpoison_user_mappings(p, pfn, flags, p)) {
2366 		res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
2367 		goto unlock_page;
2368 	}
2369 
2370 	/*
2371 	 * Torn down by someone else?
2372 	 */
2373 	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
2374 		res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
2375 		goto unlock_page;
2376 	}
2377 
2378 identify_page_state:
2379 	res = identify_page_state(pfn, p, page_flags);
2380 	mutex_unlock(&mf_mutex);
2381 	return res;
2382 unlock_page:
2383 	unlock_page(p);
2384 unlock_mutex:
2385 	mutex_unlock(&mf_mutex);
2386 	return res;
2387 }
2388 EXPORT_SYMBOL_GPL(memory_failure);
2389 
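/*
 * A minimal caller sketch, modeled on what an error injector or an
 * madvise(MADV_HWPOISON)-style path does: process context, holding an
 * extra reference on the page, hence MF_COUNT_INCREASED. The wrapper
 * name is hypothetical.
 */
static int example_report_bad_page(struct page *page)
{
	get_page(page);	/* reference consumed via MF_COUNT_INCREASED */
	return memory_failure(page_to_pfn(page), MF_COUNT_INCREASED);
}
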
2390 #define MEMORY_FAILURE_FIFO_ORDER	4
2391 #define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)
2392 
2393 struct memory_failure_entry {
2394 	unsigned long pfn;
2395 	int flags;
2396 };
2397 
2398 struct memory_failure_cpu {
2399 	DECLARE_KFIFO(fifo, struct memory_failure_entry,
2400 		      MEMORY_FAILURE_FIFO_SIZE);
2401 	spinlock_t lock;
2402 	struct work_struct work;
2403 };
2404 
2405 static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
2406 
2407 /**
2408  * memory_failure_queue - Schedule handling memory failure of a page.
2409  * @pfn: Page Number of the corrupted page
2410  * @flags: Flags for memory failure handling
2411  *
2412  * This function is called by the low level hardware error handler
2413  * when it detects hardware memory corruption of a page. It schedules
2414  * the recovery of the error page, including dropping pages, killing
2415  * processes etc.
2416  *
2417  * The function is primarily of use for corruptions that
2418  * happen outside the current execution context (e.g. when
2419  * detected by a background scrubber)
2420  *
2421  * Can run in IRQ context.
2422  */
2423 void memory_failure_queue(unsigned long pfn, int flags)
2424 {
2425 	struct memory_failure_cpu *mf_cpu;
2426 	unsigned long proc_flags;
2427 	struct memory_failure_entry entry = {
2428 		.pfn =		pfn,
2429 		.flags =	flags,
2430 	};
2431 
2432 	mf_cpu = &get_cpu_var(memory_failure_cpu);
2433 	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
2434 	if (kfifo_put(&mf_cpu->fifo, entry))
2435 		schedule_work_on(smp_processor_id(), &mf_cpu->work);
2436 	else
2437 		pr_err("buffer overflow when queuing memory failure at %#lx\n",
2438 		       pfn);
2439 	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
2440 	put_cpu_var(memory_failure_cpu);
2441 }
2442 EXPORT_SYMBOL_GPL(memory_failure_queue);
2443 
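/*
 * A hedged sketch of the intended call site: a platform error handler
 * (GHES/APEI style) running in IRQ context defers the heavy lifting to
 * the per-CPU work item via memory_failure_queue(). The handler below
 * is hypothetical.
 */
static void example_platform_mem_error(u64 phys_addr, bool action_required)
{
	unsigned long pfn = PHYS_PFN(phys_addr);

	/* MF_ACTION_REQUIRED when the consuming context hit the error. */
	memory_failure_queue(pfn, action_required ? MF_ACTION_REQUIRED : 0);
}
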
2444 static void memory_failure_work_func(struct work_struct *work)
2445 {
2446 	struct memory_failure_cpu *mf_cpu;
2447 	struct memory_failure_entry entry = { 0, };
2448 	unsigned long proc_flags;
2449 	int gotten;
2450 
2451 	mf_cpu = container_of(work, struct memory_failure_cpu, work);
2452 	for (;;) {
2453 		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
2454 		gotten = kfifo_get(&mf_cpu->fifo, &entry);
2455 		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
2456 		if (!gotten)
2457 			break;
2458 		if (entry.flags & MF_SOFT_OFFLINE)
2459 			soft_offline_page(entry.pfn, entry.flags);
2460 		else
2461 			memory_failure(entry.pfn, entry.flags);
2462 	}
2463 }
2464 
2465 /*
2466  * Process memory_failure work queued on the specified CPU.
2467  * Used to avoid return-to-userspace racing with the memory_failure workqueue.
2468  */
2469 void memory_failure_queue_kick(int cpu)
2470 {
2471 	struct memory_failure_cpu *mf_cpu;
2472 
2473 	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
2474 	cancel_work_sync(&mf_cpu->work);
2475 	memory_failure_work_func(&mf_cpu->work);
2476 }
2477 
2478 static int __init memory_failure_init(void)
2479 {
2480 	struct memory_failure_cpu *mf_cpu;
2481 	int cpu;
2482 
2483 	for_each_possible_cpu(cpu) {
2484 		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
2485 		spin_lock_init(&mf_cpu->lock);
2486 		INIT_KFIFO(mf_cpu->fifo);
2487 		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
2488 	}
2489 
2490 	register_sysctl_init("vm", memory_failure_table);
2491 
2492 	return 0;
2493 }
2494 core_initcall(memory_failure_init);
2495 
2496 #undef pr_fmt
2497 #define pr_fmt(fmt)	"" fmt
2498 #define unpoison_pr_info(fmt, pfn, rs)			\
2499 ({							\
2500 	if (__ratelimit(rs))				\
2501 		pr_info(fmt, pfn);			\
2502 })
2503 
2504 /**
2505  * unpoison_memory - Unpoison a previously poisoned page
2506  * @pfn: Page number of the to be unpoisoned page
2507  *
2508  * Software-unpoison a page that has been poisoned by
2509  * memory_failure() earlier.
2510  *
2511  * This is only done at the software level, so it only works
2512  * for Linux-injected failures, not real hardware failures.
2513  *
2514  * Returns 0 for success, otherwise -errno.
2515  */
2516 int unpoison_memory(unsigned long pfn)
2517 {
2518 	struct folio *folio;
2519 	struct page *p;
2520 	int ret = -EBUSY, ghp;
2521 	unsigned long count = 1;
2522 	bool huge = false;
2523 	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
2524 					DEFAULT_RATELIMIT_BURST);
2525 
2526 	if (!pfn_valid(pfn))
2527 		return -ENXIO;
2528 
2529 	p = pfn_to_page(pfn);
2530 	folio = page_folio(p);
2531 
2532 	mutex_lock(&mf_mutex);
2533 
2534 	if (hw_memory_failure) {
2535 		unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
2536 				 pfn, &unpoison_rs);
2537 		ret = -EOPNOTSUPP;
2538 		goto unlock_mutex;
2539 	}
2540 
2541 	if (!PageHWPoison(p)) {
2542 		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
2543 				 pfn, &unpoison_rs);
2544 		goto unlock_mutex;
2545 	}
2546 
2547 	if (folio_ref_count(folio) > 1) {
2548 		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
2549 				 pfn, &unpoison_rs);
2550 		goto unlock_mutex;
2551 	}
2552 
2553 	if (folio_test_slab(folio) || PageTable(&folio->page) ||
2554 	    folio_test_reserved(folio) || PageOffline(&folio->page))
2555 		goto unlock_mutex;
2556 
2557 	/*
2558 	 * Note that folio->_mapcount is overloaded in SLAB, so the simple test
2559 	 * in folio_mapped() has to be done after folio_test_slab() is checked.
2560 	 */
2561 	if (folio_mapped(folio)) {
2562 		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
2563 				 pfn, &unpoison_rs);
2564 		goto unlock_mutex;
2565 	}
2566 
2567 	if (folio_mapping(folio)) {
2568 		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
2569 				 pfn, &unpoison_rs);
2570 		goto unlock_mutex;
2571 	}
2572 
2573 	ghp = get_hwpoison_page(p, MF_UNPOISON);
2574 	if (!ghp) {
2575 		if (PageHuge(p)) {
2576 			huge = true;
2577 			count = folio_free_raw_hwp(folio, false);
2578 			if (count == 0)
2579 				goto unlock_mutex;
2580 		}
2581 		ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY;
2582 	} else if (ghp < 0) {
2583 		if (ghp == -EHWPOISON) {
2584 			ret = put_page_back_buddy(p) ? 0 : -EBUSY;
2585 		} else {
2586 			ret = ghp;
2587 			unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
2588 					 pfn, &unpoison_rs);
2589 		}
2590 	} else {
2591 		if (PageHuge(p)) {
2592 			huge = true;
2593 			count = folio_free_raw_hwp(folio, false);
2594 			if (count == 0) {
2595 				folio_put(folio);
2596 				goto unlock_mutex;
2597 			}
2598 		}
2599 
2600 		folio_put(folio);
2601 		if (TestClearPageHWPoison(p)) {
2602 			folio_put(folio);
2603 			ret = 0;
2604 		}
2605 	}
2606 
2607 unlock_mutex:
2608 	mutex_unlock(&mf_mutex);
2609 	if (!ret) {
2610 		if (!huge)
2611 			num_poisoned_pages_sub(pfn, 1);
2612 		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
2613 				 page_to_pfn(p), &unpoison_rs);
2614 	}
2615 	return ret;
2616 }
2617 EXPORT_SYMBOL(unpoison_memory);
2618 
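/*
 * A minimal usage sketch, modeled on the hwpoison-inject debugfs
 * interface: undo a software-injected poisoning. Expect -EOPNOTSUPP once
 * a real hardware failure has been handled (see hw_memory_failure).
 */
static int example_debugfs_unpoison(u64 val)
{
	return unpoison_memory((unsigned long)val);
}
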
2619 static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
2620 {
2621 	bool isolated = false;
2622 
2623 	if (folio_test_hugetlb(folio)) {
2624 		isolated = isolate_hugetlb(folio, pagelist);
2625 	} else {
2626 		bool lru = !__folio_test_movable(folio);
2627 
2628 		if (lru)
2629 			isolated = folio_isolate_lru(folio);
2630 		else
2631 			isolated = isolate_movable_page(&folio->page,
2632 							ISOLATE_UNEVICTABLE);
2633 
2634 		if (isolated) {
2635 			list_add(&folio->lru, pagelist);
2636 			if (lru)
2637 				node_stat_add_folio(folio, NR_ISOLATED_ANON +
2638 						    folio_is_file_lru(folio));
2639 		}
2640 	}
2641 
2642 	/*
2643 	 * If we succeeded in isolating the folio, we grabbed another refcount on
2644 	 * the folio, so we can safely drop the one we got from get_any_page().
2645 	 * If we failed to isolate the folio, it means that we cannot go further
2646 	 * and we will return an error, so drop the reference we got from
2647 	 * get_any_page() as well.
2648 	 */
2649 	folio_put(folio);
2650 	return isolated;
2651 }
2652 
2653 /*
2654  * soft_offline_in_use_page handles hugetlb pages and non-hugetlb pages.
2655  * If the page is a non-dirty, unmapped page-cache page, it simply invalidates it.
2656  * If the page is mapped, it migrates the contents over.
2657  */
2658 static int soft_offline_in_use_page(struct page *page)
2659 {
2660 	long ret = 0;
2661 	unsigned long pfn = page_to_pfn(page);
2662 	struct folio *folio = page_folio(page);
2663 	char const *msg_page[] = {"page", "hugepage"};
2664 	bool huge = folio_test_hugetlb(folio);
2665 	LIST_HEAD(pagelist);
2666 	struct migration_target_control mtc = {
2667 		.nid = NUMA_NO_NODE,
2668 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
2669 	};
2670 
2671 	if (!huge && folio_test_large(folio)) {
2672 		if (try_to_split_thp_page(page)) {
2673 			pr_info("soft offline: %#lx: thp split failed\n", pfn);
2674 			return -EBUSY;
2675 		}
2676 		folio = page_folio(page);
2677 	}
2678 
2679 	folio_lock(folio);
2680 	if (!huge)
2681 		folio_wait_writeback(folio);
2682 	if (PageHWPoison(page)) {
2683 		folio_unlock(folio);
2684 		folio_put(folio);
2685 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
2686 		return 0;
2687 	}
2688 
2689 	if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))
2690 		/*
2691 		 * Try to invalidate first. This should work for
2692 		 * non-dirty, unmapped page-cache pages.
2693 		 */
2694 		ret = mapping_evict_folio(folio_mapping(folio), folio);
2695 	folio_unlock(folio);
2696 
2697 	if (ret) {
2698 		pr_info("soft_offline: %#lx: invalidated\n", pfn);
2699 		page_handle_poison(page, false, true);
2700 		return 0;
2701 	}
2702 
2703 	if (mf_isolate_folio(folio, &pagelist)) {
2704 		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
2705 			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
2706 		if (!ret) {
2707 			bool release = !huge;
2708 
2709 			if (!page_handle_poison(page, huge, release))
2710 				ret = -EBUSY;
2711 		} else {
2712 			if (!list_empty(&pagelist))
2713 				putback_movable_pages(&pagelist);
2714 
2715 			pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
2716 				pfn, msg_page[huge], ret, &page->flags);
2717 			if (ret > 0)
2718 				ret = -EBUSY;
2719 		}
2720 	} else {
2721 		pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
2722 			pfn, msg_page[huge], page_count(page), &page->flags);
2723 		ret = -EBUSY;
2724 	}
2725 	return ret;
2726 }
2727 
2728 /**
2729  * soft_offline_page - Soft offline a page.
2730  * @pfn: pfn to soft-offline
2731  * @flags: flags. Same as memory_failure().
2732  *
2733  * Returns 0 on success,
2734  *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
2735  *         < 0 otherwise (a negated errno).
2736  *
2737  * Soft offline a page, by migration or invalidation,
2738  * without killing anything. This is for the case when
2739  * a page is not corrupted yet (so it's still valid to access),
2740  * but has had a number of corrected errors and is better taken
2741  * out.
2742  *
2743  * The actual policy on when to do that is maintained by
2744  * user space.
2745  *
2746  * This should never impact any application or cause data loss,
2747  * however it might take some time.
2748  *
2749  * This is not a 100% solution for all memory, but tries to be
2750  * ``good enough'' for the majority of memory.
2751  */
2752 int soft_offline_page(unsigned long pfn, int flags)
2753 {
2754 	int ret;
2755 	bool try_again = true;
2756 	struct page *page;
2757 
2758 	if (!pfn_valid(pfn)) {
2759 		WARN_ON_ONCE(flags & MF_COUNT_INCREASED);
2760 		return -ENXIO;
2761 	}
2762 
2763 	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
2764 	page = pfn_to_online_page(pfn);
2765 	if (!page) {
2766 		put_ref_page(pfn, flags);
2767 		return -EIO;
2768 	}
2769 
2770 	mutex_lock(&mf_mutex);
2771 
2772 	if (PageHWPoison(page)) {
2773 		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
2774 		put_ref_page(pfn, flags);
2775 		mutex_unlock(&mf_mutex);
2776 		return 0;
2777 	}
2778 
2779 retry:
2780 	get_online_mems();
2781 	ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
2782 	put_online_mems();
2783 
2784 	if (hwpoison_filter(page)) {
2785 		if (ret > 0)
2786 			put_page(page);
2787 
2788 		mutex_unlock(&mf_mutex);
2789 		return -EOPNOTSUPP;
2790 	}
2791 
2792 	if (ret > 0) {
2793 		ret = soft_offline_in_use_page(page);
2794 	} else if (ret == 0) {
2795 		if (!page_handle_poison(page, true, false)) {
2796 			if (try_again) {
2797 				try_again = false;
2798 				flags &= ~MF_COUNT_INCREASED;
2799 				goto retry;
2800 			}
2801 			ret = -EBUSY;
2802 		}
2803 	}
2804 
2805 	mutex_unlock(&mf_mutex);
2806 
2807 	return ret;
2808 }
2809
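/*
 * A hedged caller sketch: a corrected-error threshold policy (RAS CEC
 * style) can soft-offline a still-good page directly from process
 * context, or queue it via memory_failure_queue(pfn, MF_SOFT_OFFLINE)
 * from harder contexts. The helper name is hypothetical.
 */
static int example_offline_flaky_page(unsigned long pfn)
{
	/* No extra page reference held, so no MF_COUNT_INCREASED. */
	return soft_offline_page(pfn, 0);
}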